Merge pull request #25017 from kaingwade:ml_to_contrib

Move ml to opencv_contrib #25017
OpenCV cleanup: #24997

opencv_contrib: opencv/opencv_contrib#3636
WU Jia 2024-02-27 20:54:08 +08:00 committed by GitHub
parent cb7d38b477
commit 6722d4a524
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
106 changed files with 5 additions and 23685 deletions

View File

@ -175,13 +175,6 @@
year = {1998},
publisher = {Citeseer}
}
@book{Breiman84,
title = {Classification and regression trees},
author = {Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A},
year = {1984},
publisher = {CRC press},
url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223}
}
@incollection{Brox2004,
author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim},
title = {High accuracy optical flow estimation based on a theory for warping},
@ -349,12 +342,6 @@
publisher = {ACM},
url = {https://www.researchgate.net/profile/Liyuan_Li/publication/221571587_Foreground_object_detection_from_videos_containing_complex_background/links/09e4150bdf566d110c000000/Foreground-object-detection-from-videos-containing-complex-background.pdf}
}
@article{FHT98,
author = {Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert},
title = {Additive Logistic Regression: a Statistical View of Boosting},
year = {1998},
url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223}
}
@inproceedings{FL02,
author = {Fattal, Raanan and Lischinski, Dani and Werman, Michael},
title = {Gradient domain high dynamic range compression},
@ -521,16 +508,6 @@
publisher = {IEEE},
url = {http://www.openrs.org/photogrammetry/2015/SGM%202008%20PAMI%20-%20Stereo%20Processing%20by%20Semiglobal%20Matching%20and%20Mutual%20Informtion.pdf}
}
@article{HTF01,
author = {Trevor, Hastie and Robert, Tibshirani and Jerome, Friedman},
title = {The elements of statistical learning: data mining, inference and prediction},
year = {2001},
pages = {371--406},
journal = {New York: Springer-Verlag},
volume = {1},
number = {8},
url = {http://www.stat.auckland.ac.nz/~yee/784/files/ch09AdditiveModelsTrees.pdf}
}
@article{Hartley99,
author = {Hartley, Richard I},
title = {Theory and practice of projective rectification},
@ -602,17 +579,6 @@
number = {3},
publisher = {Elsevier}
}
@article{Kirkpatrick83,
author = {Kirkpatrick, S. and Gelatt, C. D. Jr and Vecchi, M. P.},
title = {Optimization by Simulated Annealing},
year = {1983},
pages = {671--680},
journal = {Science},
volume = {220},
number = {4598},
publisher = {American Association for the Advancement of Science},
url = {http://sci2s.ugr.es/sites/default/files/files/Teaching/GraduatesCourses/Metaheuristicas/Bibliography/1983-Science-Kirkpatrick-sim_anneal.pdf}
}
@inproceedings{Kolmogorov03,
author = {Kim, Junhwan and Kolmogorov, Vladimir and Zabih, Ramin},
title = {Visual correspondence using energy minimization and mutual information},
@ -657,16 +623,6 @@
volume = {5},
pages = {1530-1536}
}
@article{LibSVM,
author = {Chang, Chih-Chung and Lin, Chih-Jen},
title = {LIBSVM: a library for support vector machines},
year = {2011},
pages = {27},
journal = {ACM Transactions on Intelligent Systems and Technology (TIST)},
volume = {2},
number = {3},
publisher = {ACM}
}
@inproceedings{Lienhart02,
author = {Lienhart, Rainer and Maydt, Jochen},
title = {An extended set of haar-like features for rapid object detection},
@ -905,14 +861,6 @@
number = {1},
publisher = {IEEE}
}
@inproceedings{RPROP93,
author = {Riedmiller, Martin and Braun, Heinrich},
title = {A direct adaptive method for faster backpropagation learning: The RPROP algorithm},
booktitle = {Neural Networks, 1993., IEEE International Conference on},
year = {1993},
pages = {586--591},
publisher = {IEEE}
}
@inproceedings{RRKB11,
author = {Rublee, Ethan and Rabaud, Vincent and Konolige, Kurt and Bradski, Gary},
title = {ORB: an efficient alternative to SIFT or SURF},
@ -1235,14 +1183,6 @@
year = {2007},
publisher = {IEEE}
}
@incollection{bottou2010large,
title = {Large-scale machine learning with stochastic gradient descent},
author = {Bottou, L{\'e}on},
booktitle = {Proceedings of COMPSTAT'2010},
pages = {177--186},
year = {2010},
publisher = {Springer}
}
@inproceedings{Ke17,
author = {Ke, Tong and Roumeliotis, Stergios},
title = {An Efficient Algebraic Solution to the Perspective-Three-Point Problem},

(4 binary image files deleted; contents not shown)

View File

@ -1,10 +0,0 @@
K-Nearest Neighbour {#tutorial_py_knn_index}
===================
- @subpage tutorial_py_knn_understanding
Get a basic understanding of what kNN is
- @subpage tutorial_py_knn_opencv
Now let's use kNN in OpenCV for digit recognition OCR

View File

@ -1,123 +0,0 @@
OCR of Hand-written Data using kNN {#tutorial_py_knn_opencv}
==================================
Goal
----
In this chapter:
- We will use our knowledge on kNN to build a basic OCR (Optical Character Recognition) application.
- We will try our application on Digits and Alphabets data that comes with OpenCV.
OCR of Hand-written Digits
--------------------------
Our goal is to build an application which can read handwritten digits. For this we need some
training data and some test data. OpenCV comes with an image digits.png (in the folder
opencv/samples/data/) which has 5000 handwritten digits (500 for each digit). Each digit is
a 20x20 image. So our first step is to split this image into 5000 different digit images. Then for each digit (20x20 image),
we flatten it into a single row with 400 pixels. That is our feature set, i.e. intensity values of all
pixels. It is the simplest feature set we can create. We use the first 250 samples of each digit as
training data, and the other 250 samples as test data. So let's prepare them first.
@code{.py}
import numpy as np
import cv2 as cv
img = cv.imread('digits.png')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array: its size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare the training data and test data
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train it on the training data, then test it with the test data with k=5
knn = cv.ml.KNearest_create()
knn.train(train, cv.ml.ROW_SAMPLE, train_labels)
ret,result,neighbours,dist = knn.findNearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print( accuracy )
@endcode
So our basic OCR app is ready. This particular example gave me an accuracy of 91%. One option to
improve accuracy is to add more data for training, especially for the digits where we had more errors.
Instead of preparing
this training data every time the application starts, it is better to save it, so that next time we can directly
read this data from a file and start classifying. This can be done with the help of some NumPy
functions like np.savetxt, np.savez, np.load, etc. Please check the NumPy docs for more details.
@code{.py}
# Save the data
np.savez('knn_data.npz',train=train, train_labels=train_labels)
# Now load the data
with np.load('knn_data.npz') as data:
print( data.files )
train = data['train']
train_labels = data['train_labels']
@endcode
On my system, the saved file takes around 4.4 MB. Since we are using intensity values (uint8 data) as
features, it is better to convert the data to np.uint8 first and then save it; the file then takes only
about 1.1 MB. While loading, you can convert back to float32.
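For example, a minimal sketch of that idea (reusing the variable names from the code above):
@code{.py}
# Save the features as uint8 to keep the file small ...
np.savez('knn_data.npz', train=train.astype(np.uint8), train_labels=train_labels)
# ... and convert back to float32 after loading, since kNN expects float32 samples
with np.load('knn_data.npz') as data:
    train = data['train'].astype(np.float32)
    train_labels = data['train_labels']
@endcode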
OCR of the English Alphabet
------------------------
Next we will do the same for the English alphabet, but there is a slight change in data and feature
set. Here, instead of images, OpenCV comes with a data file, letter-recognition.data in
opencv/samples/cpp/ folder. If you open it, you will see 20000 lines which may, on first sight, look
like garbage. Actually, in each row, the first column is a letter which is our label. The next 16 numbers
following it are the different features. These features are obtained from the [UCI Machine Learning
Repository](http://archive.ics.uci.edu/ml/). You can find the details of these features in [this
page](http://archive.ics.uci.edu/ml/datasets/Letter+Recognition).
There are 20000 samples available, so we take the first 10000 as training samples and the remaining
10000 as test samples. We also need to convert the letters to numbers, because we can't work with
letters directly.
@code{.py}
import cv2 as cv
import numpy as np
# Load the data and convert the letters to numbers
data= np.loadtxt('letter-recognition.data', dtype= 'float32', delimiter = ',',
converters= {0: lambda ch: ord(ch)-ord('A')})
# Split the dataset in two, with 10000 samples each for training and test sets
train, test = np.vsplit(data,2)
# Split trainData and testData into features and responses
responses, trainData = np.hsplit(train,[1])
labels, testData = np.hsplit(test,[1])
# Initiate the kNN, classify, measure accuracy
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
ret, result, neighbours, dist = knn.findNearest(testData, k=5)
correct = np.count_nonzero(result == labels)
accuracy = correct*100.0/10000
print( accuracy )
@endcode
It gives me an accuracy of 93.22%. Again, if you want to increase accuracy, you can iteratively add
more data.
Additional Resources
--------------------
1. [Wikipedia article on Optical character recognition](https://en.wikipedia.org/wiki/Optical_character_recognition)
Exercises
---------
1. Here we used k=5. What happens if you try other values of k? Can you find a value that maximizes accuracy (minimizes the number of errors)?
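One way to explore this exercise, reusing the train, test and test_labels arrays prepared earlier (the range of k values tried here is an arbitrary choice):
@code{.py}
# Try several values of k and report the accuracy for each
for k in range(1, 11):
    ret, result, neighbours, dist = knn.findNearest(test, k=k)
    correct = np.count_nonzero(result == test_labels)
    print('k = {:2d}  accuracy = {:.2f}%'.format(k, correct * 100.0 / result.size))
@endcode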

(2 binary image files deleted; contents not shown)

View File

@ -1,150 +0,0 @@
Understanding k-Nearest Neighbour {#tutorial_py_knn_understanding}
=================================
Goal
----
In this chapter, we will understand the concepts of the k-Nearest Neighbour (kNN) algorithm.
Theory
------
kNN is one of the simplest classification algorithms available for supervised learning. The idea
is to search for the closest match(es) of the test data in the feature space. We will look into it with the
image below.
![image](images/knn_theory.png)
In the image, there are two families: Blue Squares and Red Triangles. We refer to each family as
a **Class**. Their houses are shown in their town map which we call the **Feature Space**. You can consider
a feature space as a space where all data are projected. For example, consider a 2D coordinate
space. Each datum has two features, an x coordinate and a y coordinate. You can represent this datum in your 2D
coordinate space, right? Now imagine that there are three features: you will need 3D space. Now consider N
features: you need N-dimensional space, right? This N-dimensional space is its feature space.
In our image, you can consider it as a 2D case with two features.
Now consider what happens if a new member comes into the town and creates a new home, which is shown as the green circle. He
should be added to one of these Blue or Red families (or *classes*). We call that process, **Classification**. How exactly should this new member be classified? Since we are dealing with kNN, let us apply the algorithm.
One simple method is to check who is his nearest neighbour. From the image, it is clear that it is a member of the Red
Triangle family. So he is classified as a Red Triangle. This method is called simply **Nearest Neighbour** classification, because classification depends only on the *nearest neighbour*.
But there is a problem with this approach! Red Triangle may be the nearest neighbour, but what if there are also a lot of Blue
Squares nearby? Then Blue Squares have more strength in that locality than Red Triangles, so
just checking the nearest one is not sufficient. Instead we may want to check some **k** nearest families. Then whichever family is the majority amongst them, the new guy should belong to that family. In our image, let's take k=3, i.e. consider the 3 nearest
neighbours. The new member has two Red neighbours and one Blue neighbour (there are two Blues equidistant, but since k=3, we can take only
one of them), so again he should be added to Red family. But what if we take k=7? Then he has 5 Blue
neighbours and 2 Red neighbours and should be added to the Blue family. The result will vary with the selected
value of k. Note that if k is not an odd number, we can get a tie, as would happen in the above case with k=4. We would see that our new member has 2 Red and 2 Blue neighbours as his four nearest neighbours and we would need to choose a method for breaking the tie to perform classification. So to reiterate, this method is called **k-Nearest Neighbour** since
classification depends on the *k nearest neighbours*.
Again, in kNN, it is true we are considering k neighbours, but we are giving equal importance to
all, right? Is this justified? For example, take the tied case of k=4. As we can see, the 2
Red neighbours are actually closer to the new member than the other 2 Blue neighbours, so he is more eligible to be
added to the Red family. How do we mathematically explain that? We give some weights to each neighbour
depending on their distance to the new-comer: those who are nearer to him get higher weights, while
those that are farther away get lower weights. Then we add the total weights of each family separately and classify the new-comer as part of whichever family
received higher total weights. This is called **modified kNN** or **weighted kNN**.
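OpenCV's kNN implementation does not apply such weights itself, but the neighbour labels and distances returned by findNearest (introduced in the next section) are enough to sketch a weighted vote. A hypothetical helper, purely for illustration:
@code{.py}
import numpy as np

# Hypothetical helper: classify one sample from the k neighbour labels and their
# distances, as returned by cv.ml.KNearest.findNearest (see the next section).
def weighted_vote(neighbour_labels, distances):
    weights = 1.0 / (distances + 1e-5)            # nearer neighbours get larger weights
    totals = {}
    for label, w in zip(neighbour_labels.ravel(), weights.ravel()):
        totals[label] = totals.get(label, 0.0) + w
    return max(totals, key=totals.get)            # class with the largest total weight
@endcode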
So what are some important things you see here?
- Because we have to check
the distance from the new-comer to all the existing houses to find the nearest neighbour(s), you need to have information about all of the houses in town, right? If there are plenty of houses and families, it takes a lot of memory, and also more time for calculation.
- There is almost zero time for any kind of "training" or preparation. Our "learning" involves only memorizing (storing) the data, before testing and classifying.
Now let's see this algorithm at work in OpenCV.
kNN in OpenCV
-------------
We will do a simple example here, with two families (classes), just like above. Then in the next
chapter, we will do an even better example.
So here, we label the Red family as **Class-0** (so denoted by 0) and Blue family as **Class-1**
(denoted by 1). We create 25 training samples (our 25 neighbours), and label each of them as either part of Class-0 or Class-1.
We can do this with the help of a Random Number Generator from NumPy.
Then we can plot it with the help of Matplotlib. Red neighbours are shown as Red Triangles and Blue
neighbours are shown as Blue Squares.
@code{.py}
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# Feature set containing (x,y) values of 25 known/training data
trainData = np.random.randint(0,100,(25,2)).astype(np.float32)
# Label each one either Red or Blue with numbers 0 and 1
responses = np.random.randint(0,2,(25,1)).astype(np.float32)
# Take Red neighbours and plot them
red = trainData[responses.ravel()==0]
plt.scatter(red[:,0],red[:,1],80,'r','^')
# Take Blue neighbours and plot them
blue = trainData[responses.ravel()==1]
plt.scatter(blue[:,0],blue[:,1],80,'b','s')
plt.show()
@endcode
You will get something similar to our first image. Since you are using a random number generator, you
will get different data each time you run the code.
Next initiate the kNN algorithm and pass the trainData and responses to train the kNN. (Under the hood, it constructs
a search tree: see the Additional Resources section below for more information on this.)
Then we will bring one new-comer and classify him as belonging to a family with the help of kNN in OpenCV. Before
running kNN, we need to know something about our test data (data of new comers). Our data should be a
floating point array with size \f$number \; of \; testdata \times number \; of \; features\f$. Then we
find the nearest neighbours of the new-comer. We can specify *k*: how many neighbours we want. (Here we used 3.) It returns:
1. The label given to the new-comer depending upon the kNN theory we saw earlier. If you want the *Nearest
Neighbour* algorithm, just specify k=1.
2. The labels of the k-Nearest Neighbours.
3. The corresponding distances from the new-comer to each nearest neighbour.
So let's see how it works. The new-comer is marked in green.
@code{.py}
newcomer = np.random.randint(0,100,(1,2)).astype(np.float32)
plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
ret, results, neighbours ,dist = knn.findNearest(newcomer, 3)
print( "result: {}\n".format(results) )
print( "neighbours: {}\n".format(neighbours) )
print( "distance: {}\n".format(dist) )
plt.show()
@endcode
I got the following results:
@code{.py}
result: [[ 1.]]
neighbours: [[ 1. 1. 1.]]
distance: [[ 53. 58. 61.]]
@endcode
It says that our new-comer's 3 nearest neighbours are all from the Blue family. Therefore, he is labelled as part of the Blue
family. It is obvious from the plot below:
![image](images/knn_simple.png)
If you have multiple new-comers (test data), you can just pass them as an array. Corresponding results are also
obtained as arrays.
@code{.py}
# 10 new-comers
newcomers = np.random.randint(0,100,(10,2)).astype(np.float32)
ret, results, neighbours, dist = knn.findNearest(newcomers, 3)
# The results also will contain 10 labels.
@endcode
Additional Resources
--------------------
1. [NPTEL notes on Pattern Recognition, Chapter
11](https://nptel.ac.in/courses/106108057)
2. [Wikipedia article on Nearest neighbor search](https://en.wikipedia.org/wiki/Nearest_neighbor_search)
3. [Wikipedia article on k-d tree](https://en.wikipedia.org/wiki/K-d_tree)
Exercises
---------
1. Try repeating the above with more classes and different choices of k. Does choosing k become harder with more classes in the same 2D feature space?

(5 binary image files deleted; contents not shown)

View File

@ -1,134 +0,0 @@
Understanding SVM {#tutorial_py_svm_basics}
=================
Goal
----
In this chapter
- We will see an intuitive understanding of SVM
Theory
------
### Linearly Separable Data
Consider the image below, which has two types of data, red and blue. In kNN, for a test sample, we
measured its distance to all the training samples and took the one with the minimum distance. It takes
plenty of time to measure all the distances and plenty of memory to store all the training samples.
But considering the data shown in the image, do we need that much?
![image](images/svm_basics1.png)
Consider another idea. We find a line, \f$f(x)=ax_1+bx_2+c\f$, which divides the data into two
regions. When we get a new test sample \f$X\f$, just substitute it in \f$f(x)\f$. If \f$f(X) > 0\f$, it belongs
to the blue group, else it belongs to the red group. We call this line the **Decision Boundary**. It is
very simple and memory-efficient. Data which can be divided into two groups with a straight line (or a
hyperplane in higher dimensions) is called **Linearly Separable**.
In the image above you can see that plenty of such lines are possible. Which one should we take? Very
intuitively we can say that the line should pass as far as possible from all the points. Why?
Because there can be noise in the incoming data, and this noise should not affect the classification
accuracy. So taking the farthest line provides more immunity against noise. What SVM does is
find a straight line (or hyperplane) with the largest minimum distance to the training samples. See the
bold line in the image below passing through the center.
![image](images/svm_basics2.png)
So to find this Decision Boundary, you need training data. Do you need all? NO. Just the ones which
are close to the opposite group are sufficient. In our image, they are the one blue filled circle
and two red filled squares. We can call them **Support Vectors** and the lines passing through them
are called **Support Planes**. They are adequate for finding our decision boundary. We need not
worry about all the data. It helps in data reduction.
What happens is that, first, two hyperplanes are found which best represent the data. For example, blue data
is represented by \f$w^Tx+b_0 > 1\f$ while red data is represented by \f$w^Tx+b_0 < -1\f$, where \f$w\f$ is the
**weight vector** (\f$w=[w_1, w_2,..., w_n]\f$) and \f$x\f$ is the feature vector
(\f$x = [x_1,x_2,..., x_n]\f$). \f$b_0\f$ is the **bias**. The weight vector decides the orientation of the decision
boundary while the bias decides its location. Now the decision boundary is defined to be midway
between these hyperplanes, and is therefore expressed as \f$w^Tx+b_0 = 0\f$. The minimum distance from a support vector
to the decision boundary is given by \f$distance_{support \, vectors}=\frac{1}{||w||}\f$. The margin is
twice this distance, and we need to maximize this margin. That is, we need to minimize a new function
\f$L(w, b_0)\f$ with some constraints, which can be expressed as below:
\f[\min_{w, b_0} L(w, b_0) = \frac{1}{2}||w||^2 \; \text{subject to} \; t_i(w^Tx+b_0) \geq 1 \; \forall i\f]
where \f$t_i\f$ is the label of each class, \f$t_i \in \{-1,1\}\f$.
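As a small numeric illustration of these formulas (the weight vector, bias and sample are made-up values):
@code{.py}
import numpy as np

w = np.array([3.0, 4.0])            # assumed weight vector
b0 = -5.0                           # assumed bias
x = np.array([2.0, 1.0])            # a sample to classify

f = w.dot(x) + b0                   # decision function: 3*2 + 4*1 - 5 = 5 > 1, so the blue side
margin = 2.0 / np.linalg.norm(w)    # 2/||w|| = 2/5 = 0.4
print(f, margin)
@endcode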
### Non-Linearly Separable Data
Consider some data which can't be divided into two groups with a straight line. For example, consider
one-dimensional data where 'X' is at -3 & +3 and 'O' is at -1 & +1. Clearly it is not linearly
separable. But there are methods to solve these kinds of problems. If we map this data set with
the function \f$f(x) = x^2\f$, we get 'X' at 9 and 'O' at 1, which is linearly separable.
Alternatively, we can convert this one-dimensional data to two-dimensional data. We can use the
function \f$f(x)=(x,x^2)\f$ to map the data. Then 'X' becomes (-3,9) and (3,9) while 'O' becomes (-1,1) and (1,1).
This is also linearly separable. In short, there is a better chance that data which is not linearly
separable in a lower-dimensional space becomes linearly separable in a higher-dimensional space.
In general, it is possible to map points in a d-dimensional space to some D-dimensional space
\f$(D>d)\f$ to check the possibility of linear separability. There is an idea which helps to compute the
dot product in the high-dimensional (kernel) space by performing computations in the low-dimensional
input (feature) space. We can illustrate this with the following example.
Consider two points in two-dimensional space, \f$p=(p_1,p_2)\f$ and \f$q=(q_1,q_2)\f$. Let \f$\phi\f$ be a
mapping function which maps a two-dimensional point to three-dimensional space as follows:
\f[\phi (p) = (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2)
\phi (q) = (q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2)\f]
Let us define a kernel function \f$K(p,q)\f$ which does a dot product between two points, shown below:
\f[
\begin{aligned}
K(p,q) = \phi(p).\phi(q) &= \phi(p)^T \phi(q) \\
&= (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2).(q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2) \\
&= p_{1}^2 q_{1}^2 + p_{2}^2 q_{2}^2 + 2 p_1 q_1 p_2 q_2 \\
&= (p_1 q_1 + p_2 q_2)^2 \\
\phi(p).\phi(q) &= (p.q)^2
\end{aligned}
\f]
It means that a dot product in three-dimensional space can be achieved using the squared dot product in
two-dimensional space. This can be applied to higher dimensional spaces. So we can calculate higher
dimensional features from the lower dimensional data itself. Once we map the data, we effectively work in a
higher dimensional space.
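A quick numeric check of this identity, with arbitrary points:
@code{.py}
import numpy as np

p = np.array([1.0, 2.0])
q = np.array([3.0, 4.0])

phi = lambda v: np.array([v[0]**2, v[1]**2, np.sqrt(2) * v[0] * v[1]])

lhs = phi(p).dot(phi(q))   # dot product computed in 3D: 9 + 64 + 48
rhs = p.dot(q) ** 2        # (p.q)^2 computed in 2D: 11^2
print(lhs, rhs)            # both are 121.0
@endcode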
In addition to all these concepts, there is the problem of misclassification. Just finding the
decision boundary with maximum margin is not sufficient; we also need to consider
misclassification errors. Sometimes, it may be possible to find a decision boundary with a smaller
margin, but with fewer misclassifications. In any case, we need to modify our model so that it
finds the decision boundary with maximum margin, but with fewer misclassifications. The minimization
criterion is modified to:
\f[min \; ||w||^2 + C(distance \; of \; misclassified \; samples \; to \; their \; correct \; regions)\f]
The image below shows this concept. For each sample of the training data a new parameter \f$\xi_i\f$ is
defined. It is the distance from the corresponding training sample to its correct decision region.
Samples that are not misclassified lie on the correct side of their support plane, so their
distance is zero.
![image](images/svm_basics3.png)
So the new optimization problem is:
\f[\min_{w, b_{0}} L(w,b_0) = ||w||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } y_{i}(w^{T} x_{i} + b_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f]
How should the parameter C be chosen? It is obvious that the answer to this question depends on how
the training data is distributed. Although there is no general answer, it is useful to take into
account these rules:
- Large values of C give solutions with fewer misclassification errors but a smaller margin.
Consider that in this case it is expensive to make misclassification errors. Since the aim of
the optimization is to minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with a bigger margin and more classification errors. In this
case the minimization does not give as much weight to the sum term, so it focuses more on
finding a hyperplane with a big margin.
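As a rough illustration of this trade-off, here is a minimal sketch that trains an OpenCV SVM on small synthetic data with a few values of C and reports the training error (the data and the C values are arbitrary choices, not part of this tutorial's samples):
@code{.py}
import cv2 as cv
import numpy as np

# Assumed toy data: two classes that overlap slightly
trainData = np.random.rand(40, 2).astype(np.float32)
labels = np.int32([1] * 20 + [-1] * 20)
trainData[:20] += 0.4          # shift one class so that the classes overlap a little

for C in (0.1, 1.0, 100.0):
    svm = cv.ml.SVM_create()
    svm.setType(cv.ml.SVM_C_SVC)
    svm.setKernel(cv.ml.SVM_LINEAR)
    svm.setC(C)
    svm.train(trainData, cv.ml.ROW_SAMPLE, labels)
    _, pred = svm.predict(trainData)
    train_error = np.mean(pred.ravel() != labels)
    print('C = {:6.1f}  training error = {:.2f}'.format(C, train_error))
@endcode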
Additional Resources
--------------------
-# [NPTEL notes on Statistical Pattern Recognition, Chapters
25-29](https://nptel.ac.in/courses/117108048)
Exercises
---------

View File

@ -1,10 +0,0 @@
Support Vector Machines (SVM) {#tutorial_py_svm_index}
=============================
- @subpage tutorial_py_svm_basics
Get a basic understanding of what SVM is
- @subpage tutorial_py_svm_opencv
Let's use SVM functionalities in OpenCV

(1 binary image file deleted; contents not shown)

View File

@ -1,56 +0,0 @@
OCR of Hand-written Data using SVM {#tutorial_py_svm_opencv}
==================================
Goal
----
In this chapter
- We will revisit the hand-written data OCR, but, with SVM instead of kNN.
OCR of Hand-written Digits
--------------------------
In kNN, we directly used pixel intensity as the feature vector. This time we will use [Histogram of
Oriented Gradients](http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients) (HOG) as feature
vectors.
Here, before finding the HOG, we deskew the image using its second order moments. So we first define
a function **deskew()** which takes a digit image and deskews it. Below is the deskew() function:
@snippet samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py deskew
The image below shows the deskew function applied to an image of a zero. The left image is the original
image and the right image is the deskewed version.
![image](images/deskew.jpg)
Next we have to find the HOG descriptor of each cell. For that, we find the Sobel derivatives of each
cell in the X and Y directions. Then we find the magnitude and direction of the gradient at each pixel. The
gradient direction is quantized to 16 integer values. We divide the image into four sub-squares. For each
sub-square, we calculate the histogram of directions (16 bins) weighted by the gradient magnitude. So each
sub-square gives a vector containing 16 values. Four such vectors (of the four sub-squares) together
give us a feature vector containing 64 values. This is the feature vector we use to train our classifier.
@snippet samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py hog
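The referenced snippets live in the sample file hogsvm.py and are not reproduced on this page, so here is a condensed sketch consistent with the description above (the constants match the 20x20 digit images and 16 orientation bins; treat it as an illustration rather than the canonical sample code):
@code{.py}
import cv2 as cv
import numpy as np

SZ = 20          # digit image size
bin_n = 16       # number of orientation bins

def deskew(img):
    # Straighten the digit using its second order image moments
    m = cv.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11'] / m['mu02']
    M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
    return cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)

def hog(img):
    # Gradient magnitude and direction at every pixel
    gx = cv.Sobel(img, cv.CV_32F, 1, 0)
    gy = cv.Sobel(img, cv.CV_32F, 0, 1)
    mag, ang = cv.cartToPolar(gx, gy)
    # Quantize directions into 16 integer bins
    bins = np.int32(bin_n * ang / (2 * np.pi))
    # Split into four 10x10 sub-squares and build a 16-bin histogram for each,
    # weighted by the gradient magnitude
    bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]
    mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    return np.hstack(hists)    # 4 x 16 = 64 values per digit
@endcode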
Finally, as in the previous case, we start by splitting our big dataset into individual cells. For
every digit, 250 cells are reserved for training data and the remaining 250 are reserved for
testing. The full code is given below; you can also download it from [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
@include samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
This particular technique gave me nearly 94% accuracy. You can try different values for the various
parameters of the SVM to check if higher accuracy is possible. Or you can read technical papers in this
area and try to implement them.
Additional Resources
--------------------
-# [Histograms of Oriented Gradients Video](https://www.youtube.com/watch?v=0Zib1YEE4LU)
Exercises
---------
-# OpenCV samples contain digits.py which applies a slight improvement of the above method to get
an improved result. It also contains the reference. Check it and understand it.

View File

@ -1,15 +1,6 @@
Machine Learning {#tutorial_py_table_of_contents_ml}
================
- @subpage tutorial_py_knn_index
Learn to use kNN for classification
Plus learn about handwritten digit recognition using kNN
- @subpage tutorial_py_svm_index
Understand concepts of SVM
- @subpage tutorial_py_kmeans_index
Learn to use K-Means Clustering to group data to a number of clusters.

View File

@ -1,4 +0,0 @@
Machine Learning (ml module) {#tutorial_table_of_content_ml}
============================
Content has been moved to this page: @ref tutorial_table_of_content_other

View File

@ -4,7 +4,7 @@ Barcode Recognition {#tutorial_barcode_detect_and_decode}
@tableofcontents
@prev_tutorial{tutorial_traincascade}
@next_tutorial{tutorial_introduction_to_svm} → @next_tutorial{tutorial_introduction_to_pca}
| | |
| -: | :- |

(5 binary image files deleted; contents not shown)

View File

@ -3,7 +3,7 @@ Introduction to Principal Component Analysis (PCA) {#tutorial_introduction_to_pc
@tableofcontents
@prev_tutorial{tutorial_non_linear_svms} → @prev_tutorial{tutorial_barcode_detect_and_decode}
| | |
| -: | :- |

View File

@ -1,273 +0,0 @@
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
=======================================
@tableofcontents
@prev_tutorial{tutorial_barcode_detect_and_decode}
@next_tutorial{tutorial_non_linear_svms}
| | |
| -: | :- |
| Original author | Fernando Iglesias García |
| Compatibility | OpenCV >= 3.0 |
Goal
----
In this tutorial you will learn how to:
- Use the OpenCV functions @ref cv::ml::SVM::train to build a classifier based on SVMs and @ref
cv::ml::SVM::predict to test its performance.
What is an SVM?
--------------
A Support Vector Machine (SVM) is a discriminative classifier formally defined by a separating
hyperplane. In other words, given labeled training data (*supervised learning*), the algorithm
outputs an optimal hyperplane which categorizes new examples.
In which sense is the hyperplane obtained optimal? Let's consider the following simple problem:
For a linearly separable set of 2D-points which belong to one of two classes, find a separating
straight line.
![](images/separating-lines.png)
@note In this example we deal with lines and points in the Cartesian plane instead of hyperplanes
and vectors in a high dimensional space. This is a simplification of the problem. It is important to
understand that this is done only because our intuition is better built from examples that are easy
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.
In the above picture you can see that there exist multiple lines that offer a solution to the
problem. Is any of them better than the others? We can intuitively define a criterion to estimate
the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
the line passing as far as possible from all points.
Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
minimum distance to the training examples. Twice this distance is known as the **margin** within
SVM theory. Therefore, the optimal separating hyperplane *maximizes* the margin
of the training data.
![](images/optimal-hyperplane.png)
How is the optimal hyperplane computed?
---------------------------------------
Let's introduce the notation used to define formally a hyperplane:
\f[f(x) = \beta_{0} + \beta^{T} x,\f]
where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bias*.
@note A more in-depth description of this and of hyperplanes can be found in Section 4.5 (*Separating
Hyperplanes*) of the book *The Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman (@cite HTF01).
The optimal hyperplane can be represented in an infinite number of different ways by
scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
representations of the hyperplane, the one chosen is
\f[|\beta_{0} + \beta^{T} x| = 1\f]
where \f$x\f$ symbolizes the training examples closest to the hyperplane. In general, the training
examples that are closest to the hyperplane are called **support vectors**. This representation is
known as the **canonical hyperplane**.
Now, we use the result of geometry that gives the distance between a point \f$x\f$ and a hyperplane
\f$(\beta, \beta_{0})\f$:
\f[\mathrm{distance} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||}.\f]
In particular, for the canonical hyperplane, the numerator is equal to one and the distance to the
support vectors is
\f[\mathrm{distance}_{\text{ support vectors}} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||} = \frac{1}{||\beta||}.\f]
Recall that the margin introduced in the previous section, here denoted as \f$M\f$, is twice the
distance to the closest examples:
\f[M = \frac{2}{||\beta||}\f]
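As a quick numeric illustration with an arbitrary weight vector, for \f$\beta = (3, 4)^{T}\f$ and a canonical hyperplane:
\f[||\beta|| = \sqrt{3^{2} + 4^{2}} = 5, \qquad \mathrm{distance}_{\text{ support vectors}} = \frac{1}{5}, \qquad M = \frac{2}{||\beta||} = \frac{2}{5}.\f]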
Finally, the problem of maximizing \f$M\f$ is equivalent to the problem of minimizing a function
\f$L(\beta)\f$ subject to some constraints. The constraints model the requirement for the hyperplane to
classify correctly all the training examples \f$x_{i}\f$. Formally,
\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i,\f]
where \f$y_{i}\f$ represents each of the labels of the training examples.
This is a problem of Lagrangian optimization that can be solved using Lagrange multipliers to obtain
the weight vector \f$\beta\f$ and the bias \f$\beta_{0}\f$ of the optimal hyperplane.
Source Code
-----------
@add_toggle_cpp
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)
- **Code at glance:**
@include samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
@end_toggle
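The included samples are not reproduced on this page. For readers following along without them, here is a condensed, self-contained Python sketch of the same steps (the training points are illustrative values consistent with the description below, not necessarily the literal sample data):
@code{.py}
import cv2 as cv
import numpy as np

# Training data: one sample in class 1 and three samples in class -1 (illustrative values)
labels = np.int32([1, -1, -1, -1])
trainingData = np.float32([[501, 10], [255, 10], [501, 255], [10, 501]])

# Set up the SVM: C-support vector classification with a linear kernel
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))

# Train the SVM
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)

# Classify a new point and inspect the support vectors
_, response = svm.predict(np.float32([[100, 200]]))
print('predicted label:', response.ravel())
print('support vectors:\n', svm.getSupportVectors())
@endcode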
Explanation
-----------
- **Set up the training data**
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup1
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup1
@end_toggle
The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup2
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup1
@end_toggle
- **Set up SVM's parameters**
In this tutorial we have introduced the theory of SVMs in the simplest case, when the
training examples are spread into two classes that are linearly separable. However, SVMs can be
used in a wide variety of problems (e.g. problems with non-linearly separable data, an SVM using
a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
we have to define some parameters before training the SVM. These parameters are stored in an
object of the class @ref cv::ml::SVM.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java init
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py init
@end_toggle
Here:
- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
with imperfect separation of classes (i.e. when the training data is non-linearly separable).
This feature is not important here since the data is linearly separable and we chose this SVM
type only for being the most commonly used.
- *Type of SVM kernel*. We have not talked about kernel functions since they are not
interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
the main idea behind a kernel function. It is a mapping done to the training data to improve
its resemblance to a linearly separable set of data. This mapping consists of increasing the
dimensionality of the data and is done efficiently using a kernel function. We choose here the
type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is
defined using cv::ml::SVM::setKernel.
- *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a
constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
maximum number of iterations and an error tolerance so we allow the algorithm to finish in
fewer steps even if the optimal hyperplane has not been computed yet. This
parameter is defined in a structure @ref cv::TermCriteria .
- **Train the SVM**
We call the method @ref cv::ml::SVM::train to build the SVM model.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java train
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py train
@end_toggle
- **Regions classified by the SVM**
The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
green if it is the class with label 1 and in blue if it is the class with label -1.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show
@end_toggle
- **Support vectors**
We use here a couple of methods to obtain information about the support vectors.
The method @ref cv::ml::SVM::getSupportVectors obtains all of the support
vectors. We have used this method here to find the training examples that are
support vectors and highlight them.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show_vectors
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show_vectors
@end_toggle
Results
-------
- The code opens an image and shows the training examples of both classes. The points of one class
are represented with white circles and black ones are used for the other class.
- The SVM is trained and used to classify all the pixels of the image. This results in a division
of the image in a blue region and a green region. The boundary between both regions is the
optimal separating hyperplane.
- Finally the support vectors are shown using gray rings around the training examples.
![](images/svm_intro_result.png)

View File

@ -1,288 +0,0 @@
Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
=======================================================
@tableofcontents
@prev_tutorial{tutorial_introduction_to_svm}
@next_tutorial{tutorial_introduction_to_pca}
| | |
| -: | :- |
| Original author | Fernando Iglesias García |
| Compatibility | OpenCV >= 3.0 |
Goal
----
In this tutorial you will learn how to:
- Define the optimization problem for SVMs when it is not possible to separate linearly the
training data.
- How to configure the parameters to adapt your SVM for this class of problems.
Motivation
----------
Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
training data? Most of the applications in which SVMs are used in computer vision require a more
powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
training data can rarely be separated using a hyperplane__.
Consider one of these tasks, for example, face detection. The training data in this case is composed
of a set of images that are faces and another set of images that are non-faces (_every other thing
in the world except faces_). This training data is too complex for us to find a representation
of each sample (_feature vector_) that could make the whole set of faces linearly separable from the
whole set of non-faces.
Extension of the Optimization Problem
-------------------------------------
Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
samples. This _misclassification_ is a new variable in the optimization that must be taken into
account. The new model has to include both the old requirement of finding the hyperplane that gives
the biggest margin and the new one of generalizing the training data correctly by not allowing too
many classification errors.
We start here from the formulation of the optimization problem of finding the hyperplane which
maximizes the __margin__ (this is explained in the previous tutorial, @ref tutorial_introduction_to_svm):
\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f]
There are multiple ways in which this model can be modified so it takes into account the
misclassification errors. For example, one could think of minimizing the same quantity plus a
constant times the number of misclassification errors in the training data, i.e.:
\f[\min ||\beta||^{2} + C \text{(misclassification errors)}\f]
However, this one is not a very good solution since, among some other reasons, we do not distinguish
between samples that are misclassified by a small distance from their appropriate decision region and
samples that are misclassified by a large distance. Therefore, a better solution will take into account the _distance of the
misclassified samples to their correct decision regions_, i.e.:
\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]
For each sample of the training data a new parameter \f$\xi_{i}\f$ is defined. Each one of these
parameters contains the distance from its corresponding training sample to their correct decision
region. The following picture shows non-linearly separable training data from two classes, a
separating hyperplane and the distances to their correct regions of the samples that are
misclassified.
![](images/sample-errors-dist.png)
@note Only the distances of the samples that are misclassified are shown in the picture. The
distances of the rest of the samples are zero since they lay already in their correct decision
region.
The red and blue lines that appear on the picture are the margins to each one of the
decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
misclassified training sample to the margin of its appropriate region.
Finally, the new formulation for the optimization problem is:
\f[\min_{\beta, \beta_{0}} L(\beta) = ||\beta||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f]
How should the parameter C be chosen? It is obvious that the answer to this question depends on how
the training data is distributed. Although there is no general answer, it is useful to take into
account these rules:
- Large values of C give solutions with _fewer misclassification errors_ but a _smaller margin_.
Consider that in this case it is expensive to make misclassification errors. Since the aim of
the optimization is to minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with a _bigger margin_ and _more classification errors_. In this
case the minimization does not give as much weight to the sum term, so it focuses more on
finding a hyperplane with a big margin.
Source Code
-----------
You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
[download it from here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
@add_toggle_cpp
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
@end_toggle
@add_toggle_java
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java
@end_toggle
@add_toggle_python
- **Downloadable code**: Click
[here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)
- **Code at glance:**
@include samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
@end_toggle
Explanation
-----------
- __Set up the training data__
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes. To make the exercise more appealing, the training data is generated
randomly using uniform probability density functions (PDFs).
We have divided the generation of the training data into two main parts.
In the first part we generate data for both classes that is linearly separable.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup1
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup1
@end_toggle
In the second part we create data for both classes that is non-linearly separable, data that
overlaps.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup2
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup2
@end_toggle
- __Set up SVM's parameters__
@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
attributes of the class @ref cv::ml::SVM that we configure here before training the SVM.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java init
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py init
@end_toggle
There are just two differences between the configuration we do here and the one that was done in
the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.
- _C_. We chose here a small value of this parameter in order not to punish too much the
misclassification errors in the optimization. The idea of doing this stems from the will of
obtaining a solution close to the one intuitively expected. However, we recommend getting a
better insight into the problem by making adjustments to this parameter.
@note In this case there are just very few points in the overlapping region between classes.
By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be increased and the
impact of the parameter _C_ explored more deeply.
- _Termination Criteria of the algorithm_. The maximum number of iterations has to be
increased considerably in order to solve correctly a problem with non-linearly separable
training data. In particular, we have increased this value by five orders of magnitude.
- __Train the SVM__
We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out that the training
process may take quite a long time. Have patience when you run the program.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java train
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py train
@end_toggle
- __Show the Decision Regions__
The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
dark green if it is the class with label 1 and in dark blue if it is the class with label 2.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show
@end_toggle
- __Show the training data__
The method @ref cv::circle is used to show the samples that compose the training data. The samples
of the class labeled with 1 are shown in light green, and the samples of the class labeled with 2
are shown in light blue.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_data
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_data
@end_toggle
- __Support vectors__
We use here a couple of methods to obtain information about the support vectors. The method
@ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used this method here
to find the training examples that are support vectors and highlight them.
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors
@end_toggle
@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_vectors
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_vectors
@end_toggle
Results
-------
- The code opens an image and shows the training examples of both classes. The points of one class
are shown in light green and light blue is used for the points of the other class.
- The SVM is trained and used to classify all the pixels of the image. This results in a division
of the image in a blue region and a green region. The boundary between both regions is the
separating hyperplane. Since the training data is non-linearly separable, it can be seen that
some of the examples of both classes are misclassified; some green points lay on the blue region
and some blue points lay on the green one.
- Finally the support vectors are shown using gray rings around the training examples.
![](images/svm_non_linear_result.png)
You may observe a runtime instance of this on [YouTube](https://www.youtube.com/watch?v=vFv2yPcSo-Q).
@youtube{vFv2yPcSo-Q}
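For reference, here is a small NumPy sketch of generating the kind of partially overlapping training data described in __Set up the training data__ above (the image width, sample count and linearly separable fraction are assumed values, not the sample's exact constants):
@code{.py}
import numpy as np

WIDTH = 512                # assumed width of the image / feature space
NTRAIN = 100               # assumed number of training samples, split evenly between two classes
FRAC_LINEAR_SEP = 0.9      # assumed fraction of samples that is linearly separable

rng = np.random.default_rng()
n_sep = int(NTRAIN * FRAC_LINEAR_SEP)       # 90 separable samples, 10 overlapping ones
n_overlap = NTRAIN - n_sep

# Linearly separable part: class 1 on the left of the space, class 2 on the right
x_class1 = rng.uniform(0.0, 0.4 * WIDTH, n_sep // 2)
x_class2 = rng.uniform(0.6 * WIDTH, WIDTH, n_sep // 2)
# Non-separable part: both classes drawn from the same overlapping band in the middle
x_overlap = rng.uniform(0.4 * WIDTH, 0.6 * WIDTH, n_overlap)

x = np.concatenate([x_class1, x_class2, x_overlap])
y = rng.uniform(0.0, WIDTH, NTRAIN)         # the y coordinate is uniform for every sample
trainData = np.float32(np.column_stack([x, y]))
labels = np.int32([1] * (n_sep // 2) + [2] * (n_sep // 2)
                  + [1] * (n_overlap // 2) + [2] * (n_overlap - n_overlap // 2))
@endcode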

View File

@ -1,4 +1,4 @@
Other tutorials (ml, objdetect, photo, stitching, video) {#tutorial_table_of_content_other} → Other tutorials (objdetect, photo, stitching, video) {#tutorial_table_of_content_other}
========================================================
- photo. @subpage tutorial_hdr_imaging
@ -9,6 +9,4 @@ Other tutorials (ml, objdetect, photo, stitching, video) {#tutorial_table_of_con
- objdetect. @subpage tutorial_cascade_classifier - objdetect. @subpage tutorial_cascade_classifier
- objdetect. @subpage tutorial_traincascade - objdetect. @subpage tutorial_traincascade
- objdetect. @subpage tutorial_barcode_detect_and_decode - objdetect. @subpage tutorial_barcode_detect_and_decode
- ml. @subpage tutorial_introduction_to_svm
- ml. @subpage tutorial_non_linear_svms
- ml. @subpage tutorial_introduction_to_pca - ml. @subpage tutorial_introduction_to_pca

View File

@ -10,7 +10,7 @@ OpenCV Tutorials {#tutorial_root}
- @subpage tutorial_table_of_content_features2d - feature detectors, descriptors and matching framework - @subpage tutorial_table_of_content_features2d - feature detectors, descriptors and matching framework
- @subpage tutorial_table_of_content_dnn - infer neural networks using built-in _dnn_ module - @subpage tutorial_table_of_content_dnn - infer neural networks using built-in _dnn_ module
- @subpage tutorial_table_of_content_gapi - graph-based approach to computer vision algorithms building - @subpage tutorial_table_of_content_gapi - graph-based approach to computer vision algorithms building
- @subpage tutorial_table_of_content_other - other modules (ml, objdetect, stitching, video, photo) - @subpage tutorial_table_of_content_other - other modules (objdetect, stitching, video, photo)
- @subpage tutorial_table_of_content_ios - running OpenCV on an iDevice - @subpage tutorial_table_of_content_ios - running OpenCV on an iDevice
- @subpage tutorial_table_of_content_3d - 3d objects processing and visualisation - @subpage tutorial_table_of_content_3d - 3d objects processing and visualisation
@cond CUDA_MODULES @cond CUDA_MODULES

View File

@ -20,7 +20,7 @@ foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MOD
endforeach() endforeach()
ocv_list_sort(OPENCV_MODULES_MAIN) ocv_list_sort(OPENCV_MODULES_MAIN)
ocv_list_sort(OPENCV_MODULES_EXTRA) ocv_list_sort(OPENCV_MODULES_EXTRA)
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn ml flann photo stitching) set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn flann photo stitching)
list(REMOVE_ITEM OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES}) list(REMOVE_ITEM OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES})
set(OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES} ${OPENCV_MODULES_MAIN}) set(OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES} ${OPENCV_MODULES_MAIN})

View File

@ -1,2 +0,0 @@
set(the_description "Machine Learning")
ocv_define_module(ml opencv_core WRAP java objc python)

View File

@ -1,481 +0,0 @@
Machine Learning Overview {#ml_intro}
=========================
[TOC]
Training Data {#ml_intro_data}
=============
In machine learning algorithms there is a notion of training data. Training data includes several
components:
- A set of training samples. Each training sample is a vector of values (in computer vision it is
sometimes referred to as a feature vector). Usually all the vectors have the same number of
components (features); the OpenCV ml module assumes that. Each feature can be ordered (i.e. its
values are floating-point numbers that can be compared with each other and strictly ordered,
i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be
integers, strings etc.).
- An optional set of responses corresponding to the samples. Training data with no responses is used
in unsupervised learning algorithms that learn the structure of the supplied data based on distances
between different samples. Training data with responses is used in supervised learning
algorithms, which learn the function mapping samples to responses. Usually the responses are
scalar values, ordered (when we deal with a regression problem) or categorical (when we deal with a
classification problem; in this case the responses are often called "labels"). Some algorithms,
most notably neural networks, can handle not only scalar but also multi-dimensional or
vector responses.
- Another optional component is the mask of missing measurements. Most algorithms require all the
components in all the training samples be valid, but some other algorithms, such as decision
trees, can handle the cases of missing measurements.
- In the case of a classification problem, the user may want to give different weights to different
classes. This is useful, for example, when:
- the user wants to shift prediction accuracy towards a lower false-alarm rate or a higher hit-rate.
- the user wants to compensate for significantly different amounts of training samples from
different classes.
- In addition to that, each training sample may be given a weight if the user wants the algorithm to
pay special attention to certain training samples and adjust the training model accordingly.
- Also, the user may wish not to use the whole training data at once, but rather use parts of it, e.g.
to do parameter optimization via a cross-validation procedure.
As you can see, training data can have a rather complex structure; besides, it may be very big and/or
not entirely available, so there is a need to abstract this concept. In OpenCV ml this is done by the
cv::ml::TrainData class.
@sa cv::ml::TrainData
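For illustration, a minimal sketch of building such a container through the Python bindings; the
train/test split call is assumed to be exposed as `setTrainTestSplitRatio`, mirroring the C++ API:
@code{.py}
import cv2 as cv
import numpy as np

# 100 training samples with 5 ordered (floating-point) features each
samples = np.random.rand(100, 5).astype(np.float32)
# one categorical response (label 0 or 1) per sample
responses = np.random.randint(0, 2, 100).astype(np.int32)

# wrap everything into a TrainData object; each row is one sample
data = cv.ml.TrainData_create(samples, cv.ml.ROW_SAMPLE, responses)
# keep 80% of the samples for training and the rest for validation (binding name assumed)
data.setTrainTestSplitRatio(0.8)
@endcode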
Normal Bayes Classifier {#ml_intro_bayes}
=======================
This simple classification model assumes that feature vectors from each class are normally
distributed (though, not necessarily independently distributed). So, the whole data distribution
function is assumed to be a Gaussian mixture, one component per class. Using the training data the
algorithm estimates mean vectors and covariance matrices for every class, and then it uses them for
prediction.
@sa cv::ml::NormalBayesClassifier
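As a rough illustration (toy data, not a recipe), a normal Bayes classifier can be trained and
queried through the Python bindings along these lines:
@code{.py}
import cv2 as cv
import numpy as np

# two Gaussian blobs, one per class
class0 = np.random.randn(50, 2).astype(np.float32)
class1 = (np.random.randn(50, 2) + 4).astype(np.float32)
samples = np.vstack([class0, class1])
labels = np.int32([0] * 50 + [1] * 50)

model = cv.ml.NormalBayesClassifier_create()
model.train(samples, cv.ml.ROW_SAMPLE, labels)

# predict the class of a new point
_, pred = model.predict(np.float32([[3.5, 4.2]]))
print(pred.ravel())
@endcode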
K-Nearest Neighbors {#ml_intro_knn}
===================
The algorithm caches all training samples and predicts the response for a new sample by analyzing a
certain number (__K__) of the nearest neighbors of the sample using voting, calculating weighted
sum, and so on. The method is sometimes referred to as "learning by example" because for prediction
it looks for the feature vector with a known response that is closest to the given vector.
@sa cv::ml::KNearest
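A hedged sketch of the voting variant with the Python bindings (the value of K and the toy data are
arbitrary):
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(100, 2).astype(np.float32)
labels = np.int32(samples[:, 0] > 0.5)   # class depends only on the first feature

knn = cv.ml.KNearest_create()
knn.train(samples, cv.ml.ROW_SAMPLE, labels)

# vote among the K = 5 cached samples nearest to the query vector
_, results, neighbours, dists = knn.findNearest(np.float32([[0.7, 0.3]]), 5)
print(results.ravel())
@endcode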
Support Vector Machines {#ml_intro_svm}
=======================
Originally, support vector machines (SVM) were a technique for building an optimal binary (2-class)
classifier. Later the technique was extended to regression and clustering problems. SVM is a special
case of kernel-based methods. It maps feature vectors into a higher-dimensional space using a kernel
function and builds an optimal linear discriminating function in this space, or an optimal hyper-
plane that fits the training data. In the case of SVM, the kernel is not defined explicitly.
Instead, a distance between any two points in the hyper-space needs to be defined.
The solution is optimal, which means that the margin between the separating hyper-plane and the
nearest feature vectors from both classes (in case of 2-class classifier) is maximal. The feature
vectors that are the closest to the hyper-plane are called _support vectors_, which means that the
position of other vectors does not affect the hyper-plane (the decision function).
The SVM implementation in OpenCV is based on @cite LibSVM .
@sa cv::ml::SVM
Prediction with SVM {#ml_intro_svm_predict}
-------------------
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get
the raw response from SVM (in the case of regression, 1-class or 2-class classification problem).
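A short sketch of what that can look like from Python; the constant name
`cv.ml.STAT_MODEL_RAW_OUTPUT` is assumed here to be the binding of StatModel::RAW_OUTPUT:
@code{.py}
import cv2 as cv
import numpy as np

samples = np.float32([[0, 0], [1, 1], [4, 4], [5, 5]])
labels = np.int32([0, 0, 1, 1])

svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_RBF)
svm.setC(1.0)
svm.setGamma(0.5)
svm.train(samples, cv.ml.ROW_SAMPLE, labels)

# default output: predicted class labels
_, labels_out = svm.predict(np.float32([[0.5, 0.5], [4.5, 4.5]]))
# raw response of the decision function instead of labels (flag name assumed)
_, raw = svm.predict(np.float32([[0.5, 0.5]]), flags=cv.ml.STAT_MODEL_RAW_OUTPUT)
@endcode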
Decision Trees {#ml_intro_trees}
==============
The ML classes discussed in this section implement Classification and Regression Tree algorithms
described in @cite Breiman84 .
The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It's
also a base class for RTrees and Boost.
A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used
either for classification or for regression. For classification, each tree leaf is marked with a
class label; multiple leaves may have the same label. For regression, a constant is also assigned to
each tree leaf, so the approximation function is piecewise constant.
@sa cv::ml::DTrees
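For illustration, a single tree might be trained and queried like this from Python (the depth and
the toy data are arbitrary; the built-in cross-validation pruning is switched off explicitly):
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(200, 3).astype(np.float32)
labels = np.int32(samples[:, 0] + samples[:, 1] > 1.0)

tree = cv.ml.DTrees_create()
tree.setMaxDepth(8)    # limit the depth of the single tree
tree.setCVFolds(0)     # no built-in cross-validation pruning
tree.train(samples, cv.ml.ROW_SAMPLE, labels)

_, pred = tree.predict(np.float32([[0.9, 0.8, 0.1]]))
print(pred.ravel())
@endcode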
Predicting with Decision Trees {#ml_intro_trees_predict}
------------------------------
To reach a leaf node and to obtain a response for the input feature vector, the prediction procedure
starts with the root node. From each non-leaf node the procedure goes to the left (selects the left
child node as the next observed node) or to the right based on the value of a certain variable whose
index is stored in the observed node. The following variables are possible:
- __Ordered variables.__ The variable value is compared with a threshold that is also stored in
the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it
goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the
left, else to the right.
- __Categorical variables.__ A discrete variable value is tested to see whether it belongs to a
certain subset of values (also stored in the node) from a limited set of values the variable
could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For
example, if the color is green or red, go to the left, else to the right.
So, in each node, a pair of entities (variable_index , `decision_rule (threshold/subset)` ) is used.
This pair is called a _split_ (split on the variable variable_index ). Once a leaf node is reached,
the value assigned to this node is used as the output of the prediction procedure.
Sometimes, certain features of the input vector are missing (for example, in the darkness it is
difficult to determine the object color), and the prediction procedure may get stuck in a certain
node (in the mentioned example, if the node is split by color). To avoid such situations, decision
trees use so-called _surrogate splits_. That is, in addition to the best "primary" split, every tree
node may also be split on one or more other variables with nearly the same results.
Training Decision Trees {#ml_intro_trees_train}
-----------------------
The tree is built recursively, starting from the root node. All training data (feature vectors and
responses) is used to split the root node. In each node the optimum decision rule (the best
"primary" split) is found based on some criteria. In machine learning, gini "purity" criteria are
used for classification, and sum of squared errors is used for regression. Then, if necessary, the
surrogate splits are found. They resemble the results of the primary split on the training data. All
the data is divided using the primary and the surrogate splits (like it is done in the prediction
procedure) between the left and the right child node. Then, the procedure recursively splits both
left and right nodes. At each node the recursive procedure may stop (that is, stop splitting the
node further) in one of the following cases:
- Depth of the constructed tree branch has reached the specified maximum value.
- Number of training samples in the node is less than the specified threshold, in which case it is
not statistically representative to split the node further.
- All the samples in the node belong to the same class or, in case of regression, the variation is
too small.
- The best found split does not give any noticeable improvement compared to a random choice.
When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That is,
some branches of the tree that may lead to the model overfitting are cut off. Normally, this
procedure is only applied to standalone decision trees. Usually tree ensembles build trees that are
small enough and use their own protection schemes against overfitting.
Variable Importance {#ml_intro_trees_var}
-------------------
Besides prediction, which is the obvious use of decision trees, a tree can also be used for
various data analyses. One of the key properties of the constructed decision tree algorithms is the
ability to compute the importance (relative decisive power) of each variable. For example, in a spam
filter that uses the set of words occurring in the message as a feature vector, the variable importance
rating can be used to determine the most "spam-indicating" words and thus help keep the dictionary
size reasonable.
Importance of each variable is computed over all the splits on this variable in the tree, primary
and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be
enabled in the training parameters, even if there is no missing data.
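A hedged sketch of querying the importance vector through the Python bindings of the random trees
ensemble (the method names mirror the C++ RTrees API):
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(300, 4).astype(np.float32)
# only the first two features actually determine the class label
labels = np.int32(samples[:, 0] + samples[:, 1] > 1.0)

rf = cv.ml.RTrees_create()
rf.setCalculateVarImportance(True)      # importance must be requested before training
rf.train(samples, cv.ml.ROW_SAMPLE, labels)

print(rf.getVarImportance().ravel())    # one relative score per feature
@endcode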
Boosting {#ml_intro_boost}
========
A common machine learning task is supervised learning. In supervised learning, the goal is to learn
the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output \f$y\f$ .
Predicting the qualitative output is called _classification_, while predicting the quantitative
output is called _regression_.
Boosting is a powerful learning concept that provides a solution to the supervised classification
learning task. It combines the performance of many "weak" classifiers to produce a powerful
committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can be
very simple and computationally inexpensive. However, many of them, smartly combined, result in a
strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and
neural networks.
Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest
decision trees with only a single split node per tree (called _stumps_) are sufficient.
The boosted model is based on \f$N\f$ training examples \f$\{(x_i,y_i)\}_1^N\f$ with \f$x_i \in R^K\f$
and \f$y_i \in \{-1, +1\}\f$ . \f$x_i\f$ is a \f$K\f$-component vector. Each component encodes a
feature relevant to the learning task at hand. The desired two-class output is encoded as -1 and +1.
Different variants of boosting are known as Discrete AdaBoost, Real AdaBoost, LogitBoost, and Gentle
AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore, this
chapter focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below.
Initially the same weight is assigned to each sample (step 2). Then, a weak classifier
\f$f_m(x)\f$ is trained on the weighted training data (step 3a). Its weighted training error and
scaling factor \f$c_m\f$ are computed (step 3b). The weights are increased for training samples that
have been misclassified (step 3c). All weights are then normalized, and the process of finding the
next weak classifier continues for another \f$M-1\f$ times. The final classifier \f$F(x)\f$ is the
sign of the weighted sum over the individual weak classifiers (step 4).
__Two-class Discrete AdaBoost Algorithm__
1. Set \f$N\f$ examples \f$\{(x_i,y_i)\}_1^N\f$ with \f$x_i \in R^K, y_i \in \{-1, +1\}\f$ .
2. Assign weights as \f$w_i = 1/N, i = 1,...,N\f$ .
3. Repeat for \f$m = 1,2,...,M\f$ :
   1. Fit the classifier \f$f_m(x) \in \{-1,1\}\f$, using weights \f$w_i\f$ on the training data.
   2. Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], c_m = \log((1 - err_m)/err_m)\f$ .
   3. Set \f$w_i \Leftarrow w_i \exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and
      renormalize so that \f$\Sigma_i w_i = 1\f$ .
4. Classify new samples _x_ using the formula: \f$\textrm{sign} (\Sigma_{m=1}^{M} c_m f_m(x))\f$ .
@note Similar to the classical boosting methods, the current implementation supports two-class
classifiers only. For M \> 2 classes, there is the __AdaBoost.MH__ algorithm (described in
@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training set.
To reduce computation time for boosted models without substantially losing accuracy, the influence
trimming technique can be employed. As the training algorithm proceeds and the number of trees in
the ensemble is increased, a larger number of the training samples are classified correctly and with
increasing confidence, and thereby those samples receive smaller weights on the subsequent iterations.
Examples with a very low relative weight have a small impact on the weak classifier training. Thus,
such examples may be excluded during the weak classifier training without much effect on the
induced classifier. This process is controlled with the weight_trim_rate parameter. Only examples
whose summary weight amounts to the fraction weight_trim_rate of the total weight mass are used in
the weak classifier training. Note that the weights for __all__ training examples are recomputed at
each training iteration. Examples excluded at a particular iteration may be used again for learning
some of the weak classifiers further on @cite FHT98 .
@sa cv::ml::Boost
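For illustration, a small two-class model built from stumps; the weak count mirrors the value used
by the letter_recog test elsewhere in this patch, and the data is a toy example:
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(200, 2).astype(np.float32)
labels = np.int32(samples[:, 0] > samples[:, 1])   # classes 0 and 1

boost = cv.ml.Boost_create()
boost.setWeakCount(15)   # number of weak classifiers in the committee
boost.setMaxDepth(1)     # depth-1 trees, i.e. stumps
boost.train(samples, cv.ml.ROW_SAMPLE, labels)

_, pred = boost.predict(np.float32([[0.8, 0.2]]))
print(pred.ravel())
@endcode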
Prediction with Boost {#ml_intro_boost_predict}
---------------------
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to get
the raw sum from Boost classifier.
Random Trees {#ml_intro_rtrees}
============
Random trees were introduced by Leo Breiman and Adele Cutler:
<http://www.stat.berkeley.edu/users/breiman/RandomForests/> . The algorithm can deal with both
classification and regression problems. Random trees is a collection (ensemble) of tree predictors
that is called a _forest_ further in this section (the term was also introduced by L. Breiman).
Classification works as follows: the random trees classifier takes the input feature vector,
classifies it with every tree in the forest, and outputs the class label that received the majority
of "votes". In case of regression, the classifier response is the average of the responses over
all the trees in the forest.
All the trees are trained with the same parameters but on different training sets. These sets are
generated from the original training set using the bootstrap procedure: for each training set, you
randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen
with replacement. That is, some vectors will occur more than once and some will be absent. At each
node of each trained tree, not all the variables are used to find the best split, but a random
subset of them. With each node a new subset is generated. However, its size is fixed for all the
nodes and all the trees. It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by
default. None of the built trees are pruned.
In random trees there is no need for any accuracy estimation procedures, such as cross-validation or
bootstrap, or a separate test set to get an estimate of the training error. The error is estimated
internally during the training. When the training set for the current tree is drawn by sampling with
replacement, some vectors are left out (so-called _oob (out-of-bag) data_ ). The size of oob data is
about N/3 . The classification error is estimated by using this oob-data as follows:
- Get a prediction for each vector, which is oob relative to the i-th tree, using the very i-th
tree.
- After all the trees have been trained, for each vector that has ever been oob, find the
class-<em>winner</em> for it (the class that has got the majority of votes in the trees where
the vector was oob) and compare it to the ground-truth response.
- Compute the classification error estimate as the ratio of the number of misclassified oob vectors
to all the vectors in the original data. In case of regression, the oob error is computed as the
sum of squared differences between the oob predictions and the ground-truth responses, divided by
the total number of vectors.
For a random trees usage example, please see the letter_recog.cpp sample in the OpenCV distribution.
@sa cv::ml::RTrees
__References:__
- _Machine Learning_, Wald I, July 2002.
<http://stat-www.berkeley.edu/users/breiman/wald2002-1.pdf>
- _Looking Inside the Black Box_, Wald II, July 2002.
<http://stat-www.berkeley.edu/users/breiman/wald2002-2.pdf>
- _Software for the Masses_, Wald III, July 2002.
<http://stat-www.berkeley.edu/users/breiman/wald2002-3.pdf>
- And other articles from the web site
<http://www.stat.berkeley.edu/users/breiman/RandomForests/cc_home.htm>
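A minimal usage sketch from Python; the term-criteria call is assumed, following the C++ RTrees
documentation, to bound the forest by at most 100 trees or a sufficiently small OOB error:
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(500, 6).astype(np.float32)
labels = np.int32(samples[:, 0] > 0.5)

forest = cv.ml.RTrees_create()
# stop adding trees after 100 of them or once the OOB error is small enough (assumed semantics)
forest.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, 100, 0.01))
forest.train(samples, cv.ml.ROW_SAMPLE, labels)

# the response is the majority vote (or, for regression, the average) over all trees
_, pred = forest.predict(np.float32([[0.7, 0.1, 0.2, 0.9, 0.5, 0.3]]))
print(pred.ravel())
@endcode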
Expectation Maximization {#ml_intro_em}
========================
The Expectation Maximization (EM) algorithm estimates the parameters of a multivariate probability
density function in the form of a Gaussian mixture distribution with a specified number of mixtures.
Consider a set of N feature vectors \f$\{ x_1, x_2,...,x_{N} \}\f$ from a d-dimensional Euclidean
space drawn from a Gaussian mixture:
\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f]
\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f]
where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the mean
\f$a_k\f$ and covariance matrix \f$S_k\f$, and \f$\pi_k\f$ is the weight of the k-th mixture. Given the
number of mixtures \f$m\f$ and the samples \f$x_i\f$, \f$i=1..N\f$, the algorithm finds the
maximum-likelihood estimates (MLE) of all the mixture parameters, that is, \f$a_k\f$, \f$S_k\f$ and
\f$\pi_k\f$ :
\f[L(x, \theta )=logp(x, \theta )= \sum _{i=1}^{N}log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f]
\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f]
The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step
(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{i,k}\f$ in
the formula below) of sample i to belong to mixture k using the currently available mixture
parameter estimates:
\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f]
At the second step (Maximization step or M-step), the mixture parameter estimates are refined using
the computed probabilities:
\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f]
Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$ can
be provided. Another alternative, when \f$p_{i,k}\f$ are unknown, is to use a simpler clustering
algorithm to pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$ . Often (including
in machine learning) the k-means algorithm is used for that purpose.
One of the main problems of the EM algorithm is the large number of parameters to estimate. The
majority of the parameters reside in covariance matrices, which have \f$d \times d\f$ elements each,
where \f$d\f$ is the feature space dimensionality. However, in many practical problems, the
covariance matrices are close to diagonal or even to \f$\mu_k*I\f$ , where \f$I\f$ is an identity
matrix and \f$\mu_k\f$ is a mixture-dependent "scale" parameter. So, a robust computation scheme
could start with harder constraints on the covariance matrices and then use the estimated parameters
as an input for a less constrained optimization problem (often a diagonal covariance matrix is
already a good enough approximation).
@sa cv::ml::EM
References:
- Bilmes98 J. A. Bilmes. _A Gentle Tutorial of the EM Algorithm and its Application to Parameter
Estimation for Gaussian Mixture and Hidden Markov Models_. Technical Report TR-97-021,
International Computer Science Institute and Computer Science Division, University of California
at Berkeley, April 1998.
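A hedged sketch of fitting a two-component mixture from Python; `trainEM` is assumed to return the
log-likelihoods, labels and posterior probabilities, as its C++ counterpart does:
@code{.py}
import cv2 as cv
import numpy as np

# two 2-D Gaussian clusters
pts = np.vstack([np.random.randn(100, 2),
                 np.random.randn(100, 2) + 5]).astype(np.float32)

em = cv.ml.EM_create()
em.setClustersNumber(2)        # the number of mixtures m
retval, log_likelihoods, em_labels, probs = em.trainEM(pts)

print(em.getMeans())           # estimated means a_k
print(em.getWeights())         # estimated mixture weights pi_k
@endcode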
Neural Networks {#ml_intro_ann}
===============
ML implements feed-forward artificial neural networks or, more particularly, multi-layer perceptrons
(MLP), the most commonly used type of neural networks. MLP consists of the input layer, output
layer, and one or more hidden layers. Each layer of MLP includes one or more neurons directionally
linked with the neurons from the previous and the next layer. The example below represents a 3-layer
perceptron with three inputs, two outputs, and the hidden layer including five neurons:
![image](pics/mlp.png)
All the neurons in MLP are similar. Each of them has several input links (it takes the output values
from several neurons in the previous layer as input) and several output links (it passes the
response to several neurons in the next layer). The values retrieved from the previous layer are
summed up with certain weights, individual for each neuron, plus the bias term. The sum is
transformed using the activation function \f$f\f$, which may also be different for different neurons.
![image](pics/neuron_model.png)
In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$ , the outputs \f$y_i\f$ of the
layer \f$n+1\f$ are computed as:
\f[u_i = \sum _j (w^{n+1}_{i,j}*x_j) + w^{n+1}_{i,bias}\f]
\f[y_i = f(u_i)\f]
Different activation functions may be used. ML implements three standard functions:
- Identity function ( cv::ml::ANN_MLP::IDENTITY ): \f$f(x)=x\f$
- Symmetrical sigmoid ( cv::ml::ANN_MLP::SIGMOID_SYM ): \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$ ,
which is the default choice for MLP. The standard sigmoid with
\f$\beta =1, \alpha =1\f$ is shown below:
![image](pics/sigmoid_bipolar.png)
- Gaussian function ( cv::ml::ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x*x}\f$ , which is not
completely supported at the moment.
In ML, all the neurons have the same activation functions, with the same free parameters
(\f$\alpha, \beta\f$) that are specified by the user and are not altered by the training algorithms.
So, the whole trained network works as follows:
1. Take the feature vector as input. The vector size is equal to the size of the input layer.
2. Pass values as input to the first hidden layer.
3. Compute outputs of the hidden layer using the weights and the activation functions.
4. Pass outputs further downstream until you compute the output layer.
So, to compute the network, you need to know all the weights \f$w^{n+1}_{i,j}\f$ . The weights are
computed by the training algorithm. The algorithm takes a training set, multiple input vectors with
the corresponding output vectors, and iteratively adjusts the weights to enable the network to give
the desired response to the provided input vectors.
The larger the network size (the number of hidden layers and their sizes), the greater the potential
network flexibility. The error on the training set can be made arbitrarily small. But at the
same time the learned network also "learns" the noise present in the training set, so the error on
the test set usually starts increasing after the network size reaches a limit. Besides, larger
networks take much longer to train than smaller ones, so it is reasonable to pre-process the data,
using cv::PCA or a similar technique, and train a smaller network on only the essential features.
Another MLP feature is its inability to handle categorical data as is. However, there is a
workaround. If a certain feature in the input or output (in case of an n-class classifier for
\f$n>2\f$ ) layer is categorical and can take \f$M>2\f$ different values, it makes sense to
represent it as a binary tuple of M elements, where the i-th element is 1 if and only if the
feature is equal to the i-th value out of M possible. It increases the size of the input/output
layer but speeds up the training algorithm convergence and at the same time enables "fuzzy" values
of such variables, that is, a tuple of probabilities instead of a fixed value.
ML implements two algorithms for training MLPs. The first algorithm is a classical random
sequential back-propagation algorithm. The second (default) one is a batch RPROP algorithm.
@sa cv::ml::ANN_MLP
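A compact sketch in the spirit of the MLP model from the letter_recog test shown elsewhere in this
patch: categorical targets are one-hot encoded as described above, and the layer sizes and backprop
parameters are purely illustrative:
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(200, 4).astype(np.float32)
classes = np.random.randint(0, 3, 200)            # a 3-class toy problem
targets = np.zeros((200, 3), np.float32)
targets[np.arange(200), classes] = 1              # "binary tuple" (one-hot) encoding

mlp = cv.ml.ANN_MLP_create()
mlp.setLayerSizes(np.int32([4, 10, 3]))           # input, hidden and output layer sizes
mlp.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
mlp.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
mlp.setTermCriteria((cv.TERM_CRITERIA_COUNT, 300, 0.01))
mlp.train(samples, cv.ml.ROW_SAMPLE, targets)

_, out = mlp.predict(np.float32([[0.1, 0.2, 0.3, 0.4]]))
print(out.argmax(1))                              # index of the winning output neuron
@endcode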
Logistic Regression {#ml_intro_lr}
===================
ML implements logistic regression, which is a probabilistic classification technique. Logistic
Regression is a binary classification algorithm which is closely related to Support Vector Machines
(SVM). Like SVM, Logistic Regression can be extended to work on multi-class classification problems
like digit recognition (i.e. recognizing digits like 0, 1, 2, 3, ... from the given images). This
version of Logistic Regression supports both binary and multi-class classification (for multi-class
it creates multiple 2-class classifiers). In order to train the logistic regression classifier,
Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see
<http://en.wikipedia.org/wiki/Gradient_descent_optimization>). Logistic Regression is a
discriminative classifier (see <http://www.cs.cmu.edu/~tom/NewChapters.html> for more details).
Logistic Regression is implemented as a C++ class in LogisticRegression.
In Logistic Regression, we try to optimize the training parameter \f$\theta\f$ such that the
hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(\theta^T x)\f$,
where \f$g(z) = \frac{1}{1+e^{-z}}\f$ is the logistic or sigmoid function. The term "Logistic" in
Logistic Regression refers to this function. For given data of a binary classification problem of
classes 0 and 1, one can determine that a given data instance belongs to class 1 if \f$h_\theta(x)
\geq 0.5\f$ or to class 0 if \f$h_\theta(x) < 0.5\f$ .
In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy:
- The learning rate can be set with @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
method. It determines how fast we approach the solution. It is a positive real number.
- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
in LogisticRegression. It is important to specify the number of iterations these optimization
algorithms have to run. The number of iterations can be set with @ref
cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of
as the number of steps taken, while the learning rate specifies whether each step is long or short. This
and the previous parameter define how fast we arrive at a possible solution.
- To compensate for overfitting, regularization is performed; it can be enabled with
@ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
kind of regularization has to be performed by passing one of the @ref
cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.
- The logistic regression implementation provides a choice of two training methods: Batch Gradient
Descent or Mini-Batch Gradient Descent. To specify this, call @ref
cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training method is
set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini-batch has
to be set to a positive integer with @ref cv::ml::LogisticRegression::setMiniBatchSize
"setMiniBatchSize".
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
@snippet samples/cpp/logistic_regression.cpp init
@sa cv::ml::LogisticRegression
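A rough Python sketch using only the setters discussed above; the parameter values are placeholders,
and the responses are passed as floating-point numbers because the classifier is trained with
gradient descent:
@code{.py}
import cv2 as cv
import numpy as np

samples = np.random.rand(100, 2).astype(np.float32)
responses = np.float32(samples[:, 0] > 0.5).reshape(-1, 1)   # labels 0.0 / 1.0

lr = cv.ml.LogisticRegression_create()
lr.setLearningRate(0.001)   # length of each gradient-descent step
lr.setIterations(100)       # number of steps to take
lr.train(samples, cv.ml.ROW_SAMPLE, responses)

_, pred = lr.predict(np.float32([[0.9, 0.4]]))
print(pred.ravel())
@endcode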

Binary file not shown.


Binary file not shown.


Binary file not shown.


Binary file not shown.


File diff suppressed because it is too large Load Diff

View File

@ -1,48 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/ml.hpp"

View File

@ -1,60 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_ML_INL_HPP
#define OPENCV_ML_INL_HPP
namespace cv { namespace ml {
// declared in ml.hpp
template<class SimulatedAnnealingSolverSystem>
int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem,
double initialTemperature, double finalTemperature, double coolingRatio,
size_t iterationsPerStep,
CV_OUT double* lastTemperature,
cv::RNG& rngEnergy
)
{
CV_Assert(finalTemperature > 0);
CV_Assert(initialTemperature > finalTemperature);
CV_Assert(iterationsPerStep > 0);
CV_Assert(coolingRatio < 1.0f);
double Ti = initialTemperature;
double previousEnergy = solverSystem.energy();
int exchange = 0;
while (Ti > finalTemperature)
{
for (size_t i = 0; i < iterationsPerStep; i++)
{
solverSystem.changeState();
double newEnergy = solverSystem.energy();
if (newEnergy < previousEnergy)
{
previousEnergy = newEnergy;
exchange++;
}
else
{
double r = rngEnergy.uniform(0.0, 1.0);
if (r < std::exp(-(newEnergy - previousEnergy) / Ti))
{
previousEnergy = newEnergy;
exchange++;
}
else
{
solverSystem.reverseState();
}
}
}
Ti *= coolingRatio;
}
if (lastTemperature)
*lastTemperature = Ti;
return exchange;
}
}} //namespace
#endif // OPENCV_ML_INL_HPP

View File

@ -1,42 +0,0 @@
package org.opencv.test.ml;
import org.opencv.ml.Ml;
import org.opencv.ml.SVM;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.CvType;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
public class MLTest extends OpenCVTestCase {
public void testSaveLoad() {
Mat samples = new MatOfFloat(new float[] {
5.1f, 3.5f, 1.4f, 0.2f,
4.9f, 3.0f, 1.4f, 0.2f,
4.7f, 3.2f, 1.3f, 0.2f,
4.6f, 3.1f, 1.5f, 0.2f,
5.0f, 3.6f, 1.4f, 0.2f,
7.0f, 3.2f, 4.7f, 1.4f,
6.4f, 3.2f, 4.5f, 1.5f,
6.9f, 3.1f, 4.9f, 1.5f,
5.5f, 2.3f, 4.0f, 1.3f,
6.5f, 2.8f, 4.6f, 1.5f
}).reshape(1, 10);
Mat responses = new MatOfInt(new int[] {
0, 0, 0, 0, 0, 1, 1, 1, 1, 1
}).reshape(1, 10);
SVM saved = SVM.create();
assertFalse(saved.isTrained());
saved.train(samples, Ml.ROW_SAMPLE, responses);
assertTrue(saved.isTrained());
String filename = OpenCVTestRunner.getTempFileName("yml");
saved.save(filename);
SVM loaded = SVM.load(filename);
assertTrue(loaded.isTrained());
}
}

View File

@ -1,9 +0,0 @@
{
"enum_fix" : {
"EM" : { "Types": "EMTypes" },
"SVM" : { "Types": "SVMTypes" },
"KNearest" : { "Types": "KNearestTypes" },
"DTrees" : { "Flags": "DTreeFlags" },
"StatModel" : { "Flags": "StatModelFlags" }
}
}

View File

@ -1,22 +0,0 @@
template<>
bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const ArgInfo& info)
{
CV_UNUSED(info);
if(!obj)
return true;
return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
}
template<>
bool pyopencv_to(PyObject* obj, CvSlice& r, const ArgInfo& info)
{
CV_UNUSED(info);
if(!obj || obj == Py_None)
return true;
if(PyObject_Size(obj) == 0)
{
r = CV_WHOLE_SEQ;
return true;
}
return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
}

View File

@ -1,201 +0,0 @@
#!/usr/bin/env python
'''
SVM and KNearest digit recognition.
Sample loads a dataset of handwritten digits from '../data/digits.png'.
Then it trains a SVM and KNearest classifiers on it and evaluates
their accuracy.
Following preprocessing is applied to the dataset:
- Moment-based image deskew (see deskew())
- Digit images are split into 4 10x10 cells and 16-bin
histogram of oriented gradients is computed for each
cell
- Transform histograms to space with Hellinger metric (see [1] (RootSIFT))
[1] R. Arandjelovic, A. Zisserman
"Three things everyone should know to improve object retrieval"
http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
# built-in modules
from multiprocessing.pool import ThreadPool
import cv2 as cv
import numpy as np
from numpy.linalg import norm
SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = 'samples/data/digits.png'
def split2d(img, cell_size, flatten=True):
h, w = img.shape[:2]
sx, sy = cell_size
cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
cells = np.array(cells)
if flatten:
cells = cells.reshape(-1, sy, sx)
return cells
def deskew(img):
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
return img
class StatModel(object):
def load(self, fn):
self.model.load(fn) # Known bug: https://github.com/opencv/opencv/issues/4969
def save(self, fn):
self.model.save(fn)
class KNearest(StatModel):
def __init__(self, k = 3):
self.k = k
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
return results.ravel()
class SVM(StatModel):
def __init__(self, C = 1, gamma = 0.5):
self.model = cv.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setType(cv.ml.SVM_C_SVC)
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
return self.model.predict(samples)[1].ravel()
def evaluate_model(model, digits, samples, labels):
resp = model.predict(samples)
err = (labels != resp).mean()
confusion = np.zeros((10, 10), np.int32)
for i, j in zip(labels, resp):
confusion[int(i), int(j)] += 1
return err, confusion
def preprocess_simple(digits):
return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
mag, ang = cv.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
# transform to Hellinger kernel
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= norm(hist) + eps
samples.append(hist)
return np.float32(samples)
from tests_common import NewOpenCVTests
class digits_test(NewOpenCVTests):
def load_digits(self, fn):
digits_img = self.get_sample(fn, 0)
digits = split2d(digits_img, (SZ, SZ))
labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)
return digits, labels
def test_digits(self):
digits, labels = self.load_digits(DIGITS_FN)
# shuffle digits
rand = np.random.RandomState(321)
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
train_n = int(0.9*len(samples))
_digits_train, digits_test = np.split(digits2, [train_n])
samples_train, samples_test = np.split(samples, [train_n])
labels_train, labels_test = np.split(labels, [train_n])
errors = list()
confusionMatrixes = list()
model = KNearest(k=4)
model.train(samples_train, labels_train)
error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
errors.append(error)
confusionMatrixes.append(confusion)
model = SVM(C=2.67, gamma=5.383)
model.train(samples_train, labels_train)
error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
errors.append(error)
confusionMatrixes.append(confusion)
eps = 0.001
normEps = len(samples_test) * 0.02
confusionKNN = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 59, 1, 0, 0, 0, 0, 1, 0],
[ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 38, 0, 2, 0, 0, 0],
[ 0, 0, 0, 2, 0, 48, 0, 0, 1, 0],
[ 0, 1, 0, 0, 0, 0, 51, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 46, 0],
[ 1, 1, 0, 1, 1, 0, 0, 0, 2, 42]]
confusionSVM = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 57, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 59, 2, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 43, 0, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 40, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 50, 0, 0, 0, 0],
[ 0, 0, 0, 0, 1, 0, 51, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0, 0, 54, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0],
[ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]]
self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps)
self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps)
self.assertLess(errors[0] - 0.034, eps)
self.assertLess(errors[1] - 0.018, eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -1,40 +0,0 @@
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class TestGoodFeaturesToTrack_test(NewOpenCVTests):
def test_goodFeaturesToTrack(self):
arr = self.get_sample('samples/data/lena.jpg', 0)
original = arr.copy()
threshes = [ x / 100. for x in range(1,10) ]
numPoints = 20000
results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
# Check that GoodFeaturesToTrack has not modified input image
self.assertTrue(arr.tostring() == original.tostring())
# Check for repeatability
for i in range(1):
results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
for t in threshes:
self.assertTrue(len(results2[t]) == len(results[t]))
for i in range(len(results[t])):
self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0)
for t0,t1 in zip(threshes, threshes[1:]):
r0 = results[t0]
r1 = results[t1]
# Increasing thresh should make result list shorter
self.assertTrue(len(r0) > len(r1))
# Increasing thresh should only truncate result list
for i in range(len(r1)):
self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -1,13 +0,0 @@
#!/usr/bin/env python
import cv2 as cv
from tests_common import NewOpenCVTests
class knearest_test(NewOpenCVTests):
def test_load(self):
k_nearest = cv.ml.KNearest_load(self.find_file("ml/opencv_ml_knn.xml"))
self.assertFalse(k_nearest.empty())
self.assertTrue(k_nearest.isTrained())
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -1,171 +0,0 @@
#!/usr/bin/env python
'''
The sample demonstrates how to train Random Trees classifier
(or Boosting classifier, or MLP, or Knearest, or Support Vector Machines) using the provided dataset.
We use the sample database letter-recognition.data
from UCI Repository, here is the link:
Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
UCI Repository of machine learning databases
[http://www.ics.uci.edu/~mlearn/MLRepository.html].
Irvine, CA: University of California, Department of Information and Computer Science.
The dataset consists of 20000 feature vectors along with the
responses - capital latin letters A..Z.
The first 10000 samples are used for training
and the remaining 10000 - to test the classifier.
======================================================
Models: RTrees, KNearest, Boost, SVM, MLP
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def load_base(fn):
a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
samples, responses = a[:,1:], a[:,0]
return samples, responses
class LetterStatModel(object):
class_n = 26
train_ratio = 0.5
def load(self, fn):
self.model.load(fn)
def save(self, fn):
self.model.save(fn)
def unroll_samples(self, samples):
sample_n, var_n = samples.shape
new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32)
new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0)
new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n)
return new_samples
def unroll_responses(self, responses):
sample_n = len(responses)
new_responses = np.zeros(sample_n*self.class_n, np.int32)
resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n )
new_responses[resp_idx] = 1
return new_responses
class RTrees(LetterStatModel):
def __init__(self):
self.model = cv.ml.RTrees_create()
def train(self, samples, responses):
#sample_n, var_n = samples.shape
self.model.setMaxDepth(20)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.ravel()
class KNearest(LetterStatModel):
def __init__(self):
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
return results.ravel()
class Boost(LetterStatModel):
def __init__(self):
self.model = cv.ml.Boost_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_samples = self.unroll_samples(samples)
new_responses = self.unroll_responses(responses)
var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
self.model.setWeakCount(15)
self.model.setMaxDepth(10)
self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
def predict(self, samples):
new_samples = self.unroll_samples(samples)
_ret, resp = self.model.predict(new_samples)
return resp.ravel().reshape(-1, self.class_n).argmax(1)
class SVM(LetterStatModel):
def __init__(self):
self.model = cv.ml.SVM_create()
def train(self, samples, responses):
self.model.setType(cv.ml.SVM_C_SVC)
self.model.setC(1)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setGamma(.1)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.ravel()
class MLP(LetterStatModel):
def __init__(self):
self.model = cv.ml.ANN_MLP_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
layer_sizes = np.int32([var_n, 100, 100, self.class_n])
self.model.setLayerSizes(layer_sizes)
self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
self.model.setBackpropMomentumScale(0)
self.model.setBackpropWeightScale(0.001)
self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
return resp.argmax(-1)
from tests_common import NewOpenCVTests
class letter_recog_test(NewOpenCVTests):
def test_letter_recog(self):
eps = 0.01
models = [RTrees, KNearest, Boost, SVM, MLP]
models = dict( [(cls.__name__.lower(), cls) for cls in models] )
testErrors = {RTrees: (98.930000, 92.390000), KNearest: (94.960000, 92.010000),
Boost: (85.970000, 74.920000), SVM: (99.780000, 95.680000), MLP: (90.060000, 87.410000)}
for model in models:
Model = models[model]
classifier = Model()
samples, responses = load_base(self.repoPath + '/samples/data/letter-recognition.data')
train_n = int(len(samples)*classifier.train_ratio)
classifier.train(samples[:train_n], responses[:train_n])
train_rate = np.mean(classifier.predict(samples[:train_n]) == responses[:train_n].astype(int))
test_rate = np.mean(classifier.predict(samples[train_n:]) == responses[train_n:].astype(int))
self.assertLess(train_rate - testErrors[Model][0], eps)
self.assertLess(test_rate - testErrors[Model][1], eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

File diff suppressed because it is too large Load Diff

View File

@ -1,533 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv { namespace ml {
static inline double
log_ratio( double val )
{
const double eps = 1e-5;
val = std::max( val, eps );
val = std::min( val, 1. - eps );
return log( val/(1. - val) );
}
BoostTreeParams::BoostTreeParams()
{
boostType = Boost::REAL;
weakCount = 100;
weightTrimRate = 0.95;
}
BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
double _weightTrimRate)
{
boostType = _boostType;
weakCount = _weak_count;
weightTrimRate = _weightTrimRate;
}
class DTreesImplForBoost CV_FINAL : public DTreesImpl
{
public:
DTreesImplForBoost()
{
params.setCVFolds(0);
params.setMaxDepth(1);
}
virtual ~DTreesImplForBoost() {}
bool isClassifier() const CV_OVERRIDE { return true; }
void clear() CV_OVERRIDE
{
DTreesImpl::clear();
}
void startTraining( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_Assert(!trainData.empty());
DTreesImpl::startTraining(trainData, flags);
sumResult.assign(w->sidx.size(), 0.);
if( bparams.boostType != Boost::DISCRETE )
{
_isClassifier = false;
int i, n = (int)w->cat_responses.size();
w->ord_responses.resize(n);
double a = -1, b = 1;
if( bparams.boostType == Boost::LOGIT )
{
a = -2, b = 2;
}
for( i = 0; i < n; i++ )
w->ord_responses[i] = w->cat_responses[i] > 0 ? b : a;
}
normalizeWeights();
}
void normalizeWeights()
{
int i, n = (int)w->sidx.size();
double sumw = 0, a, b;
for( i = 0; i < n; i++ )
sumw += w->sample_weights[w->sidx[i]];
if( sumw > DBL_EPSILON )
{
a = 1./sumw;
b = 0;
}
else
{
a = 0;
b = 1;
}
for( i = 0; i < n; i++ )
{
double& wval = w->sample_weights[w->sidx[i]];
wval = wval*a + b;
}
}
void endTraining() CV_OVERRIDE
{
DTreesImpl::endTraining();
vector<double> e;
std::swap(sumResult, e);
}
void scaleTree( int root, double scale )
{
int nidx = root, pidx = 0;
Node *node = 0;
// traverse the tree and save all the nodes in depth-first order
for(;;)
{
for(;;)
{
node = &nodes[nidx];
node->value *= scale;
if( node->left < 0 )
break;
nidx = node->left;
}
for( pidx = node->parent; pidx >= 0 && nodes[pidx].right == nidx;
nidx = pidx, pidx = nodes[pidx].parent )
;
if( pidx < 0 )
break;
nidx = nodes[pidx].right;
}
}
void calcValue( int nidx, const vector<int>& _sidx ) CV_OVERRIDE
{
DTreesImpl::calcValue(nidx, _sidx);
WNode* node = &w->wnodes[nidx];
if( bparams.boostType == Boost::DISCRETE )
{
node->value = node->class_idx == 0 ? -1 : 1;
}
else if( bparams.boostType == Boost::REAL )
{
double p = (node->value+1)*0.5;
node->value = 0.5*log_ratio(p);
}
}
bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_Assert(!trainData.empty());
startTraining(trainData, flags);
int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
vector<int> sidx = w->sidx;
for( treeidx = 0; treeidx < ntrees; treeidx++ )
{
int root = addTree( sidx );
if( root < 0 )
return false;
updateWeightsAndTrim( treeidx, sidx );
}
endTraining();
return true;
}
void updateWeightsAndTrim( int treeidx, vector<int>& sidx )
{
int i, n = (int)w->sidx.size();
int nvars = (int)varIdx.size();
double sumw = 0., C = 1.;
cv::AutoBuffer<double> buf(n + nvars);
double* result = buf.data();
float* sbuf = (float*)(result + n);
Mat sample(1, nvars, CV_32F, sbuf);
int predictFlags = bparams.boostType == Boost::DISCRETE ? (PREDICT_MAX_VOTE | RAW_OUTPUT) : PREDICT_SUM;
predictFlags |= COMPRESSED_INPUT;
for( i = 0; i < n; i++ )
{
w->data->getSample(varIdx, w->sidx[i], sbuf );
result[i] = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags);
}
// now update weights and other parameters for each type of boosting
if( bparams.boostType == Boost::DISCRETE )
{
// Discrete AdaBoost:
// weak_eval[i] (=f(x_i)) is in {-1,1}
// err = sum(w_i*(f(x_i) != y_i))/sum(w_i)
// C = log((1-err)/err)
// w_i *= exp(C*(f(x_i) != y_i))
double err = 0.;
for( i = 0; i < n; i++ )
{
int si = w->sidx[i];
double wval = w->sample_weights[si];
sumw += wval;
err += wval*(result[i] != w->cat_responses[si]);
}
if( sumw != 0 )
err /= sumw;
C = -log_ratio( err );
double scale = std::exp(C);
sumw = 0;
for( i = 0; i < n; i++ )
{
int si = w->sidx[i];
double wval = w->sample_weights[si];
if( result[i] != w->cat_responses[si] )
wval *= scale;
sumw += wval;
w->sample_weights[si] = wval;
}
scaleTree(roots[treeidx], C);
}
else if( bparams.boostType == Boost::REAL || bparams.boostType == Boost::GENTLE )
{
// Real AdaBoost:
// weak_eval[i] = f(x_i) = 0.5*log(p(x_i)/(1-p(x_i))), p(x_i)=P(y=1|x_i)
// w_i *= exp(-y_i*f(x_i))
// Gentle AdaBoost:
// weak_eval[i] = f(x_i) in [-1,1]
// w_i *= exp(-y_i*f(x_i))
for( i = 0; i < n; i++ )
{
int si = w->sidx[i];
CV_Assert( std::abs(w->ord_responses[si]) == 1 );
double wval = w->sample_weights[si]*std::exp(-result[i]*w->ord_responses[si]);
sumw += wval;
w->sample_weights[si] = wval;
}
}
else if( bparams.boostType == Boost::LOGIT )
{
// LogitBoost:
// weak_eval[i] = f(x_i) in [-z_max,z_max]
// sum_response = F(x_i).
// F(x_i) += 0.5*f(x_i)
// p(x_i) = exp(F(x_i))/(exp(F(x_i)) + exp(-F(x_i))) = 1/(1+exp(-2*F(x_i)))
// reuse weak_eval: weak_eval[i] <- p(x_i)
// w_i = p(x_i)*(1 - p(x_i))
// z_i = ((y_i+1)/2 - p(x_i))/(p(x_i)*(1 - p(x_i)))
// store z_i in w->ord_responses as the new target responses
const double lb_weight_thresh = FLT_EPSILON;
const double lb_z_max = 10.;
for( i = 0; i < n; i++ )
{
int si = w->sidx[i];
sumResult[i] += 0.5*result[i];
double p = 1./(1 + std::exp(-2*sumResult[i]));
double wval = std::max( p*(1 - p), lb_weight_thresh ), z;
w->sample_weights[si] = wval;
sumw += wval;
if( w->ord_responses[si] > 0 )
{
z = 1./p;
w->ord_responses[si] = std::min(z, lb_z_max);
}
else
{
z = 1./(1-p);
w->ord_responses[si] = -std::min(z, lb_z_max);
}
}
}
else
CV_Error(CV_StsNotImplemented, "Unknown boosting type");
/*if( bparams.boostType != Boost::LOGIT )
{
double err = 0;
for( i = 0; i < n; i++ )
{
sumResult[i] += result[i]*C;
if( bparams.boostType != Boost::DISCRETE )
err += sumResult[i]*w->ord_responses[w->sidx[i]] < 0;
else
err += sumResult[i]*w->cat_responses[w->sidx[i]] < 0;
}
printf("%d trees. C=%.2f, training error=%.1f%%, working set size=%d (out of %d)\n", (int)roots.size(), C, err*100./n, (int)sidx.size(), n);
}*/
// renormalize weights
if( sumw > FLT_EPSILON )
normalizeWeights();
if( bparams.weightTrimRate <= 0. || bparams.weightTrimRate >= 1. )
return;
for( i = 0; i < n; i++ )
result[i] = w->sample_weights[w->sidx[i]];
std::sort(result, result + n);
// as weight trimming occurs immediately after updating the weights,
// where they are renormalized, we assume that the weight sum = 1.
sumw = 1. - bparams.weightTrimRate;
for( i = 0; i < n; i++ )
{
double wval = result[i];
if( sumw <= 0 )
break;
sumw -= wval;
}
double threshold = i < n ? result[i] : DBL_MAX;
sidx.clear();
for( i = 0; i < n; i++ )
{
int si = w->sidx[i];
if( w->sample_weights[si] >= threshold )
sidx.push_back(si);
}
}
float predictTrees( const Range& range, const Mat& sample, int flags0 ) const CV_OVERRIDE
{
int flags = (flags0 & ~PREDICT_MASK) | PREDICT_SUM;
float val = DTreesImpl::predictTrees(range, sample, flags);
if( flags != flags0 )
{
int ival = (int)(val > 0);
if( !(flags0 & RAW_OUTPUT) )
ival = classLabels[ival];
val = (float)ival;
}
return val;
}
void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE
{
fs << "boosting_type" <<
(bparams.boostType == Boost::DISCRETE ? "DiscreteAdaboost" :
bparams.boostType == Boost::REAL ? "RealAdaboost" :
bparams.boostType == Boost::LOGIT ? "LogitBoost" :
bparams.boostType == Boost::GENTLE ? "GentleAdaboost" : "Unknown");
DTreesImpl::writeTrainingParams(fs);
fs << "weight_trimming_rate" << bparams.weightTrimRate;
}
void write( FileStorage& fs ) const CV_OVERRIDE
{
if( roots.empty() )
CV_Error( CV_StsBadArg, "Boost model has not been trained" );
writeFormat(fs);
writeParams(fs);
int k, ntrees = (int)roots.size();
fs << "ntrees" << ntrees
<< "trees" << "[";
for( k = 0; k < ntrees; k++ )
{
fs << "{";
writeTree(fs, roots[k]);
fs << "}";
}
fs << "]";
}
void readParams( const FileNode& fn ) CV_OVERRIDE
{
DTreesImpl::readParams(fn);
FileNode tparams_node = fn["training_params"];
// check for old layout
String bts = (String)(fn["boosting_type"].empty() ?
tparams_node["boosting_type"] : fn["boosting_type"]);
bparams.boostType = (bts == "DiscreteAdaboost" ? Boost::DISCRETE :
bts == "RealAdaboost" ? Boost::REAL :
bts == "LogitBoost" ? Boost::LOGIT :
bts == "GentleAdaboost" ? Boost::GENTLE : -1);
_isClassifier = bparams.boostType == Boost::DISCRETE;
// check for old layout
bparams.weightTrimRate = (double)(fn["weight_trimming_rate"].empty() ?
tparams_node["weight_trimming_rate"] : fn["weight_trimming_rate"]);
}
void read( const FileNode& fn ) CV_OVERRIDE
{
clear();
int ntrees = (int)fn["ntrees"];
readParams(fn);
FileNode trees_node = fn["trees"];
FileNodeIterator it = trees_node.begin();
CV_Assert( ntrees == (int)trees_node.size() );
for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it )
{
FileNode nfn = (*it)["nodes"];
readTree(nfn);
}
}
BoostTreeParams bparams;
vector<double> sumResult;
};
class BoostImpl : public Boost
{
public:
BoostImpl() {}
virtual ~BoostImpl() {}
inline int getBoostType() const CV_OVERRIDE { return impl.bparams.boostType; }
inline void setBoostType(int val) CV_OVERRIDE { impl.bparams.boostType = val; }
inline int getWeakCount() const CV_OVERRIDE { return impl.bparams.weakCount; }
inline void setWeakCount(int val) CV_OVERRIDE { impl.bparams.weakCount = val; }
inline double getWeightTrimRate() const CV_OVERRIDE { return impl.bparams.weightTrimRate; }
inline void setWeightTrimRate(double val) CV_OVERRIDE { impl.bparams.weightTrimRate = val; }
inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); }
inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); }
inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); }
inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); }
inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); }
inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); }
inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); }
inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); }
inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); }
inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); }
inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); }
inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); }
inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); }
inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); }
inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); }
inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); }
inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); }
inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); }
String getDefaultName() const CV_OVERRIDE { return "opencv_ml_boost"; }
bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_Assert(!trainData.empty());
return impl.train(trainData, flags);
}
float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE
{
CV_CheckEQ(samples.cols(), getVarCount(), "");
return impl.predict(samples, results, flags);
}
void write( FileStorage& fs ) const CV_OVERRIDE
{
impl.write(fs);
}
void read( const FileNode& fn ) CV_OVERRIDE
{
impl.read(fn);
}
int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); }
bool isTrained() const CV_OVERRIDE { return impl.isTrained(); }
bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); }
const vector<int>& getRoots() const CV_OVERRIDE { return impl.getRoots(); }
const vector<Node>& getNodes() const CV_OVERRIDE { return impl.getNodes(); }
const vector<Split>& getSplits() const CV_OVERRIDE { return impl.getSplits(); }
const vector<int>& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); }
DTreesImplForBoost impl;
};
Ptr<Boost> Boost::create()
{
return makePtr<BoostImpl>();
}
Ptr<Boost> Boost::load(const String& filepath, const String& nodeName)
{
return Algorithm::load<Boost>(filepath, nodeName);
}
}}
/* End of file. */
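A minimal usage sketch of the Boost API implemented above; it is not part of the original sources, and the toy dataset, parameter values and model file name are illustrative assumptions only.

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main()
{
    // four 2-D samples with binary class labels (toy data, assumption)
    float samplesData[] = { 1.0f, 1.0f,  1.5f, 1.2f,  5.0f, 5.0f,  5.5f, 4.8f };
    int labelsData[] = { 0, 0, 1, 1 };
    Mat samples(4, 2, CV_32F, samplesData);
    Mat labels(4, 1, CV_32S, labelsData);
    Ptr<Boost> boost = Boost::create();
    boost->setBoostType(Boost::REAL);   // Real AdaBoost, see updateWeightsAndTrim()
    boost->setWeakCount(50);
    boost->setWeightTrimRate(0.95);
    boost->setMaxDepth(1);              // decision stumps as weak learners
    boost->train(TrainData::create(samples, ROW_SAMPLE, labels));
    Mat query = (Mat_<float>(1, 2) << 5.2f, 5.1f);
    float prediction = boost->predict(query);
    // models can be serialized/restored via the write()/read() code above
    boost->save("boost_model.xml");                      // hypothetical file name
    Ptr<Boost> loaded = Boost::load("boost_model.xml");
    return loaded->isTrained() ? (int)prediction : -1;
}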

File diff suppressed because it is too large


@ -1,859 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
namespace ml
{
const double minEigenValue = DBL_EPSILON;
class CV_EXPORTS EMImpl CV_FINAL : public EM
{
public:
int nclusters;
int covMatType;
TermCriteria termCrit;
inline TermCriteria getTermCriteria() const CV_OVERRIDE { return termCrit; }
inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { termCrit = val; }
void setClustersNumber(int val) CV_OVERRIDE
{
nclusters = val;
CV_Assert(nclusters >= 1);
}
int getClustersNumber() const CV_OVERRIDE
{
return nclusters;
}
void setCovarianceMatrixType(int val) CV_OVERRIDE
{
covMatType = val;
CV_Assert(covMatType == COV_MAT_SPHERICAL ||
covMatType == COV_MAT_DIAGONAL ||
covMatType == COV_MAT_GENERIC);
}
int getCovarianceMatrixType() const CV_OVERRIDE
{
return covMatType;
}
EMImpl()
{
nclusters = DEFAULT_NCLUSTERS;
covMatType=EM::COV_MAT_DIAGONAL;
termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6);
}
virtual ~EMImpl() {}
void clear() CV_OVERRIDE
{
trainSamples.release();
trainProbs.release();
trainLogLikelihoods.release();
trainLabels.release();
weights.release();
means.release();
covs.clear();
covsEigenValues.clear();
invCovsEigenValues.clear();
covsRotateMats.clear();
logWeightDivDet.release();
}
bool train(const Ptr<TrainData>& data, int) CV_OVERRIDE
{
CV_Assert(!data.empty());
Mat samples = data->getTrainSamples(), labels;
return trainEM(samples, labels, noArray(), noArray());
}
bool trainEM(InputArray samples,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs) CV_OVERRIDE
{
Mat samplesMat = samples.getMat();
setTrainData(START_AUTO_STEP, samplesMat, 0, 0, 0, 0);
return doTrain(START_AUTO_STEP, logLikelihoods, labels, probs);
}
bool trainE(InputArray samples,
InputArray _means0,
InputArray _covs0,
InputArray _weights0,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs) CV_OVERRIDE
{
Mat samplesMat = samples.getMat();
std::vector<Mat> covs0;
_covs0.getMatVector(covs0);
Mat means0 = _means0.getMat(), weights0 = _weights0.getMat();
setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0,
!_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0);
return doTrain(START_E_STEP, logLikelihoods, labels, probs);
}
bool trainM(InputArray samples,
InputArray _probs0,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs) CV_OVERRIDE
{
Mat samplesMat = samples.getMat();
Mat probs0 = _probs0.getMat();
setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? &probs0 : 0, 0, 0, 0);
return doTrain(START_M_STEP, logLikelihoods, labels, probs);
}
float predict(InputArray _inputs, OutputArray _outputs, int) const CV_OVERRIDE
{
bool needprobs = _outputs.needed();
Mat samples = _inputs.getMat(), probs, probsrow;
int ptype = CV_64F;
float firstres = 0.f;
int i, nsamples = samples.rows;
if( needprobs )
{
if( _outputs.fixedType() )
ptype = _outputs.type();
_outputs.create(samples.rows, nclusters, ptype);
probs = _outputs.getMat();
}
else
nsamples = std::min(nsamples, 1);
for( i = 0; i < nsamples; i++ )
{
if( needprobs )
probsrow = probs.row(i);
Vec2d res = computeProbabilities(samples.row(i), needprobs ? &probsrow : 0, ptype);
if( i == 0 )
firstres = (float)res[1];
}
return firstres;
}
Vec2d predict2(InputArray _sample, OutputArray _probs) const CV_OVERRIDE
{
int ptype = CV_64F;
Mat sample = _sample.getMat();
CV_Assert(isTrained());
CV_Assert(!sample.empty());
if(sample.type() != CV_64FC1)
{
Mat tmp;
sample.convertTo(tmp, CV_64FC1);
sample = tmp;
}
sample = sample.reshape(1, 1);
Mat probs;
if( _probs.needed() )
{
if( _probs.fixedType() )
ptype = _probs.type();
_probs.create(1, nclusters, ptype);
probs = _probs.getMat();
}
return computeProbabilities(sample, !probs.empty() ? &probs : 0, ptype);
}
bool isTrained() const CV_OVERRIDE
{
return !means.empty();
}
bool isClassifier() const CV_OVERRIDE
{
return true;
}
int getVarCount() const CV_OVERRIDE
{
return means.cols;
}
String getDefaultName() const CV_OVERRIDE
{
return "opencv_ml_em";
}
static void checkTrainData(int startStep, const Mat& samples,
int nclusters, int covMatType, const Mat* probs, const Mat* means,
const std::vector<Mat>* covs, const Mat* weights)
{
// Check samples.
CV_Assert(!samples.empty());
CV_Assert(samples.channels() == 1);
int nsamples = samples.rows;
int dim = samples.cols;
// Check training params.
CV_Assert(nclusters > 0);
CV_Assert(nclusters <= nsamples);
CV_Assert(startStep == START_AUTO_STEP ||
startStep == START_E_STEP ||
startStep == START_M_STEP);
CV_Assert(covMatType == COV_MAT_GENERIC ||
covMatType == COV_MAT_DIAGONAL ||
covMatType == COV_MAT_SPHERICAL);
CV_Assert(!probs ||
(!probs->empty() &&
probs->rows == nsamples && probs->cols == nclusters &&
(probs->type() == CV_32FC1 || probs->type() == CV_64FC1)));
CV_Assert(!weights ||
(!weights->empty() &&
(weights->cols == 1 || weights->rows == 1) && static_cast<int>(weights->total()) == nclusters &&
(weights->type() == CV_32FC1 || weights->type() == CV_64FC1)));
CV_Assert(!means ||
(!means->empty() &&
means->rows == nclusters && means->cols == dim &&
means->channels() == 1));
CV_Assert(!covs ||
(!covs->empty() &&
static_cast<int>(covs->size()) == nclusters));
if(covs)
{
const Size covSize(dim, dim);
for(size_t i = 0; i < covs->size(); i++)
{
const Mat& m = (*covs)[i];
CV_Assert(!m.empty() && m.size() == covSize && (m.channels() == 1));
}
}
if(startStep == START_E_STEP)
{
CV_Assert(means);
}
else if(startStep == START_M_STEP)
{
CV_Assert(probs);
}
}
static void preprocessSampleData(const Mat& src, Mat& dst, int dstType, bool isAlwaysClone)
{
if(src.type() == dstType && !isAlwaysClone)
dst = src;
else
src.convertTo(dst, dstType);
}
static void preprocessProbability(Mat& probs)
{
max(probs, 0., probs);
const double uniformProbability = (double)(1./probs.cols);
for(int y = 0; y < probs.rows; y++)
{
Mat sampleProbs = probs.row(y);
double maxVal = 0;
minMaxLoc(sampleProbs, 0, &maxVal);
if(maxVal < FLT_EPSILON)
sampleProbs.setTo(uniformProbability);
else
normalize(sampleProbs, sampleProbs, 1, 0, NORM_L1);
}
}
void setTrainData(int startStep, const Mat& samples,
const Mat* probs0,
const Mat* means0,
const std::vector<Mat>* covs0,
const Mat* weights0)
{
clear();
checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0);
bool isKMeansInit = (startStep == START_AUTO_STEP) || (startStep == START_E_STEP && (covs0 == 0 || weights0 == 0));
// Set checked data
preprocessSampleData(samples, trainSamples, isKMeansInit ? CV_32FC1 : CV_64FC1, false);
// set probs
if(probs0 && startStep == START_M_STEP)
{
preprocessSampleData(*probs0, trainProbs, CV_64FC1, true);
preprocessProbability(trainProbs);
}
// set weights
if(weights0 && (startStep == START_E_STEP && covs0))
{
weights0->convertTo(weights, CV_64FC1);
weights = weights.reshape(1,1);
preprocessProbability(weights);
}
// set means
if(means0 && (startStep == START_E_STEP/* || startStep == START_AUTO_STEP*/))
means0->convertTo(means, isKMeansInit ? CV_32FC1 : CV_64FC1);
// set covs
if(covs0 && (startStep == START_E_STEP && weights0))
{
covs.resize(nclusters);
for(size_t i = 0; i < covs0->size(); i++)
(*covs0)[i].convertTo(covs[i], CV_64FC1);
}
}
void decomposeCovs()
{
CV_Assert(!covs.empty());
covsEigenValues.resize(nclusters);
if(covMatType == COV_MAT_GENERIC)
covsRotateMats.resize(nclusters);
invCovsEigenValues.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
CV_Assert(!covs[clusterIndex].empty());
SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV);
if(covMatType == COV_MAT_SPHERICAL)
{
double maxSingularVal = svd.w.at<double>(0);
covsEigenValues[clusterIndex] = Mat(1, 1, CV_64FC1, Scalar(maxSingularVal));
}
else if(covMatType == COV_MAT_DIAGONAL)
{
covsEigenValues[clusterIndex] = covs[clusterIndex].diag().clone(); // Preserve the original order of eigenvalues.
}
else //COV_MAT_GENERIC
{
covsEigenValues[clusterIndex] = svd.w;
covsRotateMats[clusterIndex] = svd.u;
}
max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]);
invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex];
}
}
void clusterTrainSamples()
{
int nsamples = trainSamples.rows;
// Cluster samples, compute/update means
// Convert samples and means to 32F, because kmeans requires this type.
Mat trainSamplesFlt, meansFlt;
if(trainSamples.type() != CV_32FC1)
trainSamples.convertTo(trainSamplesFlt, CV_32FC1);
else
trainSamplesFlt = trainSamples;
if(!means.empty())
{
if(means.type() != CV_32FC1)
means.convertTo(meansFlt, CV_32FC1);
else
meansFlt = means;
}
Mat labels;
kmeans(trainSamplesFlt, nclusters, labels,
TermCriteria(TermCriteria::COUNT, means.empty() ? 10 : 1, 0.5),
10, KMEANS_PP_CENTERS, meansFlt);
// Convert samples and means back to 64F.
CV_Assert(meansFlt.type() == CV_32FC1);
if(trainSamples.type() != CV_64FC1)
{
Mat trainSamplesBuffer;
trainSamplesFlt.convertTo(trainSamplesBuffer, CV_64FC1);
trainSamples = trainSamplesBuffer;
}
meansFlt.convertTo(means, CV_64FC1);
// Compute weights and covs
weights = Mat(1, nclusters, CV_64FC1, Scalar(0));
covs.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
Mat clusterSamples;
for(int sampleIndex = 0; sampleIndex < nsamples; sampleIndex++)
{
if(labels.at<int>(sampleIndex) == clusterIndex)
{
const Mat sample = trainSamples.row(sampleIndex);
clusterSamples.push_back(sample);
}
}
CV_Assert(!clusterSamples.empty());
calcCovarMatrix(clusterSamples, covs[clusterIndex], means.row(clusterIndex),
CV_COVAR_NORMAL + CV_COVAR_ROWS + CV_COVAR_USE_AVG + CV_COVAR_SCALE, CV_64FC1);
weights.at<double>(clusterIndex) = static_cast<double>(clusterSamples.rows)/static_cast<double>(nsamples);
}
decomposeCovs();
}
void computeLogWeightDivDet()
{
CV_Assert(!covsEigenValues.empty());
Mat logWeights;
cv::max(weights, DBL_MIN, weights);
log(weights, logWeights);
logWeightDivDet.create(1, nclusters, CV_64FC1);
// note: logWeightDivDet = log(weight_k) - 0.5 * log(|det(cov_k)|)
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
double logDetCov = 0.;
const int evalCount = static_cast<int>(covsEigenValues[clusterIndex].total());
for(int di = 0; di < evalCount; di++)
logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0));
logWeightDivDet.at<double>(clusterIndex) = logWeights.at<double>(clusterIndex) - 0.5 * logDetCov;
}
}
bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
{
int dim = trainSamples.cols;
// For START_E_STEP and START_AUTO_STEP, compute the missing initial parameters by clustering the training samples
if(startStep != START_M_STEP)
{
if(covs.empty())
{
CV_Assert(weights.empty());
clusterTrainSamples();
}
}
if(!covs.empty() && covsEigenValues.empty() )
{
CV_Assert(invCovsEigenValues.empty());
decomposeCovs();
}
if(startStep == START_M_STEP)
mStep();
double trainLogLikelihood, prevTrainLogLikelihood = 0.;
int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ?
termCrit.maxCount : DEFAULT_MAX_ITERS;
double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.;
for(int iter = 0; ; iter++)
{
eStep();
trainLogLikelihood = sum(trainLogLikelihoods)[0];
if(iter >= maxIters - 1)
break;
double trainLogLikelihoodDelta = trainLogLikelihood - prevTrainLogLikelihood;
if( iter != 0 &&
(trainLogLikelihoodDelta < -DBL_EPSILON ||
trainLogLikelihoodDelta < epsilon * std::fabs(trainLogLikelihood)))
break;
mStep();
prevTrainLogLikelihood = trainLogLikelihood;
}
if( trainLogLikelihood <= -DBL_MAX/10000. )
{
clear();
return false;
}
// postprocess covs
covs.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(covMatType == COV_MAT_SPHERICAL)
{
covs[clusterIndex].create(dim, dim, CV_64FC1);
setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
}
else if(covMatType == COV_MAT_DIAGONAL)
{
covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
}
}
if(labels.needed())
trainLabels.copyTo(labels);
if(probs.needed())
trainProbs.copyTo(probs);
if(logLikelihoods.needed())
trainLogLikelihoods.copyTo(logLikelihoods);
trainSamples.release();
trainProbs.release();
trainLabels.release();
trainLogLikelihoods.release();
return true;
}
Vec2d computeProbabilities(const Mat& sample, Mat* probs, int ptype) const
{
// L_ik = log(weight_k) - 0.5 * log(|det(cov_k)|) - 0.5 *(x_i - mean_k)' cov_k^(-1) (x_i - mean_k)]
// q = arg(max_k(L_ik))
// probs_ik = exp(L_ik - L_iq) / (1 + sum_{j!=q} exp(L_ij - L_iq))
// see Alex Smola's blog http://blog.smola.org/page/2 for
// details on the log-sum-exp trick
int stype = sample.type();
CV_Assert(!means.empty());
CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F));
CV_Assert(sample.size() == Size(means.cols, 1));
int dim = sample.cols;
Mat L(1, nclusters, CV_64FC1), centeredSample(1, dim, CV_64F);
int i, label = 0;
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
const double* mptr = means.ptr<double>(clusterIndex);
double* dptr = centeredSample.ptr<double>();
if( stype == CV_32F )
{
const float* sptr = sample.ptr<float>();
for( i = 0; i < dim; i++ )
dptr[i] = sptr[i] - mptr[i];
}
else
{
const double* sptr = sample.ptr<double>();
for( i = 0; i < dim; i++ )
dptr[i] = sptr[i] - mptr[i];
}
Mat rotatedCenteredSample = covMatType != COV_MAT_GENERIC ?
centeredSample : centeredSample * covsRotateMats[clusterIndex];
double Lval = 0;
for(int di = 0; di < dim; di++)
{
double w = invCovsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0);
double val = rotatedCenteredSample.at<double>(di);
Lval += w * val * val;
}
CV_DbgAssert(!logWeightDivDet.empty());
L.at<double>(clusterIndex) = logWeightDivDet.at<double>(clusterIndex) - 0.5 * Lval;
if(L.at<double>(clusterIndex) > L.at<double>(label))
label = clusterIndex;
}
double maxLVal = L.at<double>(label);
double expDiffSum = 0;
for( i = 0; i < L.cols; i++ )
{
double v = std::exp(L.at<double>(i) - maxLVal);
L.at<double>(i) = v;
expDiffSum += v; // sum_j(exp(L_ij - L_iq))
}
CV_Assert(expDiffSum > 0);
if(probs)
L.convertTo(*probs, ptype, 1./expDiffSum);
Vec2d res;
res[0] = std::log(expDiffSum) + maxLVal - 0.5 * dim * CV_LOG2PI;
res[1] = label;
return res;
}
void eStep()
{
// Compute probs_ik from means_k, covs_k and weights_k.
trainProbs.create(trainSamples.rows, nclusters, CV_64FC1);
trainLabels.create(trainSamples.rows, 1, CV_32SC1);
trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1);
computeLogWeightDivDet();
CV_DbgAssert(trainSamples.type() == CV_64FC1);
CV_DbgAssert(means.type() == CV_64FC1);
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
{
Mat sampleProbs = trainProbs.row(sampleIndex);
Vec2d res = computeProbabilities(trainSamples.row(sampleIndex), &sampleProbs, CV_64F);
trainLogLikelihoods.at<double>(sampleIndex) = res[0];
trainLabels.at<int>(sampleIndex) = static_cast<int>(res[1]);
}
}
void mStep()
{
// Update means_k, covs_k and weights_k from probs_ik
int dim = trainSamples.cols;
// Update weights (not normalized yet; normalization happens at the end of mStep)
reduce(trainProbs, weights, 0, REDUCE_SUM);
// Update means
means.create(nclusters, dim, CV_64FC1);
means = Scalar(0);
const double minPosWeight = trainSamples.rows * DBL_EPSILON;
double minWeight = DBL_MAX;
int minWeightClusterIndex = -1;
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(weights.at<double>(clusterIndex) <= minPosWeight)
continue;
if(weights.at<double>(clusterIndex) < minWeight)
{
minWeight = weights.at<double>(clusterIndex);
minWeightClusterIndex = clusterIndex;
}
Mat clusterMean = means.row(clusterIndex);
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
clusterMean += trainProbs.at<double>(sampleIndex, clusterIndex) * trainSamples.row(sampleIndex);
clusterMean /= weights.at<double>(clusterIndex);
}
// Update covsEigenValues and invCovsEigenValues
covs.resize(nclusters);
covsEigenValues.resize(nclusters);
if(covMatType == COV_MAT_GENERIC)
covsRotateMats.resize(nclusters);
invCovsEigenValues.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(weights.at<double>(clusterIndex) <= minPosWeight)
continue;
if(covMatType != COV_MAT_SPHERICAL)
covsEigenValues[clusterIndex].create(1, dim, CV_64FC1);
else
covsEigenValues[clusterIndex].create(1, 1, CV_64FC1);
if(covMatType == COV_MAT_GENERIC)
covs[clusterIndex].create(dim, dim, CV_64FC1);
Mat clusterCov = covMatType != COV_MAT_GENERIC ?
covsEigenValues[clusterIndex] : covs[clusterIndex];
clusterCov = Scalar(0);
Mat centeredSample;
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
{
centeredSample = trainSamples.row(sampleIndex) - means.row(clusterIndex);
if(covMatType == COV_MAT_GENERIC)
clusterCov += trainProbs.at<double>(sampleIndex, clusterIndex) * centeredSample.t() * centeredSample;
else
{
double p = trainProbs.at<double>(sampleIndex, clusterIndex);
for(int di = 0; di < dim; di++ )
{
double val = centeredSample.at<double>(di);
clusterCov.at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0) += p*val*val;
}
}
}
if(covMatType == COV_MAT_SPHERICAL)
clusterCov /= dim;
clusterCov /= weights.at<double>(clusterIndex);
// Update covsRotateMats for COV_MAT_GENERIC only
if(covMatType == COV_MAT_GENERIC)
{
SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV);
covsEigenValues[clusterIndex] = svd.w;
covsRotateMats[clusterIndex] = svd.u;
}
max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]);
// update invCovsEigenValues
invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex];
}
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(weights.at<double>(clusterIndex) <= minPosWeight)
{
Mat clusterMean = means.row(clusterIndex);
means.row(minWeightClusterIndex).copyTo(clusterMean);
covs[minWeightClusterIndex].copyTo(covs[clusterIndex]);
covsEigenValues[minWeightClusterIndex].copyTo(covsEigenValues[clusterIndex]);
if(covMatType == COV_MAT_GENERIC)
covsRotateMats[minWeightClusterIndex].copyTo(covsRotateMats[clusterIndex]);
invCovsEigenValues[minWeightClusterIndex].copyTo(invCovsEigenValues[clusterIndex]);
}
}
// Normalize weights
weights /= trainSamples.rows;
}
void write_params(FileStorage& fs) const
{
fs << "nclusters" << nclusters;
fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") :
covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
covMatType == COV_MAT_GENERIC ? String("generic") :
format("unknown_%d", covMatType));
writeTermCrit(fs, termCrit);
}
void write(FileStorage& fs) const CV_OVERRIDE
{
writeFormat(fs);
fs << "training_params" << "{";
write_params(fs);
fs << "}";
fs << "weights" << weights;
fs << "means" << means;
size_t i, n = covs.size();
fs << "covs" << "[";
for( i = 0; i < n; i++ )
fs << covs[i];
fs << "]";
}
void read_params(const FileNode& fn)
{
nclusters = (int)fn["nclusters"];
String s = (String)fn["cov_mat_type"];
covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
s == "diagonal" ? COV_MAT_DIAGONAL :
s == "generic" ? COV_MAT_GENERIC : -1;
CV_Assert(covMatType >= 0);
termCrit = readTermCrit(fn);
}
void read(const FileNode& fn) CV_OVERRIDE
{
clear();
read_params(fn["training_params"]);
fn["weights"] >> weights;
fn["means"] >> means;
FileNode cfn = fn["covs"];
FileNodeIterator cfn_it = cfn.begin();
int i, n = (int)cfn.size();
covs.resize(n);
for( i = 0; i < n; i++, ++cfn_it )
(*cfn_it) >> covs[i];
decomposeCovs();
computeLogWeightDivDet();
}
Mat getWeights() const CV_OVERRIDE { return weights; }
Mat getMeans() const CV_OVERRIDE { return means; }
void getCovs(std::vector<Mat>& _covs) const CV_OVERRIDE
{
_covs.resize(covs.size());
std::copy(covs.begin(), covs.end(), _covs.begin());
}
// all inner matrices have type CV_64FC1
Mat trainSamples;
Mat trainProbs;
Mat trainLogLikelihoods;
Mat trainLabels;
Mat weights;
Mat means;
std::vector<Mat> covs;
std::vector<Mat> covsEigenValues;
std::vector<Mat> covsRotateMats;
std::vector<Mat> invCovsEigenValues;
Mat logWeightDivDet;
};
Ptr<EM> EM::create()
{
return makePtr<EMImpl>();
}
Ptr<EM> EM::load(const String& filepath, const String& nodeName)
{
return Algorithm::load<EM>(filepath, nodeName);
}
}
} // namespace cv
/* End of file. */
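A minimal usage sketch of the EM API above; it is not part of the original file, and the synthetic data and parameter choices are assumptions for illustration.

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main()
{
    // 200 synthetic 2-D points drawn from a standard normal (assumption)
    Mat samples(200, 2, CV_64F);
    randn(samples, Scalar::all(0), Scalar::all(1));
    Ptr<EM> em = EM::create();
    em->setClustersNumber(2);
    em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
    em->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-6));
    Mat logLikelihoods, labels, probs;
    em->trainEM(samples, logLikelihoods, labels, probs);   // START_AUTO_STEP: k-means init + EM iterations
    // posterior probabilities and the most likely component for one sample
    Mat sampleProbs;
    Vec2d res = em->predict2(samples.row(0), sampleProbs);
    return (int)res[1];
}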

File diff suppressed because it is too large


@ -1,222 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv { namespace ml {
ParamGrid::ParamGrid() { minVal = maxVal = 0.; logStep = 1; }
ParamGrid::ParamGrid(double _minVal, double _maxVal, double _logStep)
{
CV_TRACE_FUNCTION();
minVal = std::min(_minVal, _maxVal);
maxVal = std::max(_minVal, _maxVal);
logStep = std::max(_logStep, 1.);
}
Ptr<ParamGrid> ParamGrid::create(double minval, double maxval, double logstep) {
return makePtr<ParamGrid>(minval, maxval, logstep);
}
bool StatModel::empty() const { return !isTrained(); }
int StatModel::getVarCount() const { return 0; }
bool StatModel::train(const Ptr<TrainData>& trainData, int )
{
CV_TRACE_FUNCTION();
CV_Assert(!trainData.empty());
CV_Error(CV_StsNotImplemented, "");
return false;
}
bool StatModel::train( InputArray samples, int layout, InputArray responses )
{
CV_TRACE_FUNCTION();
CV_Assert(!samples.empty());
return train(TrainData::create(samples, layout, responses));
}
class ParallelCalcError : public ParallelLoopBody
{
private:
const Ptr<TrainData>& data;
bool &testerr;
Mat &resp;
const StatModel &s;
vector<double> &errStrip;
public:
ParallelCalcError(const Ptr<TrainData>& d, bool &t, Mat &_r,const StatModel &w, vector<double> &e) :
data(d),
testerr(t),
resp(_r),
s(w),
errStrip(e)
{
}
virtual void operator()(const Range& range) const CV_OVERRIDE
{
int idxErr = range.start;
CV_TRACE_FUNCTION_SKIP_NESTED();
Mat samples = data->getSamples();
Mat weights=testerr? data->getTestSampleWeights() : data->getTrainSampleWeights();
int layout = data->getLayout();
Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
const int* sidx_ptr = sidx.ptr<int>();
bool isclassifier = s.isClassifier();
Mat responses = data->getResponses();
int responses_type = responses.type();
double err = 0;
const float* sw = weights.empty() ? 0 : weights.ptr<float>();
for (int i = range.start; i < range.end; i++)
{
int si = sidx_ptr ? sidx_ptr[i] : i;
double sweight = sw ? static_cast<double>(sw[i]) : 1.;
Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
float val = s.predict(sample);
float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);
if (isclassifier)
err += sweight * (fabs(val - val0) > FLT_EPSILON); // weighted misclassification count
else
err += sweight * (val - val0)*(val - val0);
if (!resp.empty())
resp.at<float>(i) = val;
}
errStrip[idxErr] = err;
}
ParallelCalcError& operator=(const ParallelCalcError &) {
return *this;
}
};
float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray _resp) const
{
CV_TRACE_FUNCTION_SKIP_NESTED();
CV_Assert(!data.empty());
Mat samples = data->getSamples();
Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
int n = (int)sidx.total();
bool isclassifier = isClassifier();
Mat responses = data->getResponses();
if (n == 0)
{
n = data->getNSamples();
weights = data->getTrainSampleWeights();
testerr =false;
}
if (n == 0)
return -FLT_MAX;
Mat resp;
if (_resp.needed())
resp.create(n, 1, CV_32F);
double err = 0;
vector<double> errStrip(n,0.0);
ParallelCalcError x(data, testerr, resp, *this,errStrip);
parallel_for_(Range(0,n),x);
for (size_t i = 0; i < errStrip.size(); i++)
err += errStrip[i];
float weightSum= weights.empty() ? n: static_cast<float>(sum(weights)(0));
if (_resp.needed())
resp.copyTo(_resp);
return (float)(err/ weightSum * (isclassifier ? 100 : 1));
}
/* Calculates the upper triangular matrix S such that A = S'*S, where A is a symmetric matrix */
static void Cholesky( const Mat& A, Mat& S )
{
CV_TRACE_FUNCTION();
CV_Assert(A.type() == CV_32F);
S = A.clone();
cv::Cholesky ((float*)S.ptr(),S.step, S.rows,NULL, 0, 0);
S = S.t();
for (int i=1;i<S.rows;i++)
for (int j=0;j<i;j++)
S.at<float>(i,j)=0;
}
/* Generates <nsamples> samples from a multivariate normal distribution, where <mean> is the
mean row vector and <cov> is the symmetric covariance matrix */
void randMVNormal( InputArray _mean, InputArray _cov, int nsamples, OutputArray _samples )
{
CV_TRACE_FUNCTION();
// check mean vector and covariance matrix
Mat mean = _mean.getMat(), cov = _cov.getMat();
int dim = (int)mean.total(); // dimensionality
CV_Assert(mean.rows == 1 || mean.cols == 1);
CV_Assert(cov.rows == dim && cov.cols == dim);
mean = mean.reshape(1,1); // ensure a row vector
// generate n-samples of the same dimension, from ~N(0,1)
_samples.create(nsamples, dim, CV_32F);
Mat samples = _samples.getMat();
randn(samples, Scalar::all(0), Scalar::all(1));
// decompose covariance using Cholesky: cov = U'*U
// (cov must be square, symmetric, and positive semi-definite matrix)
Mat utmat;
Cholesky(cov, utmat);
// transform random numbers using specified mean and covariance
for( int i = 0; i < nsamples; i++ )
{
Mat sample = samples.row(i);
sample = sample * utmat + mean;
}
}
}}
/* End of file */
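A short sketch exercising randMVNormal() defined above; it is not from the original sources, and the mean and covariance values are arbitrary assumptions.

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
int main()
{
    // draw 50 samples from N(mean, cov); cov must be symmetric positive semi-definite
    Mat mean = (Mat_<float>(1, 2) << 1.0f, 3.0f);
    Mat cov  = (Mat_<float>(2, 2) << 2.0f, 0.5f,
                                     0.5f, 1.0f);
    Mat samples;
    ml::randMVNormal(mean, cov, 50, samples);   // samples: 50 x 2, CV_32F
    return samples.rows;
}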


@ -1,530 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "kdtree.hpp"
namespace cv
{
namespace ml
{
// This is a reimplementation of the kd-trees from cvkdtree*.* by Xavier Delacour, cleaned up and
// adapted to work with the new OpenCV data structures.
// The algorithm is taken from:
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
const int MAX_TREE_DEPTH = 32;
KDTree::KDTree()
{
maxDepth = -1;
normType = NORM_L2;
}
KDTree::KDTree(InputArray _points, bool _copyData)
{
maxDepth = -1;
normType = NORM_L2;
build(_points, _copyData);
}
KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData)
{
maxDepth = -1;
normType = NORM_L2;
build(_points, _labels, _copyData);
}
struct SubTree
{
SubTree() : first(0), last(0), nodeIdx(0), depth(0) {}
SubTree(int _first, int _last, int _nodeIdx, int _depth)
: first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {}
int first;
int last;
int nodeIdx;
int depth;
};
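// Rearranges ofs[a..b] in place so that the element at the middle position is the median of
// vals along the chosen split dimension; the returned value becomes the node boundary.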
static float
medianPartition( size_t* ofs, int a, int b, const float* vals )
{
int k, a0 = a, b0 = b;
int middle = (a + b)/2;
while( b > a )
{
int i0 = a, i1 = (a+b)/2, i2 = b;
float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]];
int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) :
v0 < v2 ? (v1 == v0 ? i2 : i0): (v1 < v2 ? i2 : i1);
float pivot = vals[ofs[ip]];
std::swap(ofs[ip], ofs[i2]);
for( i1 = i0, i0--; i1 <= i2; i1++ )
if( vals[ofs[i1]] <= pivot )
{
i0++;
std::swap(ofs[i0], ofs[i1]);
}
if( i0 == middle )
break;
if( i0 > middle )
b = i0 - (b == i0);
else
a = i0;
}
float pivot = vals[ofs[middle]];
for( k = a0; k < middle; k++ )
{
CV_Assert(vals[ofs[k]] <= pivot);
}
for( k = b0; k > middle; k-- )
{
CV_Assert(vals[ofs[k]] >= pivot);
}
return vals[ofs[middle]];
}
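// Accumulates per-dimension sums and sums of squares over points[ofs[a..b]];
// build() uses them to pick the split dimension with the largest variance.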
static void
computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums )
{
int i, j, dims = points.cols;
const float* data = points.ptr<float>(0);
for( j = 0; j < dims; j++ )
sums[j*2] = sums[j*2+1] = 0;
for( i = a; i <= b; i++ )
{
const float* row = data + ofs[i];
for( j = 0; j < dims; j++ )
{
double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t;
sums[j*2] = s; sums[j*2+1] = s2;
}
}
}
void KDTree::build(InputArray _points, bool _copyData)
{
build(_points, noArray(), _copyData);
}
void KDTree::build(InputArray __points, InputArray __labels, bool _copyData)
{
Mat _points = __points.getMat(), _labels = __labels.getMat();
CV_Assert(_points.type() == CV_32F && !_points.empty());
std::vector<KDTree::Node>().swap(nodes);
if( !_copyData )
points = _points;
else
{
points.release();
points.create(_points.size(), _points.type());
}
int i, j, n = _points.rows, ptdims = _points.cols, top = 0;
const float* data = _points.ptr<float>(0);
float* dstdata = points.ptr<float>(0);
size_t step = _points.step1();
size_t dstep = points.step1();
int ptpos = 0;
labels.resize(n);
const int* _labels_data = 0;
if( !_labels.empty() )
{
int nlabels = _labels.checkVector(1, CV_32S, true);
CV_Assert(nlabels == n);
_labels_data = _labels.ptr<int>();
}
Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F);
SubTree stack[MAX_TREE_DEPTH*2];
std::vector<size_t> _ptofs(n);
size_t* ptofs = &_ptofs[0];
for( i = 0; i < n; i++ )
ptofs[i] = i*step;
nodes.push_back(Node());
computeSums(points, ptofs, 0, n-1, sumstack.ptr<double>(top));
stack[top++] = SubTree(0, n-1, 0, 0);
int _maxDepth = 0;
while( --top >= 0 )
{
int first = stack[top].first, last = stack[top].last;
int depth = stack[top].depth, nidx = stack[top].nodeIdx;
int count = last - first + 1, dim = -1;
const double* sums = sumstack.ptr<double>(top);
double invCount = 1./count, maxVar = -1.;
if( count == 1 )
{
int idx0 = (int)(ptofs[first]/step);
int idx = _copyData ? ptpos++ : idx0;
nodes[nidx].idx = ~idx;
if( _copyData )
{
const float* src = data + ptofs[first];
float* dst = dstdata + idx*dstep;
for( j = 0; j < ptdims; j++ )
dst[j] = src[j];
}
labels[idx] = _labels_data ? _labels_data[idx0] : idx0;
_maxDepth = std::max(_maxDepth, depth);
continue;
}
// find the dimensionality with the biggest variance
for( j = 0; j < ptdims; j++ )
{
double m = sums[j*2]*invCount;
double varj = sums[j*2+1]*invCount - m*m;
if( maxVar < varj )
{
maxVar = varj;
dim = j;
}
}
int left = (int)nodes.size(), right = left + 1;
nodes.push_back(Node());
nodes.push_back(Node());
nodes[nidx].idx = dim;
nodes[nidx].left = left;
nodes[nidx].right = right;
nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim);
int middle = (first + last)/2;
double *lsums = (double*)sums, *rsums = lsums + ptdims*2;
computeSums(points, ptofs, middle+1, last, rsums);
for( j = 0; j < ptdims*2; j++ )
lsums[j] = sums[j] - rsums[j];
stack[top++] = SubTree(first, middle, left, depth+1);
stack[top++] = SubTree(middle+1, last, right, depth+1);
}
maxDepth = _maxDepth;
}
struct PQueueElem
{
PQueueElem() : dist(0), idx(0) {}
PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {}
float dist;
int idx;
};
int KDTree::findNearest(InputArray _vec, int K, int emax,
OutputArray _neighborsIdx, OutputArray _neighbors,
OutputArray _dist, OutputArray _labels) const
{
Mat vecmat = _vec.getMat();
CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols );
const float* vec = vecmat.ptr<float>();
K = std::min(K, points.rows);
int ptdims = points.cols;
CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1));
AutoBuffer<uchar> _buf((K+1)*(sizeof(float) + sizeof(int)));
int* idx = (int*)_buf.data();
float* dist = (float*)(idx + K + 1);
int i, j, ncount = 0, e = 0;
int qsize = 0, maxqsize = 1 << 10;
AutoBuffer<uchar> _pqueue(maxqsize*sizeof(PQueueElem));
PQueueElem* pqueue = (PQueueElem*)_pqueue.data();
emax = std::max(emax, 1);
for( e = 0; e < emax; )
{
float d, alt_d = 0.f;
int nidx;
if( e == 0 )
nidx = 0;
else
{
// take the next node from the priority queue
if( qsize == 0 )
break;
nidx = pqueue[0].idx;
alt_d = pqueue[0].dist;
if( --qsize > 0 )
{
std::swap(pqueue[0], pqueue[qsize]);
d = pqueue[0].dist;
for( i = 0;;)
{
int left = i*2 + 1, right = i*2 + 2;
if( left >= qsize )
break;
if( right < qsize && pqueue[right].dist < pqueue[left].dist )
left = right;
if( pqueue[left].dist >= d )
break;
std::swap(pqueue[i], pqueue[left]);
i = left;
}
}
if( ncount == K && alt_d > dist[ncount-1] )
continue;
}
for(;;)
{
if( nidx < 0 )
break;
const Node& n = nodes[nidx];
if( n.idx < 0 )
{
i = ~n.idx;
const float* row = points.ptr<float>(i);
if( normType == NORM_L2 )
for( j = 0, d = 0.f; j < ptdims; j++ )
{
float t = vec[j] - row[j];
d += t*t;
}
else
for( j = 0, d = 0.f; j < ptdims; j++ )
d += std::abs(vec[j] - row[j]);
dist[ncount] = d;
idx[ncount] = i;
for( i = ncount-1; i >= 0; i-- )
{
if( dist[i] <= d )
break;
std::swap(dist[i], dist[i+1]);
std::swap(idx[i], idx[i+1]);
}
ncount += ncount < K;
e++;
break;
}
int alt;
if( vec[n.idx] <= n.boundary )
{
nidx = n.left;
alt = n.right;
}
else
{
nidx = n.right;
alt = n.left;
}
d = vec[n.idx] - n.boundary;
if( normType == NORM_L2 )
d = d*d + alt_d;
else
d = std::abs(d) + alt_d;
// subtree pruning
if( ncount == K && d > dist[ncount-1] )
continue;
// add alternative subtree to the priority queue
pqueue[qsize] = PQueueElem(d, alt);
for( i = qsize; i > 0; )
{
int parent = (i-1)/2;
if( parent < 0 || pqueue[parent].dist <= d )
break;
std::swap(pqueue[i], pqueue[parent]);
i = parent;
}
qsize += qsize+1 < maxqsize;
}
}
K = std::min(K, ncount);
if( _neighborsIdx.needed() )
{
_neighborsIdx.create(K, 1, CV_32S, -1, true);
Mat nidx = _neighborsIdx.getMat();
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
}
if( _dist.needed() )
sqrt(Mat(K, 1, CV_32F, dist), _dist);
if( _neighbors.needed() || _labels.needed() )
getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels);
return K;
}
void KDTree::findOrthoRange(InputArray _lowerBound,
InputArray _upperBound,
OutputArray _neighborsIdx,
OutputArray _neighbors,
OutputArray _labels ) const
{
int ptdims = points.cols;
Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat();
CV_Assert( lowerBound.size == upperBound.size &&
lowerBound.isContinuous() &&
upperBound.isContinuous() &&
lowerBound.type() == upperBound.type() &&
lowerBound.type() == CV_32F &&
lowerBound.total() == (size_t)ptdims );
const float* L = lowerBound.ptr<float>();
const float* R = upperBound.ptr<float>();
std::vector<int> idx;
AutoBuffer<int> _stack(MAX_TREE_DEPTH*2 + 1);
int* stack = _stack.data();
int top = 0;
stack[top++] = 0;
while( --top >= 0 )
{
int nidx = stack[top];
if( nidx < 0 )
break;
const Node& n = nodes[nidx];
if( n.idx < 0 )
{
int j, i = ~n.idx;
const float* row = points.ptr<float>(i);
for( j = 0; j < ptdims; j++ )
if( row[j] < L[j] || row[j] >= R[j] )
break;
if( j == ptdims )
idx.push_back(i);
continue;
}
if( L[n.idx] <= n.boundary )
stack[top++] = n.left;
if( R[n.idx] > n.boundary )
stack[top++] = n.right;
}
if( _neighborsIdx.needed() )
{
_neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true);
Mat nidx = _neighborsIdx.getMat();
Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx);
}
getPoints( idx, _neighbors, _labels );
}
void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const
{
Mat idxmat = _idx.getMat(), pts, labelsmat;
CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S &&
(idxmat.cols == 1 || idxmat.rows == 1) );
const int* idx = idxmat.ptr<int>();
int* dstlabels = 0;
int ptdims = points.cols;
int i, nidx = (int)idxmat.total();
if( nidx == 0 )
{
_pts.release();
_labels.release();
return;
}
if( _pts.needed() )
{
_pts.create( nidx, ptdims, points.type());
pts = _pts.getMat();
}
if(_labels.needed())
{
_labels.create(nidx, 1, CV_32S, -1, true);
labelsmat = _labels.getMat();
CV_Assert( labelsmat.isContinuous() );
dstlabels = labelsmat.ptr<int>();
}
const int* srclabels = !labels.empty() ? &labels[0] : 0;
for( i = 0; i < nidx; i++ )
{
int k = idx[i];
CV_Assert( (unsigned)k < (unsigned)points.rows );
const float* src = points.ptr<float>(k);
if( !pts.empty() )
std::copy(src, src + ptdims, pts.ptr<float>(i));
if( dstlabels )
dstlabels[i] = srclabels ? srclabels[k] : k;
}
}
const float* KDTree::getPoint(int ptidx, int* label) const
{
CV_Assert( (unsigned)ptidx < (unsigned)points.rows);
if(label)
*label = labels[ptidx];
return points.ptr<float>(ptidx);
}
int KDTree::dims() const
{
return !points.empty() ? points.cols : 0;
}
}
}
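A minimal sketch of how the KDTree above could be used, assuming the internal kdtree.hpp header declared alongside is available to the including code (it is not part of the public ml interface); the random data are placeholders.

#include <opencv2/core.hpp>
#include "kdtree.hpp"   // internal header, see below
using namespace cv;
int main()
{
    Mat points(1000, 3, CV_32F);
    randu(points, 0.0f, 1.0f);
    ml::KDTree tree(points, /*copyAndReorderPoints=*/false);
    Mat query(1, 3, CV_32F);
    randu(query, 0.0f, 1.0f);
    const int K = 5, Emax = 100;     // examine at most 100 leaves
    Mat neighborIdx, neighbors, dists;
    tree.findNearest(query, K, Emax, neighborIdx, neighbors, dists);
    return neighborIdx.rows;         // K indices of the nearest stored points
}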


@ -1,97 +0,0 @@
#ifndef KDTREE_H
#define KDTREE_H
#include "precomp.hpp"
namespace cv
{
namespace ml
{
/*!
Fast Nearest Neighbor Search Class.
The class implements D. Lowe's BBF (Best-Bin-First) algorithm for fast
approximate (or accurate) nearest neighbor search in multi-dimensional spaces.
First, a set of vectors is passed to the KDTree::KDTree() constructor
or the KDTree::build() method, where it is reordered.
Then arbitrary vectors can be passed to the KDTree::findNearest() methods, which
find the K nearest neighbors among the vectors from the initial set.
The user can balance between the speed and accuracy of the search by varying the Emax
parameter, which is the number of leaves that the algorithm checks.
Larger parameter values yield more accurate results at the expense of lower processing speed.
\code
KDTree T(points, false);
const int K = 3, Emax = INT_MAX;
int idx[K];
float dist[K];
T.findNearest(query_vec, K, Emax, idx, 0, dist);
CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]);
\endcode
*/
class CV_EXPORTS_W KDTree
{
public:
/*!
The node of the search tree.
*/
struct Node
{
Node() : idx(-1), left(-1), right(-1), boundary(0.f) {}
Node(int _idx, int _left, int _right, float _boundary)
: idx(_idx), left(_left), right(_right), boundary(_boundary) {}
//! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point)
int idx;
//! node indices of the left and the right branches
int left, right;
//! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right
float boundary;
};
//! the default constructor
CV_WRAP KDTree();
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints = false);
//! the full constructor that builds the search tree
CV_WRAP KDTree(InputArray points, InputArray _labels,
bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, bool copyAndReorderPoints = false);
//! builds the search tree
CV_WRAP void build(InputArray points, InputArray labels,
bool copyAndReorderPoints = false);
//! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
CV_WRAP int findNearest(InputArray vec, int K, int Emax,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray dist = noArray(),
OutputArray labels = noArray()) const;
//! finds all the points from the initial set that belong to the specified box
CV_WRAP void findOrthoRange(InputArray minBounds,
InputArray maxBounds,
OutputArray neighborsIdx,
OutputArray neighbors = noArray(),
OutputArray labels = noArray()) const;
//! returns vectors with the specified indices
CV_WRAP void getPoints(InputArray idx, OutputArray pts,
OutputArray labels = noArray()) const;
//! return a vector with the specified index
const float* getPoint(int ptidx, int* label = 0) const;
//! returns the search space dimensionality
CV_WRAP int dims() const;
std::vector<Node> nodes; //!< all the tree nodes
CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set.
CV_PROP std::vector<int> labels; //!< the parallel array of labels.
CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it
CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it
};
}
}
#endif


@ -1,521 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "kdtree.hpp"
/****************************************************************************************\
* K-Nearest Neighbors Classifier *
\****************************************************************************************/
namespace cv {
namespace ml {
const String NAME_BRUTE_FORCE = "opencv_ml_knn";
const String NAME_KDTREE = "opencv_ml_knn_kd";
class Impl
{
public:
Impl()
{
defaultK = 10;
isclassifier = true;
Emax = INT_MAX;
}
virtual ~Impl() {}
virtual String getModelName() const = 0;
virtual int getType() const = 0;
virtual float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const = 0;
bool train( const Ptr<TrainData>& data, int flags )
{
CV_Assert(!data.empty());
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();
CV_Assert( new_samples.type() == CV_32F );
if( !update )
{
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}
samples.push_back(new_samples);
responses.push_back(new_responses);
doTrain(samples);
return true;
}
virtual void doTrain(InputArray points) { CV_UNUSED(points); }
void clear()
{
samples.release();
responses.release();
}
void read( const FileNode& fn )
{
clear();
isclassifier = (int)fn["is_classifier"] != 0;
defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
}
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)isclassifier;
fs << "default_k" << defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
}
public:
int defaultK;
bool isclassifier;
int Emax;
Mat samples;
Mat responses;
};
class BruteForceImpl CV_FINAL : public Impl
{
public:
String getModelName() const CV_OVERRIDE { return NAME_BRUTE_FORCE; }
int getType() const CV_OVERRIDE { return ml::KNearest::BRUTE_FORCE; }
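// Brute-force k-NN over the stored samples: for every test sample in `range` the k smallest
// squared L2 distances (and the matching responses) are kept in small insertion-sorted buffers,
// then the result is either averaged (regression) or decided by majority vote (classification).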
void findNearestCore( const Mat& _samples, int k, const Range& range,
Mat* results, Mat* neighbor_responses,
Mat* dists, float* presult ) const
{
int testidx, baseidx, i, j, d = samples.cols, nsamples = samples.rows;
int testcount = range.end - range.start;
AutoBuffer<float> buf(testcount*k*2);
float* dbuf = buf.data();
float* rbuf = dbuf + testcount*k;
const float* rptr = responses.ptr<float>();
for( testidx = 0; testidx < testcount; testidx++ )
{
for( i = 0; i < k; i++ )
{
dbuf[testidx*k + i] = FLT_MAX;
rbuf[testidx*k + i] = 0.f;
}
}
for( baseidx = 0; baseidx < nsamples; baseidx++ )
{
for( testidx = 0; testidx < testcount; testidx++ )
{
const float* v = samples.ptr<float>(baseidx);
const float* u = _samples.ptr<float>(testidx + range.start);
float s = 0;
for( i = 0; i <= d - 4; i += 4 )
{
float t0 = u[i] - v[i], t1 = u[i+1] - v[i+1];
float t2 = u[i+2] - v[i+2], t3 = u[i+3] - v[i+3];
s += t0*t0 + t1*t1 + t2*t2 + t3*t3;
}
for( ; i < d; i++ )
{
float t0 = u[i] - v[i];
s += t0*t0;
}
Cv32suf si;
si.f = (float)s;
Cv32suf* dd = (Cv32suf*)(&dbuf[testidx*k]);
float* nr = &rbuf[testidx*k];
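// distances are non-negative, so comparing their IEEE-754 bit patterns as
// signed integers (via Cv32suf) preserves the float ordering; the current
// k best distances can therefore be maintained with integer comparisons only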
for( i = k; i > 0; i-- )
if( si.i >= dd[i-1].i )
break;
if( i >= k )
continue;
for( j = k-2; j >= i; j-- )
{
dd[j+1].i = dd[j].i;
nr[j+1] = nr[j];
}
dd[i].i = si.i;
nr[i] = rptr[baseidx];
}
}
float result = 0.f;
float inv_scale = 1.f/k;
for( testidx = 0; testidx < testcount; testidx++ )
{
if( neighbor_responses )
{
float* nr = neighbor_responses->ptr<float>(testidx + range.start);
for( j = 0; j < k; j++ )
nr[j] = rbuf[testidx*k + j];
for( ; j < k; j++ )
nr[j] = 0.f;
}
if( dists )
{
float* dptr = dists->ptr<float>(testidx + range.start);
for( j = 0; j < k; j++ )
dptr[j] = dbuf[testidx*k + j];
for( ; j < k; j++ )
dptr[j] = 0.f;
}
if( results || testidx+range.start == 0 )
{
if( !isclassifier || k == 1 )
{
float s = 0.f;
for( j = 0; j < k; j++ )
s += rbuf[testidx*k + j];
result = (float)(s*inv_scale);
}
else
{
float* rp = rbuf + testidx*k;
std::sort(rp, rp+k);
result = rp[0];
int prev_start = 0;
int best_count = 0;
for( j = 1; j <= k; j++ )
{
if( j == k || rp[j] != rp[j-1] )
{
int count = j - prev_start;
if( best_count < count )
{
best_count = count;
result = rp[j-1];
}
prev_start = j;
}
}
}
if( results )
results->at<float>(testidx + range.start) = result;
if( presult && testidx+range.start == 0 )
*presult = result;
}
}
}
struct findKNearestInvoker : public ParallelLoopBody
{
findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
{
p = _p;
k = _k;
_samples = &__samples;
_results = __results;
_neighbor_responses = __neighbor_responses;
_dists = __dists;
presult = _presult;
}
void operator()(const Range& range) const CV_OVERRIDE
{
int delta = std::min(range.end - range.start, 256);
for( int start = range.start; start < range.end; start += delta )
{
p->findNearestCore( *_samples, k, Range(start, std::min(start + delta, range.end)),
_results, _neighbor_responses, _dists, presult );
}
}
const BruteForceImpl* p;
int k;
const Mat* _samples;
Mat* _results;
Mat* _neighbor_responses;
Mat* _dists;
float* presult;
};
float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const CV_OVERRIDE
{
float result = 0.f;
CV_Assert( 0 < k );
k = std::min(k, samples.rows);
Mat test_samples = _samples.getMat();
CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols );
int testcount = test_samples.rows;
if( testcount == 0 )
{
_results.release();
_neighborResponses.release();
_dists.release();
return 0.f;
}
Mat res, nr, d, *pres = 0, *pnr = 0, *pd = 0;
if( _results.needed() )
{
_results.create(testcount, 1, CV_32F);
pres = &(res = _results.getMat());
}
if( _neighborResponses.needed() )
{
_neighborResponses.create(testcount, k, CV_32F);
pnr = &(nr = _neighborResponses.getMat());
}
if( _dists.needed() )
{
_dists.create(testcount, k, CV_32F);
pd = &(d = _dists.getMat());
}
findKNearestInvoker invoker(this, k, test_samples, pres, pnr, pd, &result);
parallel_for_(Range(0, testcount), invoker);
//invoker(Range(0, testcount));
return result;
}
};
class KDTreeImpl CV_FINAL : public Impl
{
public:
String getModelName() const CV_OVERRIDE { return NAME_KDTREE; }
int getType() const CV_OVERRIDE { return ml::KNearest::KDTREE; }
void doTrain(InputArray points) CV_OVERRIDE
{
tr.build(points);
}
float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const CV_OVERRIDE
{
float result = 0.f;
CV_Assert( 0 < k );
k = std::min(k, samples.rows);
Mat test_samples = _samples.getMat();
CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols );
int testcount = test_samples.rows;
if( testcount == 0 )
{
_results.release();
_neighborResponses.release();
_dists.release();
return 0.f;
}
Mat res, nr, d;
if( _results.needed() )
{
res = _results.getMat();
}
if( _neighborResponses.needed() )
{
nr = _neighborResponses.getMat();
}
if( _dists.needed() )
{
d = _dists.getMat();
}
for (int i=0; i<test_samples.rows; ++i)
{
Mat _res, _nr, _d;
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
res.push_back(_res.t());
_results.assign(res);
}
return result; // currently always 0
}
KDTree tr;
};
//================================================================
class KNearestImpl CV_FINAL : public KNearest
{
inline int getDefaultK() const CV_OVERRIDE { return impl->defaultK; }
inline void setDefaultK(int val) CV_OVERRIDE { impl->defaultK = val; }
inline bool getIsClassifier() const CV_OVERRIDE { return impl->isclassifier; }
inline void setIsClassifier(bool val) CV_OVERRIDE { impl->isclassifier = val; }
inline int getEmax() const CV_OVERRIDE { return impl->Emax; }
inline void setEmax(int val) CV_OVERRIDE { impl->Emax = val; }
public:
int getAlgorithmType() const CV_OVERRIDE
{
return impl->getType();
}
void setAlgorithmType(int val) CV_OVERRIDE
{
if (val != BRUTE_FORCE && val != KDTREE)
val = BRUTE_FORCE;
int k = getDefaultK();
int e = getEmax();
bool c = getIsClassifier();
initImpl(val);
setDefaultK(k);
setEmax(e);
setIsClassifier(c);
}
public:
KNearestImpl()
{
initImpl(BRUTE_FORCE);
}
~KNearestImpl()
{
}
bool isClassifier() const CV_OVERRIDE { return impl->isclassifier; }
bool isTrained() const CV_OVERRIDE { return !impl->samples.empty(); }
int getVarCount() const CV_OVERRIDE { return impl->samples.cols; }
void write( FileStorage& fs ) const CV_OVERRIDE
{
writeFormat(fs);
impl->write(fs);
}
void read( const FileNode& fn ) CV_OVERRIDE
{
int algorithmType = BRUTE_FORCE;
if (fn.name() == NAME_KDTREE)
algorithmType = KDTREE;
initImpl(algorithmType);
impl->read(fn);
}
float findNearest( InputArray samples, int k,
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const CV_OVERRIDE
{
return impl->findNearest(samples, k, results, neighborResponses, dist);
}
float predict(InputArray inputs, OutputArray outputs, int) const CV_OVERRIDE
{
return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
}
bool train( const Ptr<TrainData>& data, int flags ) CV_OVERRIDE
{
CV_Assert(!data.empty());
return impl->train(data, flags);
}
String getDefaultName() const CV_OVERRIDE { return impl->getModelName(); }
protected:
void initImpl(int algorithmType)
{
if (algorithmType != KDTREE)
impl = makePtr<BruteForceImpl>();
else
impl = makePtr<KDTreeImpl>();
}
Ptr<Impl> impl;
};
Ptr<KNearest> KNearest::create()
{
return makePtr<KNearestImpl>();
}
Ptr<KNearest> KNearest::load(const String& filepath)
{
FileStorage fs;
fs.open(filepath, FileStorage::READ);
Ptr<KNearest> knearest = makePtr<KNearestImpl>();
((KNearestImpl*)knearest.get())->read(fs.getFirstTopLevelNode());
return knearest;
}
}
}
/* End of file */
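For reference, a minimal usage sketch of the k-nearest-neighbours interface removed above, on hypothetical toy data (it assumes the class is available from wherever the ml module now lives, e.g. opencv_contrib):
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main()
{
    // 20 random 2-D points, labelled by which vertical half-plane they fall in
    Mat samples(20, 2, CV_32F), responses(20, 1, CV_32F);
    randu(samples, Scalar::all(0.f), Scalar::all(1.f));
    for (int i = 0; i < samples.rows; i++)
        responses.at<float>(i) = samples.at<float>(i, 0) > 0.5f ? 1.f : 0.f;
    Ptr<KNearest> knn = KNearest::create();
    knn->setDefaultK(3);
    knn->setIsClassifier(true);
    knn->setAlgorithmType(KNearest::BRUTE_FORCE);
    knn->train(TrainData::create(samples, ROW_SAMPLE, responses));
    Mat query = (Mat_<float>(1, 2) << 0.7f, 0.3f);
    Mat results, neighbours, dists;
    knn->findNearest(query, 3, results, neighbours, dists);
    return 0;
}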

View File

@ -1,604 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
//
// This is an implementation of the Logistic Regression algorithm
//
#include "precomp.hpp"
using namespace std;
namespace cv {
namespace ml {
class LrParams
{
public:
LrParams()
{
alpha = 0.001;
num_iters = 1000;
norm = LogisticRegression::REG_L2;
train_method = LogisticRegression::BATCH;
mini_batch_size = 1;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
double alpha; //!< learning rate.
int num_iters; //!< number of iterations.
int norm;
int train_method;
int mini_batch_size;
TermCriteria term_crit;
};
class LogisticRegressionImpl CV_FINAL : public LogisticRegression
{
public:
LogisticRegressionImpl() { }
virtual ~LogisticRegressionImpl() {}
inline double getLearningRate() const CV_OVERRIDE { return params.alpha; }
inline void setLearningRate(double val) CV_OVERRIDE { params.alpha = val; }
inline int getIterations() const CV_OVERRIDE { return params.num_iters; }
inline void setIterations(int val) CV_OVERRIDE { params.num_iters = val; }
inline int getRegularization() const CV_OVERRIDE { return params.norm; }
inline void setRegularization(int val) CV_OVERRIDE { params.norm = val; }
inline int getTrainMethod() const CV_OVERRIDE { return params.train_method; }
inline void setTrainMethod(int val) CV_OVERRIDE { params.train_method = val; }
inline int getMiniBatchSize() const CV_OVERRIDE { return params.mini_batch_size; }
inline void setMiniBatchSize(int val) CV_OVERRIDE { params.mini_batch_size = val; }
inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.term_crit; }
inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.term_crit = val; }
virtual bool train( const Ptr<TrainData>& trainData, int=0 ) CV_OVERRIDE;
virtual float predict(InputArray samples, OutputArray results, int flags=0) const CV_OVERRIDE;
virtual void clear() CV_OVERRIDE;
virtual void write(FileStorage& fs) const CV_OVERRIDE;
virtual void read(const FileNode& fn) CV_OVERRIDE;
virtual Mat get_learnt_thetas() const CV_OVERRIDE { return learnt_thetas; }
virtual int getVarCount() const CV_OVERRIDE { return learnt_thetas.cols; }
virtual bool isTrained() const CV_OVERRIDE { return !learnt_thetas.empty(); }
virtual bool isClassifier() const CV_OVERRIDE { return true; }
virtual String getDefaultName() const CV_OVERRIDE { return "opencv_ml_lr"; }
protected:
Mat calc_sigmoid(const Mat& data) const;
double compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
void compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient );
Mat batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
Mat mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
bool set_label_map(const Mat& _labels_i);
Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
protected:
LrParams params;
Mat learnt_thetas;
map<int, int> forward_mapper;
map<int, int> reverse_mapper;
Mat labels_o;
Mat labels_n;
};
Ptr<LogisticRegression> LogisticRegression::create()
{
return makePtr<LogisticRegressionImpl>();
}
Ptr<LogisticRegression> LogisticRegression::load(const String& filepath, const String& nodeName)
{
return Algorithm::load<LogisticRegression>(filepath, nodeName);
}
bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
{
CV_TRACE_FUNCTION_SKIP_NESTED();
CV_Assert(!trainData.empty());
// return value
bool ok = false;
clear();
Mat _data_i = trainData->getSamples();
Mat _labels_i = trainData->getResponses();
// check size and type of training data
CV_Assert( !_labels_i.empty() && !_data_i.empty());
if(_labels_i.cols != 1)
{
CV_Error( CV_StsBadArg, "labels should be a column matrix" );
}
if(_data_i.type() != CV_32FC1 || _labels_i.type() != CV_32FC1)
{
CV_Error( CV_StsBadArg, "data and labels must be a floating point matrix" );
}
if(_labels_i.rows != _data_i.rows)
{
CV_Error( CV_StsBadArg, "number of rows in data and labels should be equal" );
}
// class labels
set_label_map(_labels_i);
Mat labels_l = remap_labels(_labels_i, this->forward_mapper);
int num_classes = (int) this->forward_mapper.size();
if(num_classes < 2)
{
CV_Error( CV_StsBadArg, "data should have at least 2 classes" );
}
// add a column of ones to the data (bias/intercept term)
Mat data_t;
hconcat( cv::Mat::ones( _data_i.rows, 1, CV_32F ), _data_i, data_t );
// coefficient matrix (zero-initialized)
Mat thetas;
Mat init_theta = Mat::zeros(data_t.cols, 1, CV_32F);
// fit the model (handles binary and multiclass cases)
Mat new_theta;
Mat labels;
if(num_classes == 2)
{
labels_l.convertTo(labels, CV_32F);
if(this->params.train_method == LogisticRegression::BATCH)
new_theta = batch_gradient_descent(data_t, labels, init_theta);
else
new_theta = mini_batch_gradient_descent(data_t, labels, init_theta);
thetas = new_theta.t();
}
else
{
/* train each class against the rest (one theta per class);
in the multi-class scenario we therefore get n thetas for n classes */
thetas.create(num_classes, data_t.cols, CV_32F);
Mat labels_binary;
int ii = 0;
for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
{
// one-vs-rest (OvR) scheme
labels_binary = (labels_l == it->second)/255;
labels_binary.convertTo(labels, CV_32F);
if(this->params.train_method == LogisticRegression::BATCH)
new_theta = batch_gradient_descent(data_t, labels, init_theta);
else
new_theta = mini_batch_gradient_descent(data_t, labels, init_theta);
hconcat(new_theta.t(), thetas.row(ii));
ii += 1;
}
}
// check that the estimates are stable and finite
this->learnt_thetas = thetas.clone();
if( cvIsNaN( (double)sum(this->learnt_thetas)[0] ) )
{
CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" );
}
// success
ok = true;
return ok;
}
float LogisticRegressionImpl::predict(InputArray samples, OutputArray results, int flags) const
{
// check if the learnt_thetas matrix is populated
if(!this->isTrained())
{
CV_Error( CV_StsBadArg, "classifier should be trained first" );
}
// coefficient matrix
Mat thetas;
if ( learnt_thetas.type() == CV_32F )
{
thetas = learnt_thetas;
}
else
{
this->learnt_thetas.convertTo( thetas, CV_32F );
}
CV_Assert(thetas.rows > 0);
// data samples
Mat data = samples.getMat();
if(data.type() != CV_32F)
{
CV_Error( CV_StsBadArg, "data must be of floating type" );
}
// add a column of ones to the data (bias/intercept term)
Mat data_t;
hconcat( cv::Mat::ones( data.rows, 1, CV_32F ), data, data_t );
CV_Assert(data_t.cols == thetas.cols);
// predict class labels for samples (handles binary and multiclass cases)
Mat labels_c;
Mat pred_m;
Mat temp_pred;
if(thetas.rows == 1)
{
// apply sigmoid function
temp_pred = calc_sigmoid(data_t * thetas.t());
CV_Assert(temp_pred.cols==1);
pred_m = temp_pred.clone();
// if the sigmoid output is greater than 0.5, predict class 1, otherwise class 0
temp_pred = (temp_pred > 0.5f) / 255;
temp_pred.convertTo(labels_c, CV_32S);
}
else
{
// apply sigmoid function
pred_m.create(data_t.rows, thetas.rows, data.type());
for(int i = 0; i < thetas.rows; i++)
{
temp_pred = calc_sigmoid(data_t * thetas.row(i).t());
vconcat(temp_pred, pred_m.col(i));
}
// predict class with the maximum output
Point max_loc;
Mat labels;
for(int i = 0; i < pred_m.rows; i++)
{
temp_pred = pred_m.row(i);
minMaxLoc( temp_pred, NULL, NULL, NULL, &max_loc );
labels.push_back(max_loc.x);
}
labels.convertTo(labels_c, CV_32S);
}
// return label of the predicted class. class names can be 1,2,3,...
Mat pred_labs = remap_labels(labels_c, this->reverse_mapper);
pred_labs.convertTo(pred_labs, CV_32S);
// return either the labels or the raw output
if ( results.needed() )
{
if ( flags & StatModel::RAW_OUTPUT )
{
pred_m.copyTo( results );
}
else
{
pred_labs.copyTo(results);
}
}
return ( pred_labs.empty() ? 0.f : static_cast<float>(pred_labs.at<int>(0)) );
}
Mat LogisticRegressionImpl::calc_sigmoid(const Mat& data) const
{
CV_TRACE_FUNCTION();
Mat dest;
exp(-data, dest);
return 1.0/(1.0+dest);
}
double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
CV_TRACE_FUNCTION();
float llambda = 0; /*changed llambda from int to float to solve issue #7924*/
int m;
int n;
double cost = 0;
double rparameter = 0;
Mat theta_b;
Mat theta_c;
Mat d_a;
Mat d_b;
m = _data.rows;
n = _data.cols;
theta_b = _init_theta(Range(1, n), Range::all());
if (params.norm != REG_DISABLE)
{
llambda = 1;
}
if(this->params.norm == LogisticRegression::REG_L1)
{
rparameter = (llambda/(2*m)) * sum(theta_b)[0];
}
else
{
// assuming it to be L2 by default
multiply(theta_b, theta_b, theta_c, 1);
rparameter = (llambda/(2*m)) * sum(theta_c)[0];
}
d_a = calc_sigmoid(_data * _init_theta);
log(d_a, d_a);
multiply(d_a, _labels, d_a);
// use the fact that: log(1 - sigmoid(x)) = log(sigmoid(-x))
d_b = calc_sigmoid(- _data * _init_theta);
log(d_b, d_b);
multiply(d_b, 1-_labels, d_b);
cost = (-1.0/m) * (sum(d_a)[0] + sum(d_b)[0]);
cost = cost + rparameter;
if(cvIsNaN( cost ) == 1)
{
CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" );
}
return cost;
}
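// For reference, the cost computed above is the regularized cross-entropy
// (a reconstruction from the code, using log(1 - sigmoid(x)) = log(sigmoid(-x))):
//
//   J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\big[y_i\log\sigma(x_i\theta) + (1-y_i)\log\sigma(-x_i\theta)\big] + R(\theta)
//
// where R(\theta) = \frac{\lambda}{2m}\sum_{j\ge 1}\theta_j^2 for REG_L2 and, as written
// in this implementation, \frac{\lambda}{2m}\sum_{j\ge 1}\theta_j for REG_L1; the bias
// term \theta_0 is excluded from the regularizer in both cases.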
struct LogisticRegressionImpl_ComputeDradient_Impl : ParallelLoopBody
{
const Mat* data;
const Mat* theta;
const Mat* pcal_a;
Mat* gradient;
double lambda;
LogisticRegressionImpl_ComputeDradient_Impl(const Mat& _data, const Mat &_theta, const Mat& _pcal_a, const double _lambda, Mat & _gradient)
: data(&_data)
, theta(&_theta)
, pcal_a(&_pcal_a)
, gradient(&_gradient)
, lambda(_lambda)
{
}
void operator()(const cv::Range& r) const CV_OVERRIDE
{
const Mat& _data = *data;
const Mat &_theta = *theta;
Mat & _gradient = *gradient;
const Mat & _pcal_a = *pcal_a;
const int m = _data.rows;
Mat pcal_ab;
for (int ii = r.start; ii<r.end; ii++)
{
Mat pcal_b = _data(Range::all(), Range(ii,ii+1));
multiply(_pcal_a, pcal_b, pcal_ab, 1);
_gradient.row(ii) = (1.0/m)*sum(pcal_ab)[0] + (lambda/m) * _theta.row(ii);
}
}
};
void LogisticRegressionImpl::compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient )
{
CV_TRACE_FUNCTION();
const int m = _data.rows;
Mat pcal_a, pcal_b, pcal_ab;
const Mat z = _data * _theta;
CV_Assert( _gradient.rows == _theta.rows && _gradient.cols == _theta.cols );
pcal_a = calc_sigmoid(z) - _labels;
pcal_b = _data(Range::all(), Range(0,1));
multiply(pcal_a, pcal_b, pcal_ab, 1);
_gradient.row(0) = ((float)1/m) * sum(pcal_ab)[0];
//cout<<"for each training data entry"<<endl;
LogisticRegressionImpl_ComputeDradient_Impl invoker(_data, _theta, pcal_a, _lambda, _gradient);
cv::parallel_for_(cv::Range(1, _gradient.rows), invoker);
}
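// For reference, the gradient assembled above (bias row handled directly, the
// remaining rows via the parallel body) corresponds to:
//
//   \partial J/\partial\theta_0 = \frac{1}{m}\sum_{i=1}^{m}\big(\sigma(x_i\theta) - y_i\big)x_{i0}
//   \partial J/\partial\theta_j = \frac{1}{m}\sum_{i=1}^{m}\big(\sigma(x_i\theta) - y_i\big)x_{ij} + \frac{\lambda}{m}\theta_j,\quad j \ge 1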
Mat LogisticRegressionImpl::batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
CV_TRACE_FUNCTION();
// implements batch gradient descent
if(this->params.alpha<=0)
{
CV_Error( CV_StsBadArg, "check training parameters (learning rate) for the classifier" );
}
if(this->params.num_iters <= 0)
{
CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" );
}
int llambda = 0;
int m;
Mat theta_p = _init_theta.clone();
Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() );
m = _data.rows;
if (params.norm != REG_DISABLE)
{
llambda = 1;
}
for(int i = 0;i<this->params.num_iters;i++)
{
// this seems to only be called to ensure that cost is not NaN
compute_cost(_data, _labels, theta_p);
compute_gradient( _data, _labels, theta_p, llambda, gradient );
theta_p = theta_p - ( static_cast<double>(this->params.alpha)/m)*gradient;
}
return theta_p;
}
Mat LogisticRegressionImpl::mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
// implements mini-batch gradient descent
int lambda_l = 0;
int m;
int j = 0;
int size_b = this->params.mini_batch_size;
if(this->params.mini_batch_size <= 0 || this->params.alpha == 0)
{
CV_Error( CV_StsBadArg, "check training parameters for the classifier" );
}
if(this->params.num_iters <= 0)
{
CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" );
}
Mat theta_p = _init_theta.clone();
Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() );
Mat data_d;
Mat labels_l;
if (params.norm != REG_DISABLE)
{
lambda_l = 1;
}
for(int i = 0;i<this->params.term_crit.maxCount;i++)
{
if(j+size_b<=_data.rows)
{
data_d = _data(Range(j,j+size_b), Range::all());
labels_l = _labels(Range(j,j+size_b),Range::all());
}
else
{
data_d = _data(Range(j, _data.rows), Range::all());
labels_l = _labels(Range(j, _labels.rows),Range::all());
}
m = data_d.rows;
// this seems to only be called to ensure that cost is not NaN
compute_cost(data_d, labels_l, theta_p);
compute_gradient(data_d, labels_l, theta_p, lambda_l, gradient);
theta_p = theta_p - ( static_cast<double>(this->params.alpha)/m)*gradient;
j += this->params.mini_batch_size;
// wrap around once all training samples have been used
if (j >= _data.rows) {
j = 0;
}
}
return theta_p;
}
bool LogisticRegressionImpl::set_label_map(const Mat &_labels_i)
{
// this function creates two maps that translate between user-defined labels and internal (0-based) labels, in both directions.
int ii = 0;
Mat labels;
this->labels_o = Mat(0,1, CV_8U);
this->labels_n = Mat(0,1, CV_8U);
_labels_i.convertTo(labels, CV_32S);
for(int i = 0;i<labels.rows;i++)
{
this->forward_mapper[labels.at<int>(i)] += 1;
}
for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
{
this->forward_mapper[it->first] = ii;
this->labels_o.push_back(it->first);
this->labels_n.push_back(ii);
ii += 1;
}
for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
{
this->reverse_mapper[it->second] = it->first;
}
return true;
}
Mat LogisticRegressionImpl::remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const
{
Mat labels;
_labels_i.convertTo(labels, CV_32S);
Mat new_labels = Mat::zeros(labels.rows, labels.cols, labels.type());
CV_Assert( !lmap.empty() );
for(int i =0;i<labels.rows;i++)
{
map<int, int>::const_iterator val = lmap.find(labels.at<int>(i,0));
CV_Assert(val != lmap.end());
new_labels.at<int>(i,0) = val->second;
}
return new_labels;
}
void LogisticRegressionImpl::clear()
{
this->learnt_thetas.release();
this->labels_o.release();
this->labels_n.release();
}
void LogisticRegressionImpl::write(FileStorage& fs) const
{
// check if open
if(fs.isOpened() == 0)
{
CV_Error(CV_StsBadArg,"file can't open. Check file path");
}
writeFormat(fs);
string desc = "Logistic Regression Classifier";
fs<<"classifier"<<desc.c_str();
fs<<"alpha"<<this->params.alpha;
fs<<"iterations"<<this->params.num_iters;
fs<<"norm"<<this->params.norm;
fs<<"train_method"<<this->params.train_method;
if(this->params.train_method == LogisticRegression::MINI_BATCH)
{
fs<<"mini_batch_size"<<this->params.mini_batch_size;
}
fs<<"learnt_thetas"<<this->learnt_thetas;
fs<<"n_labels"<<this->labels_n;
fs<<"o_labels"<<this->labels_o;
}
void LogisticRegressionImpl::read(const FileNode& fn)
{
// check if empty
if(fn.empty())
{
CV_Error( CV_StsBadArg, "empty FileNode object" );
}
this->params.alpha = (double)fn["alpha"];
this->params.num_iters = (int)fn["iterations"];
this->params.norm = (int)fn["norm"];
this->params.train_method = (int)fn["train_method"];
if(this->params.train_method == LogisticRegression::MINI_BATCH)
{
this->params.mini_batch_size = (int)fn["mini_batch_size"];
}
fn["learnt_thetas"] >> this->learnt_thetas;
fn["o_labels"] >> this->labels_o;
fn["n_labels"] >> this->labels_n;
for(int ii =0;ii<labels_o.rows;ii++)
{
this->forward_mapper[labels_o.at<int>(ii,0)] = labels_n.at<int>(ii,0);
this->reverse_mapper[labels_n.at<int>(ii,0)] = labels_o.at<int>(ii,0);
}
}
}
}
/* End of file. */
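For reference, a minimal usage sketch of the LogisticRegression interface removed above, on hypothetical toy data (labels must be a CV_32F column, as the train() checks above require):
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main()
{
    // two tiny 1-D classes centred near 0 and 1
    Mat data   = (Mat_<float>(6, 1) << 0.10f, 0.20f, 0.05f, 0.90f, 1.10f, 0.95f);
    Mat labels = (Mat_<float>(6, 1) << 0.f, 0.f, 0.f, 1.f, 1.f, 1.f);
    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.5);
    lr->setIterations(500);
    lr->setRegularization(LogisticRegression::REG_L2);
    lr->setTrainMethod(LogisticRegression::BATCH);
    lr->train(TrainData::create(data, ROW_SAMPLE, labels));
    Mat predictions;
    lr->predict(data, predictions);          // CV_32S labels, remapped to the original class names
    Mat thetas = lr->get_learnt_thetas();    // learnt coefficients, one row per fitted model
    return 0;
}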

View File

@ -1,471 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv {
namespace ml {
class NormalBayesClassifierImpl : public NormalBayesClassifier
{
public:
NormalBayesClassifierImpl()
{
nallvars = 0;
}
bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_Assert(!trainData.empty());
const float min_variation = FLT_EPSILON;
Mat responses = trainData->getNormCatResponses();
Mat __cls_labels = trainData->getClassLabels();
Mat __var_idx = trainData->getVarIdx();
Mat samples = trainData->getTrainSamples();
int nclasses = (int)__cls_labels.total();
int nvars = trainData->getNVars();
int s, c1, c2, cls;
int __nallvars = trainData->getNAllVars();
bool update = (flags & UPDATE_MODEL) != 0;
if( !update )
{
nallvars = __nallvars;
count.resize(nclasses);
sum.resize(nclasses);
productsum.resize(nclasses);
avg.resize(nclasses);
inv_eigen_values.resize(nclasses);
cov_rotate_mats.resize(nclasses);
for( cls = 0; cls < nclasses; cls++ )
{
count[cls] = Mat::zeros( 1, nvars, CV_32SC1 );
sum[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
productsum[cls] = Mat::zeros( nvars, nvars, CV_64FC1 );
avg[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
inv_eigen_values[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
cov_rotate_mats[cls] = Mat::zeros( nvars, nvars, CV_64FC1 );
}
var_idx = __var_idx;
cls_labels = __cls_labels;
c.create(1, nclasses, CV_64FC1);
}
else
{
// check that the new training data has the same dimensionality etc.
if( nallvars != __nallvars ||
var_idx.size() != __var_idx.size() ||
norm(var_idx, __var_idx, NORM_INF) != 0 ||
cls_labels.size() != __cls_labels.size() ||
norm(cls_labels, __cls_labels, NORM_INF) != 0 )
CV_Error( CV_StsBadArg,
"The new training data is inconsistent with the original training data; varIdx and the class labels should be the same" );
}
Mat cov( nvars, nvars, CV_64FC1 );
int nsamples = samples.rows;
// process training data (count, sum, productsum)
for( s = 0; s < nsamples; s++ )
{
cls = responses.at<int>(s);
int* count_data = count[cls].ptr<int>();
double* sum_data = sum[cls].ptr<double>();
double* prod_data = productsum[cls].ptr<double>();
const float* train_vec = samples.ptr<float>(s);
for( c1 = 0; c1 < nvars; c1++, prod_data += nvars )
{
double val1 = train_vec[c1];
sum_data[c1] += val1;
count_data[c1]++;
for( c2 = c1; c2 < nvars; c2++ )
prod_data[c2] += train_vec[c2]*val1;
}
}
Mat vt;
// calculate avg, covariance matrix, c
for( cls = 0; cls < nclasses; cls++ )
{
double det = 1;
int i, j;
Mat& w = inv_eigen_values[cls];
int* count_data = count[cls].ptr<int>();
double* avg_data = avg[cls].ptr<double>();
double* sum1 = sum[cls].ptr<double>();
completeSymm(productsum[cls], 0);
for( j = 0; j < nvars; j++ )
{
int n = count_data[j];
avg_data[j] = n ? sum1[j] / n : 0.;
}
count_data = count[cls].ptr<int>();
avg_data = avg[cls].ptr<double>();
sum1 = sum[cls].ptr<double>();
for( i = 0; i < nvars; i++ )
{
double* avg2_data = avg[cls].ptr<double>();
double* sum2 = sum[cls].ptr<double>();
double* prod_data = productsum[cls].ptr<double>(i);
double* cov_data = cov.ptr<double>(i);
double s1val = sum1[i];
double avg1 = avg_data[i];
int _count = count_data[i];
for( j = 0; j <= i; j++ )
{
double avg2 = avg2_data[j];
double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count;
cov_val = (_count > 1) ? cov_val / (_count - 1) : cov_val;
cov_data[j] = cov_val;
}
}
completeSymm( cov, 1 );
SVD::compute(cov, w, cov_rotate_mats[cls], noArray());
transpose(cov_rotate_mats[cls], cov_rotate_mats[cls]);
cv::max(w, min_variation, w);
for( j = 0; j < nvars; j++ )
det *= w.at<double>(j);
divide(1., w, w);
c.at<double>(cls) = det > 0 ? log(det) : -700;
}
return true;
}
class NBPredictBody : public ParallelLoopBody
{
public:
NBPredictBody( const Mat& _c, const vector<Mat>& _cov_rotate_mats,
const vector<Mat>& _inv_eigen_values,
const vector<Mat>& _avg,
const Mat& _samples, const Mat& _vidx, const Mat& _cls_labels,
Mat& _results, Mat& _results_prob, bool _rawOutput )
{
c = &_c;
cov_rotate_mats = &_cov_rotate_mats;
inv_eigen_values = &_inv_eigen_values;
avg = &_avg;
samples = &_samples;
vidx = &_vidx;
cls_labels = &_cls_labels;
results = &_results;
results_prob = !_results_prob.empty() ? &_results_prob : 0;
rawOutput = _rawOutput;
value = 0;
}
const Mat* c;
const vector<Mat>* cov_rotate_mats;
const vector<Mat>* inv_eigen_values;
const vector<Mat>* avg;
const Mat* samples;
const Mat* vidx;
const Mat* cls_labels;
Mat* results_prob;
Mat* results;
float* value;
bool rawOutput;
void operator()(const Range& range) const CV_OVERRIDE
{
int cls = -1;
int rtype = 0, rptype = 0;
size_t rstep = 0, rpstep = 0;
int nclasses = (int)cls_labels->total();
int nvars = avg->at(0).cols;
double probability = 0;
const int* vptr = vidx && !vidx->empty() ? vidx->ptr<int>() : 0;
if (results)
{
rtype = results->type();
rstep = results->isContinuous() ? 1 : results->step/results->elemSize();
}
if (results_prob)
{
rptype = results_prob->type();
rpstep = results_prob->isContinuous() ? results_prob->cols : results_prob->step/results_prob->elemSize();
}
// allocate memory and initialize matrix headers for the calculations
cv::AutoBuffer<double> _buffer(nvars*2);
double* _diffin = _buffer.data();
double* _diffout = _buffer.data() + nvars;
Mat diffin( 1, nvars, CV_64FC1, _diffin );
Mat diffout( 1, nvars, CV_64FC1, _diffout );
for(int k = range.start; k < range.end; k++ )
{
double opt = FLT_MAX;
for(int i = 0; i < nclasses; i++ )
{
double cur = c->at<double>(i);
const Mat& u = cov_rotate_mats->at(i);
const Mat& w = inv_eigen_values->at(i);
const double* avg_data = avg->at(i).ptr<double>();
const float* x = samples->ptr<float>(k);
// cov = u w u' --> cov^(-1) = u w^(-1) u'
for(int j = 0; j < nvars; j++ )
_diffin[j] = avg_data[j] - x[vptr ? vptr[j] : j];
gemm( diffin, u, 1, noArray(), 0, diffout, GEMM_2_T );
for(int j = 0; j < nvars; j++ )
{
double d = _diffout[j];
cur += d*d*w.ptr<double>()[j];
}
if( cur < opt )
{
cls = i;
opt = cur;
}
probability = exp( -0.5 * cur );
if( results_prob )
{
if ( rptype == CV_32FC1 )
results_prob->ptr<float>()[k*rpstep + i] = (float)probability;
else
results_prob->ptr<double>()[k*rpstep + i] = probability;
}
}
int ival = rawOutput ? cls : cls_labels->at<int>(cls);
if( results )
{
if( rtype == CV_32SC1 )
results->ptr<int>()[k*rstep] = ival;
else
results->ptr<float>()[k*rstep] = (float)ival;
}
}
}
};
float predict( InputArray _samples, OutputArray _results, int flags ) const CV_OVERRIDE
{
return predictProb(_samples, _results, noArray(), flags);
}
float predictProb( InputArray _samples, OutputArray _results, OutputArray _resultsProb, int flags ) const CV_OVERRIDE
{
int value=0;
Mat samples = _samples.getMat(), results, resultsProb;
int nsamples = samples.rows, nclasses = (int)cls_labels.total();
bool rawOutput = (flags & RAW_OUTPUT) != 0;
if( samples.type() != CV_32F || samples.cols != nallvars )
CV_Error( CV_StsBadArg,
"The input samples must be 32f matrix with the number of columns = nallvars" );
if( (samples.rows > 1) && (! _results.needed()) )
CV_Error( CV_StsNullPtr,
"When the number of input samples is >1, the output vector of results must be passed" );
if( _results.needed() )
{
_results.create(nsamples, 1, CV_32S);
results = _results.getMat();
}
else
results = Mat(1, 1, CV_32S, &value);
if( _resultsProb.needed() )
{
_resultsProb.create(nsamples, nclasses, CV_32F);
resultsProb = _resultsProb.getMat();
}
cv::parallel_for_(cv::Range(0, nsamples),
NBPredictBody(c, cov_rotate_mats, inv_eigen_values, avg, samples,
var_idx, cls_labels, results, resultsProb, rawOutput));
return (float)value;
}
void write( FileStorage& fs ) const CV_OVERRIDE
{
int nclasses = (int)cls_labels.total(), i;
writeFormat(fs);
fs << "var_count" << (var_idx.empty() ? nallvars : (int)var_idx.total());
fs << "var_all" << nallvars;
if( !var_idx.empty() )
fs << "var_idx" << var_idx;
fs << "cls_labels" << cls_labels;
fs << "count" << "[";
for( i = 0; i < nclasses; i++ )
fs << count[i];
fs << "]" << "sum" << "[";
for( i = 0; i < nclasses; i++ )
fs << sum[i];
fs << "]" << "productsum" << "[";
for( i = 0; i < nclasses; i++ )
fs << productsum[i];
fs << "]" << "avg" << "[";
for( i = 0; i < nclasses; i++ )
fs << avg[i];
fs << "]" << "inv_eigen_values" << "[";
for( i = 0; i < nclasses; i++ )
fs << inv_eigen_values[i];
fs << "]" << "cov_rotate_mats" << "[";
for( i = 0; i < nclasses; i++ )
fs << cov_rotate_mats[i];
fs << "]";
fs << "c" << c;
}
void read( const FileNode& fn ) CV_OVERRIDE
{
clear();
fn["var_all"] >> nallvars;
if( nallvars <= 0 )
CV_Error( CV_StsParseError,
"The field \"var_count\" of NBayes classifier is missing or non-positive" );
fn["var_idx"] >> var_idx;
fn["cls_labels"] >> cls_labels;
int nclasses = (int)cls_labels.total(), i;
if( cls_labels.empty() || nclasses < 1 )
CV_Error( CV_StsParseError, "No or invalid \"cls_labels\" in NBayes classifier" );
FileNodeIterator
count_it = fn["count"].begin(),
sum_it = fn["sum"].begin(),
productsum_it = fn["productsum"].begin(),
avg_it = fn["avg"].begin(),
inv_eigen_values_it = fn["inv_eigen_values"].begin(),
cov_rotate_mats_it = fn["cov_rotate_mats"].begin();
count.resize(nclasses);
sum.resize(nclasses);
productsum.resize(nclasses);
avg.resize(nclasses);
inv_eigen_values.resize(nclasses);
cov_rotate_mats.resize(nclasses);
for( i = 0; i < nclasses; i++, ++count_it, ++sum_it, ++productsum_it, ++avg_it,
++inv_eigen_values_it, ++cov_rotate_mats_it )
{
*count_it >> count[i];
*sum_it >> sum[i];
*productsum_it >> productsum[i];
*avg_it >> avg[i];
*inv_eigen_values_it >> inv_eigen_values[i];
*cov_rotate_mats_it >> cov_rotate_mats[i];
}
fn["c"] >> c;
}
void clear() CV_OVERRIDE
{
count.clear();
sum.clear();
productsum.clear();
avg.clear();
inv_eigen_values.clear();
cov_rotate_mats.clear();
var_idx.release();
cls_labels.release();
c.release();
nallvars = 0;
}
bool isTrained() const CV_OVERRIDE { return !avg.empty(); }
bool isClassifier() const CV_OVERRIDE { return true; }
int getVarCount() const CV_OVERRIDE { return nallvars; }
String getDefaultName() const CV_OVERRIDE { return "opencv_ml_nbayes"; }
int nallvars;
Mat var_idx, cls_labels, c;
vector<Mat> count, sum, productsum, avg, inv_eigen_values, cov_rotate_mats;
};
Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
{
Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
return p;
}
Ptr<NormalBayesClassifier> NormalBayesClassifier::load(const String& filepath, const String& nodeName)
{
return Algorithm::load<NormalBayesClassifier>(filepath, nodeName);
}
}
}
/* End of file. */
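For reference, a minimal usage sketch of the NormalBayesClassifier interface removed above, on hypothetical toy data:
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main()
{
    // two small 2-D clusters
    Mat samples = (Mat_<float>(6, 2) << 0.0f, 0.0f,  0.1f, 0.2f,  0.2f, 0.1f,
                                        2.0f, 2.0f,  2.1f, 1.9f,  1.8f, 2.2f);
    Mat responses = (Mat_<int>(6, 1) << 0, 0, 0, 1, 1, 1);
    Ptr<NormalBayesClassifier> nb = NormalBayesClassifier::create();
    nb->train(TrainData::create(samples, ROW_SAMPLE, responses));
    Mat results, probs;
    nb->predictProb(samples, results, probs);   // predicted labels and (unnormalized) per-class likelihoods
    return 0;
}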

View File

@ -1,400 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_ML_PRECOMP_HPP__
#define __OPENCV_ML_PRECOMP_HPP__
#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <vector>
/****************************************************************************************\
* Main struct definitions *
\****************************************************************************************/
/* log(2*PI) */
#define CV_LOG2PI (1.8378770664093454835606594728112)
namespace cv
{
namespace ml
{
using std::vector;
#define CV_DTREE_CAT_DIR(idx,subset) \
(2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1)
template<typename _Tp> struct cmp_lt_idx
{
cmp_lt_idx(const _Tp* _arr) : arr(_arr) {}
bool operator ()(int a, int b) const { return arr[a] < arr[b]; }
const _Tp* arr;
};
template<typename _Tp> struct cmp_lt_ptr
{
cmp_lt_ptr() {}
bool operator ()(const _Tp* a, const _Tp* b) const { return *a < *b; }
};
static inline void setRangeVector(std::vector<int>& vec, int n)
{
vec.resize(n);
for( int i = 0; i < n; i++ )
vec[i] = i;
}
static inline void writeTermCrit(FileStorage& fs, const TermCriteria& termCrit)
{
if( (termCrit.type & TermCriteria::EPS) != 0 )
fs << "epsilon" << termCrit.epsilon;
if( (termCrit.type & TermCriteria::COUNT) != 0 )
fs << "iterations" << termCrit.maxCount;
}
static inline TermCriteria readTermCrit(const FileNode& fn)
{
TermCriteria termCrit;
double epsilon = (double)fn["epsilon"];
if( epsilon > 0 )
{
termCrit.type |= TermCriteria::EPS;
termCrit.epsilon = epsilon;
}
int iters = (int)fn["iterations"];
if( iters > 0 )
{
termCrit.type |= TermCriteria::COUNT;
termCrit.maxCount = iters;
}
return termCrit;
}
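// A minimal sketch (in-memory FileStorage, hypothetical key name) of how the two
// helpers above are expected to round-trip a TermCriteria:
//
//   FileStorage out(".yml", FileStorage::WRITE | FileStorage::MEMORY);
//   out << "term_criteria" << "{";
//   writeTermCrit(out, TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-6));
//   out << "}";
//   FileStorage in(out.releaseAndGetString(), FileStorage::READ | FileStorage::MEMORY);
//   TermCriteria tc = readTermCrit(in["term_criteria"]);   // maxCount == 100, epsilon == 1e-6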
struct TreeParams
{
TreeParams();
TreeParams( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, int CVFolds,
bool use1SERule, bool truncatePrunedTree,
const Mat& priors );
inline void setMaxCategories(int val)
{
if( val < 2 )
CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
maxCategories = std::min(val, 15 );
}
inline void setMaxDepth(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
maxDepth = std::min( val, 25 );
}
inline void setMinSampleCount(int val)
{
minSampleCount = std::max(val, 1);
}
inline void setCVFolds(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );
if(val > 1)
CV_Error( CV_StsNotImplemented,
"tree pruning using cross-validation is not implemented."
"Set CVFolds to 1");
if( val == 1 )
val = 0;
CVFolds = val;
}
inline void setRegressionAccuracy(float val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
regressionAccuracy = val;
}
inline int getMaxCategories() const { return maxCategories; }
inline int getMaxDepth() const { return maxDepth; }
inline int getMinSampleCount() const { return minSampleCount; }
inline int getCVFolds() const { return CVFolds; }
inline float getRegressionAccuracy() const { return regressionAccuracy; }
inline bool getUseSurrogates() const { return useSurrogates; }
inline void setUseSurrogates(bool val) { useSurrogates = val; }
inline bool getUse1SERule() const { return use1SERule; }
inline void setUse1SERule(bool val) { use1SERule = val; }
inline bool getTruncatePrunedTree() const { return truncatePrunedTree; }
inline void setTruncatePrunedTree(bool val) { truncatePrunedTree = val; }
inline cv::Mat getPriors() const { return priors; }
inline void setPriors(const cv::Mat& val) { priors = val; }
public:
bool useSurrogates;
bool use1SERule;
bool truncatePrunedTree;
Mat priors;
protected:
int maxCategories;
int maxDepth;
int minSampleCount;
int CVFolds;
float regressionAccuracy;
};
struct RTreeParams
{
RTreeParams();
RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
bool calcVarImportance;
int nactiveVars;
TermCriteria termCrit;
};
struct BoostTreeParams
{
BoostTreeParams();
BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
int boostType;
int weakCount;
double weightTrimRate;
};
class DTreesImpl : public DTrees
{
public:
struct WNode
{
WNode()
{
class_idx = sample_count = depth = complexity = 0;
parent = left = right = split = defaultDir = -1;
Tn = INT_MAX;
value = maxlr = alpha = node_risk = tree_risk = tree_error = 0.;
}
int class_idx;
double Tn;
double value;
int parent;
int left;
int right;
int defaultDir;
int split;
int sample_count;
int depth;
double maxlr;
// global pruning data
int complexity;
double alpha;
double node_risk, tree_risk, tree_error;
};
struct WSplit
{
WSplit()
{
varIdx = next = 0;
inversed = false;
quality = c = 0.f;
subsetOfs = -1;
}
int varIdx;
bool inversed;
float quality;
int next;
float c;
int subsetOfs;
};
struct WorkData
{
WorkData(const Ptr<TrainData>& _data);
Ptr<TrainData> data;
vector<WNode> wnodes;
vector<WSplit> wsplits;
vector<int> wsubsets;
vector<double> cv_Tn;
vector<double> cv_node_risk;
vector<double> cv_node_error;
vector<int> cv_labels;
vector<double> sample_weights;
vector<int> cat_responses;
vector<double> ord_responses;
vector<int> sidx;
int maxSubsetSize;
};
inline int getMaxCategories() const CV_OVERRIDE { return params.getMaxCategories(); }
inline void setMaxCategories(int val) CV_OVERRIDE { params.setMaxCategories(val); }
inline int getMaxDepth() const CV_OVERRIDE { return params.getMaxDepth(); }
inline void setMaxDepth(int val) CV_OVERRIDE { params.setMaxDepth(val); }
inline int getMinSampleCount() const CV_OVERRIDE { return params.getMinSampleCount(); }
inline void setMinSampleCount(int val) CV_OVERRIDE { params.setMinSampleCount(val); }
inline int getCVFolds() const CV_OVERRIDE { return params.getCVFolds(); }
inline void setCVFolds(int val) CV_OVERRIDE { params.setCVFolds(val); }
inline bool getUseSurrogates() const CV_OVERRIDE { return params.getUseSurrogates(); }
inline void setUseSurrogates(bool val) CV_OVERRIDE { params.setUseSurrogates(val); }
inline bool getUse1SERule() const CV_OVERRIDE { return params.getUse1SERule(); }
inline void setUse1SERule(bool val) CV_OVERRIDE { params.setUse1SERule(val); }
inline bool getTruncatePrunedTree() const CV_OVERRIDE { return params.getTruncatePrunedTree(); }
inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { params.setTruncatePrunedTree(val); }
inline float getRegressionAccuracy() const CV_OVERRIDE { return params.getRegressionAccuracy(); }
inline void setRegressionAccuracy(float val) CV_OVERRIDE { params.setRegressionAccuracy(val); }
inline cv::Mat getPriors() const CV_OVERRIDE { return params.getPriors(); }
inline void setPriors(const cv::Mat& val) CV_OVERRIDE { params.setPriors(val); }
DTreesImpl();
virtual ~DTreesImpl() CV_OVERRIDE;
virtual void clear() CV_OVERRIDE;
String getDefaultName() const CV_OVERRIDE { return "opencv_ml_dtree"; }
bool isTrained() const CV_OVERRIDE { return !roots.empty(); }
bool isClassifier() const CV_OVERRIDE { return _isClassifier; }
int getVarCount() const CV_OVERRIDE { return varType.empty() ? 0 : (int)(varType.size() - 1); }
int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }
virtual void setDParams(const TreeParams& _params);
virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
virtual void endTraining();
virtual void initCompVarIdx();
virtual bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE;
virtual int addTree( const vector<int>& sidx );
virtual int addNodeAndTrySplit( int parent, const vector<int>& sidx );
virtual const vector<int>& getActiveVars();
virtual int findBestSplit( const vector<int>& _sidx );
virtual void calcValue( int nidx, const vector<int>& _sidx );
virtual WSplit findSplitOrdClass( int vi, const vector<int>& _sidx, double initQuality );
// simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector.
virtual void clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels );
virtual WSplit findSplitCatClass( int vi, const vector<int>& _sidx, double initQuality, int* subset );
virtual WSplit findSplitOrdReg( int vi, const vector<int>& _sidx, double initQuality );
virtual WSplit findSplitCatReg( int vi, const vector<int>& _sidx, double initQuality, int* subset );
virtual int calcDir( int splitidx, const vector<int>& _sidx, vector<int>& _sleft, vector<int>& _sright );
virtual int pruneCV( int root );
virtual double updateTreeRNC( int root, double T, int fold );
virtual bool cutTree( int root, double T, int fold, double min_alpha );
virtual float predictTrees( const Range& range, const Mat& sample, int flags ) const;
virtual float predict( InputArray inputs, OutputArray outputs, int flags ) const CV_OVERRIDE;
virtual void writeTrainingParams( FileStorage& fs ) const;
virtual void writeParams( FileStorage& fs ) const;
virtual void writeSplit( FileStorage& fs, int splitidx ) const;
virtual void writeNode( FileStorage& fs, int nidx, int depth ) const;
virtual void writeTree( FileStorage& fs, int root ) const;
virtual void write( FileStorage& fs ) const CV_OVERRIDE;
virtual void readParams( const FileNode& fn );
virtual int readSplit( const FileNode& fn );
virtual int readNode( const FileNode& fn );
virtual int readTree( const FileNode& fn );
virtual void read( const FileNode& fn ) CV_OVERRIDE;
virtual const std::vector<int>& getRoots() const CV_OVERRIDE { return roots; }
virtual const std::vector<Node>& getNodes() const CV_OVERRIDE { return nodes; }
virtual const std::vector<Split>& getSplits() const CV_OVERRIDE { return splits; }
virtual const std::vector<int>& getSubsets() const CV_OVERRIDE { return subsets; }
TreeParams params;
vector<int> varIdx;
vector<int> compVarIdx;
vector<uchar> varType;
vector<Vec2i> catOfs;
vector<int> catMap;
vector<int> roots;
vector<Node> nodes;
vector<Split> splits;
vector<int> subsets;
vector<int> classLabels;
vector<float> missingSubst;
vector<int> varMapping;
bool _isClassifier;
Ptr<WorkData> w;
};
template <typename T>
static inline void readVectorOrMat(const FileNode & node, std::vector<T> & v)
{
if (node.type() == FileNode::MAP)
{
Mat m;
node >> m;
m.copyTo(v);
}
else if (node.type() == FileNode::SEQ)
{
node >> v;
}
}
}}
#endif /* __OPENCV_ML_PRECOMP_HPP__ */

View File

@ -1,531 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv {
namespace ml {
//////////////////////////////////////////////////////////////////////////////////////////
// Random trees //
//////////////////////////////////////////////////////////////////////////////////////////
RTreeParams::RTreeParams()
{
CV_TRACE_FUNCTION();
calcVarImportance = false;
nactiveVars = 0;
termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
}
RTreeParams::RTreeParams(bool _calcVarImportance,
int _nactiveVars,
TermCriteria _termCrit )
{
CV_TRACE_FUNCTION();
calcVarImportance = _calcVarImportance;
nactiveVars = _nactiveVars;
termCrit = _termCrit;
}
class DTreesImplForRTrees CV_FINAL : public DTreesImpl
{
public:
DTreesImplForRTrees()
{
CV_TRACE_FUNCTION();
params.setMaxDepth(5);
params.setMinSampleCount(10);
params.setRegressionAccuracy(0.f);
params.useSurrogates = false;
params.setMaxCategories(10);
params.setCVFolds(0);
params.use1SERule = false;
params.truncatePrunedTree = false;
params.priors = Mat();
oobError = 0;
}
virtual ~DTreesImplForRTrees() {}
void clear() CV_OVERRIDE
{
CV_TRACE_FUNCTION();
DTreesImpl::clear();
oobError = 0.;
}
const vector<int>& getActiveVars() CV_OVERRIDE
{
CV_TRACE_FUNCTION();
RNG &rng = theRNG();
int i, nvars = (int)allVars.size(), m = (int)activeVars.size();
for( i = 0; i < nvars; i++ )
{
int i1 = rng.uniform(0, nvars);
int i2 = rng.uniform(0, nvars);
std::swap(allVars[i1], allVars[i2]);
}
for( i = 0; i < m; i++ )
activeVars[i] = allVars[i];
return activeVars;
}
void startTraining( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_Assert(!trainData.empty());
DTreesImpl::startTraining(trainData, flags);
int nvars = w->data->getNVars();
int i, m = rparams.nactiveVars > 0 ? rparams.nactiveVars : cvRound(std::sqrt((double)nvars));
m = std::min(std::max(m, 1), nvars);
allVars.resize(nvars);
activeVars.resize(m);
for( i = 0; i < nvars; i++ )
allVars[i] = varIdx[i];
}
void endTraining() CV_OVERRIDE
{
CV_TRACE_FUNCTION();
DTreesImpl::endTraining();
vector<int> a, b;
std::swap(allVars, a);
std::swap(activeVars, b);
}
bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
RNG &rng = theRNG();
CV_Assert(!trainData.empty());
startTraining(trainData, flags);
int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
rparams.termCrit.maxCount : 10000;
int i, j, k, vi, vi_, n = (int)w->sidx.size();
int nclasses = (int)classLabels.size();
double eps = (rparams.termCrit.type & TermCriteria::EPS) != 0 &&
rparams.termCrit.epsilon > 0 ? rparams.termCrit.epsilon : 0.;
vector<int> sidx(n);
vector<uchar> oobmask(n);
vector<int> oobidx;
vector<int> oobperm;
vector<double> oobres(n, 0.);
vector<int> oobcount(n, 0);
vector<int> oobvotes(n*nclasses, 0);
int nvars = w->data->getNVars();
int nallvars = w->data->getNAllVars();
const int* vidx = !varIdx.empty() ? &varIdx[0] : 0;
vector<float> samplebuf(nallvars);
Mat samples = w->data->getSamples();
float* psamples = samples.ptr<float>();
size_t sstep0 = samples.step1(), sstep1 = 1;
Mat sample0, sample(nallvars, 1, CV_32F, &samplebuf[0]);
int predictFlags = _isClassifier ? (PREDICT_MAX_VOTE + RAW_OUTPUT) : PREDICT_SUM;
bool calcOOBError = eps > 0 || rparams.calcVarImportance;
double max_response = 0.;
if( w->data->getLayout() == COL_SAMPLE )
std::swap(sstep0, sstep1);
if( !_isClassifier )
{
for( i = 0; i < n; i++ )
{
double val = std::abs(w->ord_responses[w->sidx[i]]);
max_response = std::max(max_response, val);
}
CV_Assert(fabs(max_response) > 0);
}
if( rparams.calcVarImportance )
varImportance.resize(nallvars, 0.f);
for( treeidx = 0; treeidx < ntrees; treeidx++ )
{
for( i = 0; i < n; i++ )
oobmask[i] = (uchar)1;
for( i = 0; i < n; i++ )
{
j = rng.uniform(0, n);
sidx[i] = w->sidx[j];
oobmask[j] = (uchar)0;
}
int root = addTree( sidx );
if( root < 0 )
return false;
if( calcOOBError )
{
oobidx.clear();
for( i = 0; i < n; i++ )
{
if( oobmask[i] )
oobidx.push_back(i);
}
int n_oob = (int)oobidx.size();
// if there are no out-of-bag samples, we cannot compute the OOB error
// nor update the variable importance vector, so we proceed to the next tree
if( n_oob == 0 )
continue;
double ncorrect_responses = 0.;
oobError = 0.;
for( i = 0; i < n_oob; i++ )
{
j = oobidx[i];
sample = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) );
double val = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags);
double sample_weight = w->sample_weights[w->sidx[j]];
if( !_isClassifier )
{
oobres[j] += val;
oobcount[j]++;
double true_val = w->ord_responses[w->sidx[j]];
double a = oobres[j]/oobcount[j] - true_val;
oobError += sample_weight * a*a;
val = (val - true_val)/max_response;
ncorrect_responses += std::exp( -val*val );
}
else
{
int ival = cvRound(val);
// voting scheme: combine the per-tree OOB predictions by majority vote
int* votes = &oobvotes[j*nclasses];
votes[ival]++;
int best_class = 0;
for( k = 1; k < nclasses; k++ )
if( votes[best_class] < votes[k] )
best_class = k;
int diff = best_class != w->cat_responses[w->sidx[j]];
oobError += sample_weight * diff;
ncorrect_responses += diff == 0;
}
}
oobError /= n_oob;
if( rparams.calcVarImportance && n_oob > 1 )
{
Mat sample_clone;
oobperm.resize(n_oob);
for( i = 0; i < n_oob; i++ )
oobperm[i] = oobidx[i];
for (i = n_oob - 1; i > 0; --i) //Randomly shuffle indices so we can permute features
{
int r_i = rng.uniform(0, n_oob);
std::swap(oobperm[i], oobperm[r_i]);
}
for( vi_ = 0; vi_ < nvars; vi_++ )
{
vi = vidx ? vidx[vi_] : vi_; //Ensure that only the user specified predictors are used for training
double ncorrect_responses_permuted = 0;
for( i = 0; i < n_oob; i++ )
{
j = oobidx[i];
int vj = oobperm[i];
sample0 = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) );
sample0.copyTo(sample_clone); //create a copy so we don't mess up the original data
sample_clone.at<float>(vi) = psamples[sstep0*w->sidx[vj] + sstep1*vi];
double val = predictTrees(Range(treeidx, treeidx+1), sample_clone, predictFlags);
if( !_isClassifier )
{
val = (val - w->ord_responses[w->sidx[j]])/max_response;
ncorrect_responses_permuted += exp( -val*val );
}
else
{
ncorrect_responses_permuted += cvRound(val) == w->cat_responses[w->sidx[j]];
}
}
varImportance[vi] += (float)(ncorrect_responses - ncorrect_responses_permuted);
}
}
}
if( calcOOBError && oobError < eps )
break;
}
if( rparams.calcVarImportance )
{
for( vi_ = 0; vi_ < nallvars; vi_++ )
varImportance[vi_] = std::max(varImportance[vi_], 0.f);
normalize(varImportance, varImportance, 1., 0, NORM_L1);
}
endTraining();
return true;
}
void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
DTreesImpl::writeTrainingParams(fs);
fs << "nactive_vars" << rparams.nactiveVars;
}
void write( FileStorage& fs ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
if( roots.empty() )
CV_Error( CV_StsBadArg, "RTrees have not been trained" );
writeFormat(fs);
writeParams(fs);
fs << "oob_error" << oobError;
if( !varImportance.empty() )
fs << "var_importance" << varImportance;
int k, ntrees = (int)roots.size();
fs << "ntrees" << ntrees
<< "trees" << "[";
for( k = 0; k < ntrees; k++ )
{
fs << "{";
writeTree(fs, roots[k]);
fs << "}";
}
fs << "]";
}
void readParams( const FileNode& fn ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
DTreesImpl::readParams(fn);
FileNode tparams_node = fn["training_params"];
rparams.nactiveVars = (int)tparams_node["nactive_vars"];
}
void read( const FileNode& fn ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
clear();
//int nclasses = (int)fn["nclasses"];
//int nsamples = (int)fn["nsamples"];
oobError = (double)fn["oob_error"];
int ntrees = (int)fn["ntrees"];
readVectorOrMat(fn["var_importance"], varImportance);
readParams(fn);
FileNode trees_node = fn["trees"];
FileNodeIterator it = trees_node.begin();
CV_Assert( ntrees == (int)trees_node.size() );
for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it )
{
FileNode nfn = (*it)["nodes"];
readTree(nfn);
}
}
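// getVotes output layout: with PREDICT_SUM the result is an nsamples x ntrees CV_32F matrix
// of per-tree predictions; otherwise row 0 holds the class labels and row i+1 holds, for
// sample i, the number of trees that voted for each class.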
void getVotes( InputArray input, OutputArray output, int flags ) const
{
CV_TRACE_FUNCTION();
CV_Assert( !roots.empty() );
int nclasses = (int)classLabels.size(), ntrees = (int)roots.size();
Mat samples = input.getMat(), results;
int i, j, nsamples = samples.rows;
int predictType = flags & PREDICT_MASK;
if( predictType == PREDICT_AUTO )
{
predictType = !_isClassifier || (classLabels.size() == 2 && (flags & RAW_OUTPUT) != 0) ?
PREDICT_SUM : PREDICT_MAX_VOTE;
}
if( predictType == PREDICT_SUM )
{
output.create(nsamples, ntrees, CV_32F);
results = output.getMat();
for( i = 0; i < nsamples; i++ )
{
for( j = 0; j < ntrees; j++ )
{
float val = predictTrees( Range(j, j+1), samples.row(i), flags);
results.at<float> (i, j) = val;
}
}
} else
{
vector<int> votes;
output.create(nsamples+1, nclasses, CV_32S);
results = output.getMat();
for ( j = 0; j < nclasses; j++)
{
results.at<int> (0, j) = classLabels[j];
}
for( i = 0; i < nsamples; i++ )
{
votes.clear();
for( j = 0; j < ntrees; j++ )
{
int val = (int)predictTrees( Range(j, j+1), samples.row(i), flags);
votes.push_back(val);
}
for ( j = 0; j < nclasses; j++)
{
results.at<int> (i+1, j) = (int)std::count(votes.begin(), votes.end(), classLabels[j]);
}
}
}
}
double getOOBError() const {
return oobError;
}
RTreeParams rparams;
double oobError;
vector<float> varImportance;
vector<int> allVars, activeVars;
};
class RTreesImpl CV_FINAL : public RTrees
{
public:
inline bool getCalculateVarImportance() const CV_OVERRIDE { return impl.rparams.calcVarImportance; }
inline void setCalculateVarImportance(bool val) CV_OVERRIDE { impl.rparams.calcVarImportance = val; }
inline int getActiveVarCount() const CV_OVERRIDE { return impl.rparams.nactiveVars; }
inline void setActiveVarCount(int val) CV_OVERRIDE { impl.rparams.nactiveVars = val; }
inline TermCriteria getTermCriteria() const CV_OVERRIDE { return impl.rparams.termCrit; }
inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { impl.rparams.termCrit = val; }
inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); }
inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); }
inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); }
inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); }
inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); }
inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); }
inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); }
inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); }
inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); }
inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); }
inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); }
inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); }
inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); }
inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); }
inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); }
inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); }
inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); }
inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); }
inline void getVotes(InputArray input, OutputArray output, int flags) const CV_OVERRIDE {return impl.getVotes(input,output,flags);}
RTreesImpl() {}
virtual ~RTreesImpl() CV_OVERRIDE {}
String getDefaultName() const CV_OVERRIDE { return "opencv_ml_rtrees"; }
bool train( const Ptr<TrainData>& trainData, int flags ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_Assert(!trainData.empty());
if (impl.getCVFolds() != 0)
CV_Error(Error::StsBadArg, "Cross validation for RTrees is not implemented");
return impl.train(trainData, flags);
}
float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_CheckEQ(samples.cols(), getVarCount(), "");
return impl.predict(samples, results, flags);
}
void write( FileStorage& fs ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
impl.write(fs);
}
void read( const FileNode& fn ) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
impl.read(fn);
}
Mat getVarImportance() const CV_OVERRIDE { return Mat_<float>(impl.varImportance, true); }
int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); }
bool isTrained() const CV_OVERRIDE { return impl.isTrained(); }
bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); }
const vector<int>& getRoots() const CV_OVERRIDE { return impl.getRoots(); }
const vector<Node>& getNodes() const CV_OVERRIDE { return impl.getNodes(); }
const vector<Split>& getSplits() const CV_OVERRIDE { return impl.getSplits(); }
const vector<int>& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); }
double getOOBError() const CV_OVERRIDE { return impl.getOOBError(); }
DTreesImplForRTrees impl;
};
Ptr<RTrees> RTrees::create()
{
CV_TRACE_FUNCTION();
return makePtr<RTreesImpl>();
}
//Function needed for Python and Java wrappers
Ptr<RTrees> RTrees::load(const String& filepath, const String& nodeName)
{
CV_TRACE_FUNCTION();
return Algorithm::load<RTrees>(filepath, nodeName);
}
}}
// End of file.
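// A minimal usage sketch for the RTrees API defined above; the tiny dataset and the helper
// name below are hypothetical and only illustrate the call sequence (create, configure,
// train on a TrainData object, predict, query variable importance).
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
static void rtreesUsageSketch()
{
    using namespace cv;
    using namespace cv::ml;
    // four 2-D samples belonging to two classes (hypothetical values)
    Mat samples = (Mat_<float>(4, 2) << 1.f, 1.f, 1.2f, 0.9f, 5.f, 5.f, 5.1f, 4.8f);
    Mat responses = (Mat_<int>(4, 1) << 0, 0, 1, 1);
    Ptr<RTrees> rf = RTrees::create();
    rf->setMaxDepth(5);
    rf->setCalculateVarImportance(true);
    rf->setTermCriteria(TermCriteria(TermCriteria::COUNT, 10, 0.));
    rf->train(TrainData::create(samples, ROW_SAMPLE, responses));
    float cls = rf->predict((Mat_<float>(1, 2) << 1.1f, 1.f)); // majority vote over the trees
    Mat importance = rf->getVarImportance();                   // L1-normalized when enabled
    (void)cls; (void)importance;
}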

File diff suppressed because it is too large.


@ -1,524 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2016, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "limits"
#include <iostream>
using std::cout;
using std::endl;
/****************************************************************************************\
* Stochastic Gradient Descent SVM Classifier *
\****************************************************************************************/
namespace cv
{
namespace ml
{
class SVMSGDImpl CV_FINAL : public SVMSGD
{
public:
SVMSGDImpl();
virtual ~SVMSGDImpl() {}
virtual bool train(const Ptr<TrainData>& data, int) CV_OVERRIDE;
virtual float predict( InputArray samples, OutputArray results=noArray(), int flags = 0 ) const CV_OVERRIDE;
virtual bool isClassifier() const CV_OVERRIDE;
virtual bool isTrained() const CV_OVERRIDE;
virtual void clear() CV_OVERRIDE;
virtual void write(FileStorage &fs) const CV_OVERRIDE;
virtual void read(const FileNode &fn) CV_OVERRIDE;
virtual Mat getWeights() CV_OVERRIDE { return weights_; }
virtual float getShift() CV_OVERRIDE { return shift_; }
virtual int getVarCount() const CV_OVERRIDE { return weights_.cols; }
virtual String getDefaultName() const CV_OVERRIDE {return "opencv_ml_svmsgd";}
virtual void setOptimalParameters(int svmsgdType = ASGD, int marginType = SOFT_MARGIN) CV_OVERRIDE;
inline int getSvmsgdType() const CV_OVERRIDE { return params.svmsgdType; }
inline void setSvmsgdType(int val) CV_OVERRIDE { params.svmsgdType = val; }
inline int getMarginType() const CV_OVERRIDE { return params.marginType; }
inline void setMarginType(int val) CV_OVERRIDE { params.marginType = val; }
inline float getMarginRegularization() const CV_OVERRIDE { return params.marginRegularization; }
inline void setMarginRegularization(float val) CV_OVERRIDE { params.marginRegularization = val; }
inline float getInitialStepSize() const CV_OVERRIDE { return params.initialStepSize; }
inline void setInitialStepSize(float val) CV_OVERRIDE { params.initialStepSize = val; }
inline float getStepDecreasingPower() const CV_OVERRIDE { return params.stepDecreasingPower; }
inline void setStepDecreasingPower(float val) CV_OVERRIDE { params.stepDecreasingPower = val; }
inline cv::TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; }
inline void setTermCriteria(const cv::TermCriteria& val) CV_OVERRIDE { params.termCrit = val; }
private:
void updateWeights(InputArray sample, bool positive, float stepSize, Mat &weights);
void writeParams( FileStorage &fs ) const;
void readParams( const FileNode &fn );
static inline bool isPositive(float val) { return val > 0; }
static void normalizeSamples(Mat &matrix, Mat &average, float &multiplier);
float calcShift(InputArray _samples, InputArray _responses) const;
static void makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier);
// Vector with SVM weights
Mat weights_;
float shift_;
// Parameters for learning
struct SVMSGDParams
{
float marginRegularization;
float initialStepSize;
float stepDecreasingPower;
TermCriteria termCrit;
int svmsgdType;
int marginType;
};
SVMSGDParams params;
};
Ptr<SVMSGD> SVMSGD::create()
{
return makePtr<SVMSGDImpl>();
}
Ptr<SVMSGD> SVMSGD::load(const String& filepath, const String& nodeName)
{
return Algorithm::load<SVMSGD>(filepath, nodeName);
}
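// normalizeSamples: subtract the per-feature mean, then scale the whole matrix so that the
// root-mean-square of its entries is 1 (multiplier = sqrt(samples.total()) / ||samples||_F);
// the same average and multiplier are later used to undo the scaling of the learned weights.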
void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier)
{
int featuresCount = samples.cols;
int samplesCount = samples.rows;
average = Mat(1, featuresCount, samples.type());
CV_Assert(average.type() == CV_32FC1);
for (int featureIndex = 0; featureIndex < featuresCount; featureIndex++)
{
average.at<float>(featureIndex) = static_cast<float>(mean(samples.col(featureIndex))[0]);
}
for (int sampleIndex = 0; sampleIndex < samplesCount; sampleIndex++)
{
samples.row(sampleIndex) -= average;
}
double normValue = norm(samples);
multiplier = static_cast<float>(sqrt(static_cast<double>(samples.total())) / normValue);
samples *= multiplier;
}
void SVMSGDImpl::makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier)
{
Mat normalizedTrainSamples = trainSamples.clone();
int samplesCount = normalizedTrainSamples.rows;
normalizeSamples(normalizedTrainSamples, average, multiplier);
Mat onesCol = Mat::ones(samplesCount, 1, CV_32F);
cv::hconcat(normalizedTrainSamples, onesCol, extendedTrainSamples);
}
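// updateWeights performs one SGD step on the margin-regularized hinge loss:
// if y*(w.x) > 1 the sample lies outside the margin and only weight decay is applied,
// w <- (1 - eta*lambda)*w; otherwise w <- w - eta*lambda*w + eta*y*x,
// where eta is the current step size and lambda the margin regularization.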
void SVMSGDImpl::updateWeights(InputArray _sample, bool positive, float stepSize, Mat& weights)
{
Mat sample = _sample.getMat();
int response = positive ? 1 : -1; // map the label to -1 or +1
if ( sample.dot(weights) * response > 1)
{
// Not a support vector, only apply weight decay
weights *= (1.f - stepSize * params.marginRegularization);
}
else
{
// It's a support vector, add it to the weights
weights -= (stepSize * params.marginRegularization) * weights - (stepSize * response) * sample;
}
}
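// calcShift (used for the hard margin): find the positive sample with the smallest projection
// w.x and the negative sample with the largest projection w.x, and choose the shift that puts
// the decision threshold halfway between these two projections.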
float SVMSGDImpl::calcShift(InputArray _samples, InputArray _responses) const
{
float margin[2] = { std::numeric_limits<float>::max(), std::numeric_limits<float>::max() };
Mat trainSamples = _samples.getMat();
int trainSamplesCount = trainSamples.rows;
Mat trainResponses = _responses.getMat();
CV_Assert(trainResponses.type() == CV_32FC1);
for (int samplesIndex = 0; samplesIndex < trainSamplesCount; samplesIndex++)
{
Mat currentSample = trainSamples.row(samplesIndex);
float dotProduct = static_cast<float>(currentSample.dot(weights_));
bool positive = isPositive(trainResponses.at<float>(samplesIndex));
int index = positive ? 0 : 1;
float signToMul = positive ? 1.f : -1.f;
float curMargin = dotProduct * signToMul;
if (curMargin < margin[index])
{
margin[index] = curMargin;
}
}
return -(margin[0] - margin[1]) / 2.f;
}
bool SVMSGDImpl::train(const Ptr<TrainData>& data, int)
{
CV_Assert(!data.empty());
clear();
CV_Assert( isClassifier() ); //toDo: consider
Mat trainSamples = data->getTrainSamples();
int featureCount = trainSamples.cols;
Mat trainResponses = data->getTrainResponses(); // (trainSamplesCount x 1) matrix
CV_Assert(trainResponses.rows == trainSamples.rows);
if (trainResponses.empty())
{
return false;
}
int positiveCount = countNonZero(trainResponses >= 0);
int negativeCount = countNonZero(trainResponses < 0);
if ( positiveCount <= 0 || negativeCount <= 0 )
{
weights_ = Mat::zeros(1, featureCount, CV_32F);
shift_ = (positiveCount > 0) ? 1.f : -1.f;
return true;
}
Mat extendedTrainSamples;
Mat average;
float multiplier = 0;
makeExtendedTrainSamples(trainSamples, extendedTrainSamples, average, multiplier);
int extendedTrainSamplesCount = extendedTrainSamples.rows;
int extendedFeatureCount = extendedTrainSamples.cols;
Mat extendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F);
Mat previousWeights = Mat::zeros(1, extendedFeatureCount, CV_32F);
Mat averageExtendedWeights;
if (params.svmsgdType == ASGD)
{
averageExtendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F);
}
RNG rng(0);
CV_Assert (params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS);
int maxCount = (params.termCrit.type & TermCriteria::COUNT) ? params.termCrit.maxCount : INT_MAX;
double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0;
double err = DBL_MAX;
CV_Assert (trainResponses.type() == CV_32FC1);
// Stochastic gradient descent SVM
for (int iter = 0; (iter < maxCount) && (err > epsilon); iter++)
{
int randomNumber = rng.uniform(0, extendedTrainSamplesCount); //generate sample number
Mat currentSample = extendedTrainSamples.row(randomNumber);
float stepSize = params.initialStepSize * std::pow((1 + params.marginRegularization * params.initialStepSize * (float)iter), (-params.stepDecreasingPower)); //update stepSize
updateWeights( currentSample, isPositive(trainResponses.at<float>(randomNumber)), stepSize, extendedWeights );
//average weights (only for ASGD model)
if (params.svmsgdType == ASGD)
{
averageExtendedWeights = ((float)iter/ (1 + (float)iter)) * averageExtendedWeights + extendedWeights / (1 + (float) iter);
err = norm(averageExtendedWeights - previousWeights);
averageExtendedWeights.copyTo(previousWeights);
}
else
{
err = norm(extendedWeights - previousWeights);
extendedWeights.copyTo(previousWeights);
}
}
if (params.svmsgdType == ASGD)
{
extendedWeights = averageExtendedWeights;
}
Rect roi(0, 0, featureCount, 1);
weights_ = extendedWeights(roi);
weights_ *= multiplier;
CV_Assert((params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN) && (extendedWeights.type() == CV_32FC1));
if (params.marginType == SOFT_MARGIN)
{
shift_ = extendedWeights.at<float>(featureCount) - static_cast<float>(weights_.dot(average));
}
else
{
shift_ = calcShift(trainSamples, trainResponses);
}
return true;
}
float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const
{
float result = 0;
cv::Mat samples = _samples.getMat();
int nSamples = samples.rows;
cv::Mat results;
CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1);
if( _results.needed() )
{
_results.create( nSamples, 1, samples.type() );
results = _results.getMat();
}
else
{
CV_Assert( nSamples == 1 );
results = Mat(1, 1, CV_32FC1, &result);
}
for (int sampleIndex = 0; sampleIndex < nSamples; sampleIndex++)
{
Mat currentSample = samples.row(sampleIndex);
float criterion = static_cast<float>(currentSample.dot(weights_)) + shift_;
results.at<float>(sampleIndex) = (criterion >= 0) ? 1.f : -1.f;
}
return result;
}
bool SVMSGDImpl::isClassifier() const
{
return (params.svmsgdType == SGD || params.svmsgdType == ASGD)
&&
(params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN)
&&
(params.marginRegularization > 0) && (params.initialStepSize > 0) && (params.stepDecreasingPower >= 0);
}
bool SVMSGDImpl::isTrained() const
{
return !weights_.empty();
}
void SVMSGDImpl::write(FileStorage& fs) const
{
if( !isTrained() )
CV_Error( CV_StsParseError, "SVMSGD model data is invalid, it hasn't been trained" );
writeFormat(fs);
writeParams( fs );
fs << "weights" << weights_;
fs << "shift" << shift_;
}
void SVMSGDImpl::writeParams( FileStorage& fs ) const
{
String SvmsgdTypeStr;
switch (params.svmsgdType)
{
case SGD:
SvmsgdTypeStr = "SGD";
break;
case ASGD:
SvmsgdTypeStr = "ASGD";
break;
default:
SvmsgdTypeStr = format("Unknown_%d", params.svmsgdType);
}
fs << "svmsgdType" << SvmsgdTypeStr;
String marginTypeStr;
switch (params.marginType)
{
case SOFT_MARGIN:
marginTypeStr = "SOFT_MARGIN";
break;
case HARD_MARGIN:
marginTypeStr = "HARD_MARGIN";
break;
default:
marginTypeStr = format("Unknown_%d", params.marginType);
}
fs << "marginType" << marginTypeStr;
fs << "marginRegularization" << params.marginRegularization;
fs << "initialStepSize" << params.initialStepSize;
fs << "stepDecreasingPower" << params.stepDecreasingPower;
fs << "term_criteria" << "{:";
if( params.termCrit.type & TermCriteria::EPS )
fs << "epsilon" << params.termCrit.epsilon;
if( params.termCrit.type & TermCriteria::COUNT )
fs << "iterations" << params.termCrit.maxCount;
fs << "}";
}
void SVMSGDImpl::readParams( const FileNode& fn )
{
String svmsgdTypeStr = (String)fn["svmsgdType"];
int svmsgdType =
svmsgdTypeStr == "SGD" ? SGD :
svmsgdTypeStr == "ASGD" ? ASGD : -1;
if( svmsgdType < 0 )
CV_Error( CV_StsParseError, "Missing or invalid SVMSGD type" );
params.svmsgdType = svmsgdType;
String marginTypeStr = (String)fn["marginType"];
int marginType =
marginTypeStr == "SOFT_MARGIN" ? SOFT_MARGIN :
marginTypeStr == "HARD_MARGIN" ? HARD_MARGIN : -1;
if( marginType < 0 )
CV_Error( CV_StsParseError, "Missing or invalid margin type" );
params.marginType = marginType;
CV_Assert ( fn["marginRegularization"].isReal() );
params.marginRegularization = (float)fn["marginRegularization"];
CV_Assert ( fn["initialStepSize"].isReal() );
params.initialStepSize = (float)fn["initialStepSize"];
CV_Assert ( fn["stepDecreasingPower"].isReal() );
params.stepDecreasingPower = (float)fn["stepDecreasingPower"];
FileNode tcnode = fn["term_criteria"];
CV_Assert(!tcnode.empty());
params.termCrit.epsilon = (double)tcnode["epsilon"];
params.termCrit.maxCount = (int)tcnode["iterations"];
params.termCrit.type = (params.termCrit.epsilon > 0 ? TermCriteria::EPS : 0) +
(params.termCrit.maxCount > 0 ? TermCriteria::COUNT : 0);
CV_Assert ((params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS));
}
void SVMSGDImpl::read(const FileNode& fn)
{
clear();
readParams(fn);
fn["weights"] >> weights_;
fn["shift"] >> shift_;
}
void SVMSGDImpl::clear()
{
weights_.release();
shift_ = 0;
}
SVMSGDImpl::SVMSGDImpl()
{
clear();
setOptimalParameters();
}
void SVMSGDImpl::setOptimalParameters(int svmsgdType, int marginType)
{
switch (svmsgdType)
{
case SGD:
params.svmsgdType = SGD;
params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN :
(marginType == HARD_MARGIN) ? HARD_MARGIN : -1;
params.marginRegularization = 0.0001f;
params.initialStepSize = 0.05f;
params.stepDecreasingPower = 1.f;
params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001);
break;
case ASGD:
params.svmsgdType = ASGD;
params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN :
(marginType == HARD_MARGIN) ? HARD_MARGIN : -1;
params.marginRegularization = 0.00001f;
params.initialStepSize = 0.05f;
params.stepDecreasingPower = 0.75f;
params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001);
break;
default:
CV_Error( CV_StsParseError, "SVMSGD model data is invalid" );
}
}
} //ml
} //cv
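// A minimal usage sketch for the SVMSGD classifier implemented above; the data and the helper
// name below are hypothetical. ASGD with a soft margin is the configuration that
// setOptimalParameters() selects by default.
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
static void svmsgdUsageSketch()
{
    using namespace cv;
    using namespace cv::ml;
    // four 2-D samples; the sign of the response encodes the class (-1 / +1)
    Mat samples = (Mat_<float>(4, 2) << -2.f, -1.f, -1.5f, -2.f, 2.f, 1.f, 1.5f, 2.f);
    Mat responses = (Mat_<float>(4, 1) << -1.f, -1.f, 1.f, 1.f);
    Ptr<SVMSGD> svmsgd = SVMSGD::create();
    svmsgd->setOptimalParameters(SVMSGD::ASGD, SVMSGD::SOFT_MARGIN);
    svmsgd->train(TrainData::create(samples, ROW_SAMPLE, responses));
    // the learned decision function is sign(weights . x + shift)
    Mat w = svmsgd->getWeights();
    float shift = svmsgd->getShift();
    float label = svmsgd->predict((Mat_<float>(1, 2) << 1.8f, 1.2f)); // returns -1 or +1
    (void)w; (void)shift; (void)label;
}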


@ -1,113 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv { namespace ml {
struct PairDI
{
double d;
int i;
};
struct CmpPairDI
{
bool operator ()(const PairDI& e1, const PairDI& e2) const
{
return (e1.d < e2.d) || (e1.d == e2.d && e1.i < e2.i);
}
};
void createConcentricSpheresTestSet( int num_samples, int num_features, int num_classes,
OutputArray _samples, OutputArray _responses)
{
if( num_samples < 1 )
CV_Error( CV_StsBadArg, "num_samples parameter must be positive" );
if( num_features < 1 )
CV_Error( CV_StsBadArg, "num_features parameter must be positive" );
if( num_classes < 1 )
CV_Error( CV_StsBadArg, "num_classes parameter must be positive" );
int i, cur_class;
_samples.create( num_samples, num_features, CV_32F );
_responses.create( 1, num_samples, CV_32S );
Mat responses = _responses.getMat();
Mat mean = Mat::zeros(1, num_features, CV_32F);
Mat cov = Mat::eye(num_features, num_features, CV_32F);
// fill the feature values matrix with random numbers drawn from the standard normal distribution
randMVNormal( mean, cov, num_samples, _samples );
Mat samples = _samples.getMat();
// calculate distances from the origin to the samples and put them
// into the sequence along with indices
std::vector<PairDI> dis(samples.rows);
for( i = 0; i < samples.rows; i++ )
{
PairDI& elem = dis[i];
elem.i = i;
elem.d = norm(samples.row(i), NORM_L2);
}
std::sort(dis.begin(), dis.end(), CmpPairDI());
// assign class labels
num_classes = std::min( num_samples, num_classes );
for( i = 0, cur_class = 0; i < num_samples; ++cur_class )
{
int last_idx = num_samples * (cur_class + 1) / num_classes - 1;
double max_dst = dis[last_idx].d;
max_dst = std::max( max_dst, dis[i].d );
for( ; i < num_samples && dis[i].d <= max_dst; ++i )
responses.at<int>(dis[i].i) = cur_class;
}
}
}}
/* End of file. */
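// A minimal usage sketch for the generator defined above; the sample counts are hypothetical.
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
static void concentricSpheresSketch()
{
    cv::Mat samples, responses;
    // 300 samples with 2 features each, split into 3 classes by distance from the origin
    cv::ml::createConcentricSpheresTestSet(300, 2, 3, samples, responses);
    CV_Assert(samples.rows == 300 && samples.cols == 2);
    CV_Assert(responses.total() == 300); // one integer class label per sample
}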

File diff suppressed because it is too large.


@ -1,200 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
// #define GENERATE_TESTDATA
namespace opencv_test { namespace {
struct Activation
{
int id;
const char * name;
};
void PrintTo(const Activation &a, std::ostream *os) { *os << a.name; }
Activation activation_list[] =
{
{ ml::ANN_MLP::IDENTITY, "identity" },
{ ml::ANN_MLP::SIGMOID_SYM, "sigmoid_sym" },
{ ml::ANN_MLP::GAUSSIAN, "gaussian" },
{ ml::ANN_MLP::RELU, "relu" },
{ ml::ANN_MLP::LEAKYRELU, "leakyrelu" },
};
typedef testing::TestWithParam< Activation > ML_ANN_Params;
TEST_P(ML_ANN_Params, ActivationFunction)
{
const Activation &activation = GetParam();
const string dataname = "waveform";
const string data_path = findDataFile(dataname + ".data");
const string model_name = dataname + "_" + activation.name + ".yml";
Ptr<TrainData> tdata = TrainData::loadFromCSV(data_path, 0);
ASSERT_FALSE(tdata.empty());
// hack?
const uint64 old_state = theRNG().state;
theRNG().state = 1027401484159173092;
tdata->setTrainTestSplit(500);
theRNG().state = old_state;
Mat_<int> layerSizes(1, 4);
layerSizes(0, 0) = tdata->getNVars();
layerSizes(0, 1) = 100;
layerSizes(0, 2) = 100;
layerSizes(0, 3) = tdata->getResponses().cols;
Mat testSamples = tdata->getTestSamples();
Mat rx, ry;
{
Ptr<ml::ANN_MLP> x = ml::ANN_MLP::create();
x->setActivationFunction(activation.id);
x->setLayerSizes(layerSizes);
x->setTrainMethod(ml::ANN_MLP::RPROP, 0.01, 0.1);
x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 300, 0.01));
x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE);
ASSERT_TRUE(x->isTrained());
x->predict(testSamples, rx);
#ifdef GENERATE_TESTDATA
x->save(cvtest::TS::ptr()->get_data_path() + model_name);
#endif
}
{
const string model_path = findDataFile(model_name);
Ptr<ml::ANN_MLP> y = Algorithm::load<ANN_MLP>(model_path);
ASSERT_TRUE(y);
y->predict(testSamples, ry);
EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON);
}
}
INSTANTIATE_TEST_CASE_P(/**/, ML_ANN_Params, testing::ValuesIn(activation_list));
//==================================================================================================
CV_ENUM(ANN_MLP_METHOD, ANN_MLP::RPROP, ANN_MLP::ANNEAL)
typedef tuple<ANN_MLP_METHOD, string, int> ML_ANN_METHOD_Params;
typedef TestWithParam<ML_ANN_METHOD_Params> ML_ANN_METHOD;
TEST_P(ML_ANN_METHOD, Test)
{
int methodType = get<0>(GetParam());
string methodName = get<1>(GetParam());
int N = get<2>(GetParam());
String folder = string(cvtest::TS::ptr()->get_data_path());
String original_path = findDataFile("waveform.data");
string dataname = "waveform_" + methodName;
string weight_name = dataname + "_init_weight.yml.gz";
string model_name = dataname + ".yml.gz";
string response_name = dataname + "_response.yml.gz";
Ptr<TrainData> tdata2 = TrainData::loadFromCSV(original_path, 0);
ASSERT_FALSE(tdata2.empty());
Mat samples = tdata2->getSamples()(Range(0, N), Range::all());
Mat responses(N, 3, CV_32FC1, Scalar(0));
for (int i = 0; i < N; i++)
responses.at<float>(i, static_cast<int>(tdata2->getResponses().at<float>(i, 0))) = 1;
Ptr<TrainData> tdata = TrainData::create(samples, ml::ROW_SAMPLE, responses);
ASSERT_FALSE(tdata.empty());
// hack?
const uint64 old_state = theRNG().state;
theRNG().state = 0;
tdata->setTrainTestSplitRatio(0.8);
theRNG().state = old_state;
Mat testSamples = tdata->getTestSamples();
// train 1st stage
Ptr<ml::ANN_MLP> xx = ml::ANN_MLP::create();
Mat_<int> layerSizes(1, 4);
layerSizes(0, 0) = tdata->getNVars();
layerSizes(0, 1) = 30;
layerSizes(0, 2) = 30;
layerSizes(0, 3) = tdata->getResponses().cols;
xx->setLayerSizes(layerSizes);
xx->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM);
xx->setTrainMethod(ml::ANN_MLP::RPROP);
xx->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1, 0.01));
xx->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE);
#ifdef GENERATE_TESTDATA
{
FileStorage fs;
fs.open(cvtest::TS::ptr()->get_data_path() + weight_name, FileStorage::WRITE + FileStorage::BASE64);
xx->write(fs);
}
#endif
// train 2nd stage
Mat r_gold;
Ptr<ml::ANN_MLP> x = ml::ANN_MLP::create();
{
const string weight_file = findDataFile(weight_name);
FileStorage fs;
fs.open(weight_file, FileStorage::READ);
x->read(fs.root());
}
x->setTrainMethod(methodType);
if (methodType == ml::ANN_MLP::ANNEAL)
{
x->setAnnealEnergyRNG(RNG(CV_BIG_INT(0xffffffff)));
x->setAnnealInitialT(12);
x->setAnnealFinalT(0.15);
x->setAnnealCoolingRatio(0.96);
x->setAnnealItePerStep(11);
}
x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.01));
x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE + ml::ANN_MLP::UPDATE_WEIGHTS);
ASSERT_TRUE(x->isTrained());
#ifdef GENERATE_TESTDATA
x->save(cvtest::TS::ptr()->get_data_path() + model_name);
x->predict(testSamples, r_gold);
{
FileStorage fs_response(cvtest::TS::ptr()->get_data_path() + response_name, FileStorage::WRITE + FileStorage::BASE64);
fs_response << "response" << r_gold;
}
#endif
{
const string response_file = findDataFile(response_name);
FileStorage fs_response(response_file, FileStorage::READ);
fs_response["response"] >> r_gold;
}
ASSERT_FALSE(r_gold.empty());
// verify
const string model_file = findDataFile(model_name);
Ptr<ml::ANN_MLP> y = Algorithm::load<ANN_MLP>(model_file);
ASSERT_TRUE(y);
Mat rx, ry;
for (int j = 0; j < 4; j++)
{
rx = x->getWeights(j);
ry = y->getWeights(j);
EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON) << "Weights are not equal for layer: " << j;
}
x->predict(testSamples, rx);
y->predict(testSamples, ry);
EXPECT_MAT_NEAR(ry, rx, FLT_EPSILON) << "Predict are not equal to result of the saved model";
EXPECT_MAT_NEAR(r_gold, rx, FLT_EPSILON) << "Predict are not equal to 'gold' response";
}
INSTANTIATE_TEST_CASE_P(/*none*/, ML_ANN_METHOD,
testing::Values(
ML_ANN_METHOD_Params(ml::ANN_MLP::RPROP, "rprop", 5000),
ML_ANN_METHOD_Params(ml::ANN_MLP::ANNEAL, "anneal", 1000)
// ML_ANN_METHOD_Params(ml::ANN_MLP::BACKPROP, "backprop", 500) -----> NO BACKPROP TEST
)
);
}} // namespace


@ -1,56 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ML_NBAYES, regression_5911)
{
int N=12;
Ptr<ml::NormalBayesClassifier> nb = cv::ml::NormalBayesClassifier::create();
// data:
float X_data[] = {
1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4,
5,5,5,5, 5,5,5,5, 5,5,5,5, 5,5,5,5,
4,3,2,1, 4,3,2,1, 4,3,2,1, 4,3,2,1
};
Mat_<float> X(N, 4, X_data);
// labels:
int Y_data[] = { 0,0,0,0, 1,1,1,1, 2,2,2,2 };
Mat_<int> Y(N, 1, Y_data);
nb->train(X, ml::ROW_SAMPLE, Y);
// single prediction:
Mat R1,P1;
for (int i=0; i<N; i++)
{
Mat r,p;
nb->predictProb(X.row(i), r, p);
R1.push_back(r);
P1.push_back(p);
}
// bulk prediction (contiguous memory):
Mat R2,P2;
nb->predictProb(X, R2, P2);
EXPECT_EQ(255 * R2.total(), sum(R1 == R2)[0]);
EXPECT_EQ(255 * P2.total(), sum(P1 == P2)[0]);
// bulk prediction, with non-contiguous memory storage
Mat R3_(N, 1+1, CV_32S),
P3_(N, 3+1, CV_32F);
nb->predictProb(X, R3_.col(0), P3_.colRange(0,3));
Mat R3 = R3_.col(0).clone(),
P3 = P3_.colRange(0,3).clone();
EXPECT_EQ(255 * R3.total(), sum(R1 == R3)[0]);
EXPECT_EQ(255 * P3.total(), sum(P1 == P3)[0]);
}
}} // namespace


@ -1,186 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
CV_ENUM(EM_START_STEP, EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP)
CV_ENUM(EM_COV_MAT, EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL)
typedef testing::TestWithParam< tuple<EM_START_STEP, EM_COV_MAT> > ML_EM_Params;
TEST_P(ML_EM_Params, accuracy)
{
const int nclusters = 3;
const int sizesArr[] = { 500, 700, 800 };
const vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
const int pointsCount = sizesArr[0] + sizesArr[1] + sizesArr[2];
Mat means;
vector<Mat> covs;
defaultDistribs( means, covs, CV_64FC1 );
Mat trainData(pointsCount, 2, CV_64FC1 );
Mat trainLabels;
generateData( trainData, trainLabels, sizes, means, covs, CV_64FC1, CV_32SC1 );
Mat testData( pointsCount, 2, CV_64FC1 );
Mat testLabels;
generateData( testData, testLabels, sizes, means, covs, CV_64FC1, CV_32SC1 );
Mat probs(trainData.rows, nclusters, CV_64FC1, cv::Scalar(1));
Mat weights(1, nclusters, CV_64FC1, cv::Scalar(1));
TermCriteria termCrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, FLT_EPSILON);
int startStep = get<0>(GetParam());
int covMatType = get<1>(GetParam());
cv::Mat labels;
Ptr<EM> em = EM::create();
em->setClustersNumber(nclusters);
em->setCovarianceMatrixType(covMatType);
em->setTermCriteria(termCrit);
if( startStep == EM::START_AUTO_STEP )
em->trainEM( trainData, noArray(), labels, noArray() );
else if( startStep == EM::START_E_STEP )
em->trainE( trainData, means, covs, weights, noArray(), labels, noArray() );
else if( startStep == EM::START_M_STEP )
em->trainM( trainData, probs, noArray(), labels, noArray() );
{
SCOPED_TRACE("Train");
float err = 1000;
EXPECT_TRUE(calcErr( labels, trainLabels, sizes, err , false, false ));
EXPECT_LE(err, 0.008f);
}
{
SCOPED_TRACE("Test");
float err = 1000;
labels.create( testData.rows, 1, CV_32SC1 );
for( int i = 0; i < testData.rows; i++ )
{
Mat sample = testData.row(i);
Mat out_probs;
labels.at<int>(i) = static_cast<int>(em->predict2( sample, out_probs )[1]);
}
EXPECT_TRUE(calcErr( labels, testLabels, sizes, err, false, false ));
EXPECT_LE(err, 0.008f);
}
}
INSTANTIATE_TEST_CASE_P(/**/, ML_EM_Params,
testing::Combine(
testing::Values(EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP),
testing::Values(EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL)
));
//==================================================================================================
TEST(ML_EM, save_load)
{
const int nclusters = 2;
Mat_<double> samples(3, 1);
samples << 1., 2., 3.;
std::vector<double> firstResult;
string filename = cv::tempfile(".xml");
{
Mat labels;
Ptr<EM> em = EM::create();
em->setClustersNumber(nclusters);
em->trainEM(samples, noArray(), labels, noArray());
for( int i = 0; i < samples.rows; i++)
{
Vec2d res = em->predict2(samples.row(i), noArray());
firstResult.push_back(res[1]);
}
{
FileStorage fs = FileStorage(filename, FileStorage::WRITE);
ASSERT_NO_THROW(fs << "em" << "{");
ASSERT_NO_THROW(em->write(fs));
ASSERT_NO_THROW(fs << "}");
}
}
{
Ptr<EM> em;
ASSERT_NO_THROW(em = Algorithm::load<EM>(filename));
for( int i = 0; i < samples.rows; i++)
{
SCOPED_TRACE(i);
Vec2d res = em->predict2(samples.row(i), noArray());
EXPECT_DOUBLE_EQ(firstResult[i], res[1]);
}
}
remove(filename.c_str());
}
//==================================================================================================
TEST(ML_EM, classification)
{
// This test classifies spam in the following way:
// 1. estimate the class-conditional distributions of "spam" / "not spam" with EM
// 2. predict the class ID using a Bayes decision rule over the estimated distributions.
string dataFilename = findDataFile("spambase.data");
Ptr<TrainData> data = TrainData::loadFromCSV(dataFilename, 0);
ASSERT_FALSE(data.empty());
Mat samples = data->getSamples();
ASSERT_EQ(samples.cols, 57);
Mat responses = data->getResponses();
vector<int> trainSamplesMask(samples.rows, 0);
const int trainSamplesCount = (int)(0.5f * samples.rows);
const int testSamplesCount = samples.rows - trainSamplesCount;
for(int i = 0; i < trainSamplesCount; i++)
trainSamplesMask[i] = 1;
RNG &rng = cv::theRNG();
for(size_t i = 0; i < trainSamplesMask.size(); i++)
{
int i1 = rng(static_cast<unsigned>(trainSamplesMask.size()));
int i2 = rng(static_cast<unsigned>(trainSamplesMask.size()));
std::swap(trainSamplesMask[i1], trainSamplesMask[i2]);
}
Mat samples0, samples1;
for(int i = 0; i < samples.rows; i++)
{
if(trainSamplesMask[i])
{
Mat sample = samples.row(i);
int resp = (int)responses.at<float>(i);
if(resp == 0)
samples0.push_back(sample);
else
samples1.push_back(sample);
}
}
Ptr<EM> model0 = EM::create();
model0->setClustersNumber(3);
model0->trainEM(samples0, noArray(), noArray(), noArray());
Ptr<EM> model1 = EM::create();
model1->setClustersNumber(3);
model1->trainEM(samples1, noArray(), noArray(), noArray());
// confusion matrices
Mat_<int> trainCM(2, 2, 0);
Mat_<int> testCM(2, 2, 0);
const double lambda = 1.;
for(int i = 0; i < samples.rows; i++)
{
Mat sample = samples.row(i);
double sampleLogLikelihoods0 = model0->predict2(sample, noArray())[0];
double sampleLogLikelihoods1 = model1->predict2(sample, noArray())[0];
int classID = (sampleLogLikelihoods0 >= lambda * sampleLogLikelihoods1) ? 0 : 1;
int resp = (int)responses.at<float>(i);
EXPECT_TRUE(resp == 0 || resp == 1);
if(trainSamplesMask[i])
trainCM(resp, classID)++;
else
testCM(resp, classID)++;
}
EXPECT_LE((double)(trainCM(1,0) + trainCM(0,1)) / trainSamplesCount, 0.23);
EXPECT_LE((double)(testCM(1,0) + testCM(0,1)) / testSamplesCount, 0.26);
}
}} // namespace


@ -1,53 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ML_KMeans, accuracy)
{
const int iters = 100;
int sizesArr[] = { 5000, 7000, 8000 };
int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2];
Mat data( pointsCount, 2, CV_32FC1 ), labels;
vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
Mat means;
vector<Mat> covs;
defaultDistribs( means, covs );
generateData( data, labels, sizes, means, covs, CV_32FC1, CV_32SC1 );
TermCriteria termCriteria( TermCriteria::COUNT, iters, 0.0);
{
SCOPED_TRACE("KMEANS_PP_CENTERS");
float err = 1000;
Mat bestLabels;
kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_PP_CENTERS, noArray() );
EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err , false ));
EXPECT_LE(err, 0.01f);
}
{
SCOPED_TRACE("KMEANS_RANDOM_CENTERS");
float err = 1000;
Mat bestLabels;
kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_RANDOM_CENTERS, noArray() );
EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false ));
EXPECT_LE(err, 0.01f);
}
{
SCOPED_TRACE("KMEANS_USE_INITIAL_LABELS");
float err = 1000;
Mat bestLabels;
labels.copyTo( bestLabels );
RNG &rng = cv::theRNG();
for( int i = 0; i < 0.5f * pointsCount; i++ )
bestLabels.at<int>( rng.next() % pointsCount, 0 ) = rng.next() % 3;
kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_USE_INITIAL_LABELS, noArray() );
EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false ));
EXPECT_LE(err, 0.01f);
}
}
}} // namespace


@ -1,112 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
using cv::ml::TrainData;
using cv::ml::EM;
using cv::ml::KNearest;
TEST(ML_KNearest, accuracy)
{
int sizesArr[] = { 500, 700, 800 };
int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2];
Mat trainData( pointsCount, 2, CV_32FC1 ), trainLabels;
vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
Mat means;
vector<Mat> covs;
defaultDistribs( means, covs );
generateData( trainData, trainLabels, sizes, means, covs, CV_32FC1, CV_32FC1 );
Mat testData( pointsCount, 2, CV_32FC1 );
Mat testLabels;
generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32FC1 );
{
SCOPED_TRACE("Default");
Mat bestLabels;
float err = 1000;
Ptr<KNearest> knn = KNearest::create();
knn->train(trainData, ml::ROW_SAMPLE, trainLabels);
knn->findNearest(testData, 4, bestLabels);
EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true ));
EXPECT_LE(err, 0.01f);
}
{
SCOPED_TRACE("KDTree");
Mat neighborIndexes;
float err = 1000;
Ptr<KNearest> knn = KNearest::create();
knn->setAlgorithmType(KNearest::KDTREE);
knn->train(trainData, ml::ROW_SAMPLE, trainLabels);
knn->findNearest(testData, 4, neighborIndexes);
Mat bestLabels;
// The KDTree search returns neighbor indexes, not actual class labels,
// so we need to do some extra work to turn them into predictions
for(int row_num = 0; row_num < neighborIndexes.rows; ++row_num){
vector<float> labels;
for(int index = 0; index < neighborIndexes.row(row_num).cols; ++index) {
labels.push_back(trainLabels.at<float>(neighborIndexes.row(row_num).at<int>(0, index) , 0));
}
// compute the mode of the neighbors' class labels to determine the overall prediction
std::vector<int> histogram(3,0);
for( int i=0; i<3; ++i )
++histogram[ static_cast<int>(labels[i]) ];
int bestLabel = static_cast<int>(std::max_element( histogram.begin(), histogram.end() ) - histogram.begin());
bestLabels.push_back(bestLabel);
}
bestLabels.convertTo(bestLabels, testLabels.type());
EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true ));
EXPECT_LE(err, 0.01f);
}
}
TEST(ML_KNearest, regression_12347)
{
Mat xTrainData = (Mat_<float>(5,2) << 1, 1.1, 1.1, 1, 2, 2, 2.1, 2, 2.1, 2.1);
Mat yTrainLabels = (Mat_<float>(5,1) << 1, 1, 2, 2, 2);
Ptr<KNearest> knn = KNearest::create();
knn->train(xTrainData, ml::ROW_SAMPLE, yTrainLabels);
Mat xTestData = (Mat_<float>(2,2) << 1.1, 1.1, 2, 2.2);
Mat zBestLabels, neighbours, dist;
// check output shapes:
int K = 16, Kexp = std::min(K, xTrainData.rows);
knn->findNearest(xTestData, K, zBestLabels, neighbours, dist);
EXPECT_EQ(xTestData.rows, zBestLabels.rows);
EXPECT_EQ(neighbours.cols, Kexp);
EXPECT_EQ(dist.cols, Kexp);
// see if the result is still correct:
K = 2;
knn->findNearest(xTestData, K, zBestLabels, neighbours, dist);
EXPECT_EQ(1, zBestLabels.at<float>(0,0));
EXPECT_EQ(2, zBestLabels.at<float>(1,0));
}
TEST(ML_KNearest, bug_11877)
{
Mat trainData = (Mat_<float>(5,2) << 3, 3, 3, 3, 4, 4, 4, 4, 4, 4);
Mat trainLabels = (Mat_<float>(5,1) << 0, 0, 1, 1, 1);
Ptr<KNearest> knnKdt = KNearest::create();
knnKdt->setAlgorithmType(KNearest::KDTREE);
knnKdt->setIsClassifier(true);
knnKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
Mat testData = (Mat_<float>(2,2) << 3.1, 3.1, 4, 4.1);
Mat testLabels = (Mat_<int>(2,1) << 0, 1);
Mat result;
knnKdt->findNearest(testData, 1, result);
EXPECT_EQ(1, int(result.at<int>(0, 0)));
EXPECT_EQ(2, int(result.at<int>(1, 0)));
EXPECT_EQ(0, trainLabels.at<int>(result.at<int>(0, 0), 0));
}
}} // namespace


@ -1,81 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
//
// Test data uses subset of data from the popular Iris Dataset (1936):
// - http://archive.ics.uci.edu/ml/datasets/Iris
// - https://en.wikipedia.org/wiki/Iris_flower_data_set
//
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ML_LR, accuracy)
{
std::string dataFileName = findDataFile("iris.data");
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
ASSERT_FALSE(tdata.empty());
Ptr<LogisticRegression> p = LogisticRegression::create();
p->setLearningRate(1.0);
p->setIterations(10001);
p->setRegularization(LogisticRegression::REG_L2);
p->setTrainMethod(LogisticRegression::BATCH);
p->setMiniBatchSize(10);
p->train(tdata);
Mat responses;
p->predict(tdata->getSamples(), responses);
float error = 1000;
EXPECT_TRUE(calculateError(responses, tdata->getResponses(), error));
EXPECT_LE(error, 0.05f);
}
//==================================================================================================
TEST(ML_LR, save_load)
{
string dataFileName = findDataFile("iris.data");
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
ASSERT_FALSE(tdata.empty());
Mat responses1, responses2;
Mat learnt_mat1, learnt_mat2;
String filename = tempfile(".xml");
{
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(1.0);
lr1->setIterations(10001);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(10);
ASSERT_NO_THROW(lr1->train(tdata));
ASSERT_NO_THROW(lr1->predict(tdata->getSamples(), responses1));
ASSERT_NO_THROW(lr1->save(filename));
learnt_mat1 = lr1->get_learnt_thetas();
}
{
Ptr<LogisticRegression> lr2;
ASSERT_NO_THROW(lr2 = Algorithm::load<LogisticRegression>(filename));
ASSERT_NO_THROW(lr2->predict(tdata->getSamples(), responses2));
learnt_mat2 = lr2->get_learnt_thetas();
}
// compare prediction outputs and learnt parameters before and after save/load
EXPECT_MAT_NEAR(responses1, responses2, 0.f);
Mat comp_learnt_mats;
comp_learnt_mats = (learnt_mat1 == learnt_mat2);
comp_learnt_mats = comp_learnt_mats.reshape(1, comp_learnt_mats.rows*comp_learnt_mats.cols);
comp_learnt_mats.convertTo(comp_learnt_mats, CV_32S);
comp_learnt_mats = comp_learnt_mats/255;
// check that the computed learnt matrix and the one read back from file are identical
EXPECT_EQ(comp_learnt_mats.rows, sum(comp_learnt_mats)[0]);
remove( filename.c_str() );
}
}} // namespace


@ -1,10 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN("ml")


@ -1,373 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
struct DatasetDesc
{
string name;
int resp_idx;
int train_count;
int cat_num;
string type_desc;
public:
Ptr<TrainData> load()
{
string filename = findDataFile(name + ".data");
Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, resp_idx, resp_idx + 1, type_desc);
data->setTrainTestSplit(train_count);
data->shuffleTrainTest();
return data;
}
};
// see testdata/ml/protocol.txt (?)
DatasetDesc datasets[] = {
{ "mushroom", 0, 4000, 16, "cat" },
{ "adult", 14, 22561, 16, "ord[0,2,4,10-12],cat[1,3,5-9,13,14]" },
{ "vehicle", 18, 761, 4, "ord[0-17],cat[18]" },
{ "abalone", 8, 3133, 16, "ord[1-8],cat[0]" },
{ "ringnorm", 20, 300, 2, "ord[0-19],cat[20]" },
{ "spambase", 57, 3221, 3, "ord[0-56],cat[57]" },
{ "waveform", 21, 300, 3, "ord[0-20],cat[21]" },
{ "elevators", 18, 5000, 0, "ord" },
{ "letter", 16, 10000, 26, "ord[0-15],cat[16]" },
{ "twonorm", 20, 300, 3, "ord[0-19],cat[20]" },
{ "poletelecomm", 48, 2500, 0, "ord" },
};
static DatasetDesc & getDataset(const string & name)
{
const int sz = sizeof(datasets)/sizeof(datasets[0]);
for (int i = 0; i < sz; ++i)
{
DatasetDesc & desc = datasets[i];
if (desc.name == name)
return desc;
}
CV_Error(Error::StsInternal, "");
}
//==================================================================================================
// interfaces and templates
template <typename T> string modelName() { return "Unknown"; }
template <typename T> Ptr<T> tuneModel(const DatasetDesc &, Ptr<T> m) { return m; }
struct IModelFactory
{
virtual Ptr<StatModel> createNew(const DatasetDesc &dataset) const = 0;
virtual Ptr<StatModel> loadFromFile(const string &filename) const = 0;
virtual string name() const = 0;
virtual ~IModelFactory() {}
};
template <typename T>
struct ModelFactory : public IModelFactory
{
Ptr<StatModel> createNew(const DatasetDesc &dataset) const CV_OVERRIDE
{
return tuneModel<T>(dataset, T::create());
}
Ptr<StatModel> loadFromFile(const string & filename) const CV_OVERRIDE
{
return T::load(filename);
}
string name() const CV_OVERRIDE { return modelName<T>(); }
};
// implementation
template <> string modelName<NormalBayesClassifier>() { return "NormalBayesClassifier"; }
template <> string modelName<DTrees>() { return "DTrees"; }
template <> string modelName<KNearest>() { return "KNearest"; }
template <> string modelName<RTrees>() { return "RTrees"; }
template <> string modelName<SVMSGD>() { return "SVMSGD"; }
template<> Ptr<DTrees> tuneModel<DTrees>(const DatasetDesc &dataset, Ptr<DTrees> m)
{
m->setMaxDepth(10);
m->setMinSampleCount(2);
m->setRegressionAccuracy(0);
m->setUseSurrogates(false);
m->setCVFolds(0);
m->setUse1SERule(false);
m->setTruncatePrunedTree(false);
m->setPriors(Mat());
m->setMaxCategories(dataset.cat_num);
return m;
}
template<> Ptr<RTrees> tuneModel<RTrees>(const DatasetDesc &dataset, Ptr<RTrees> m)
{
m->setMaxDepth(20);
m->setMinSampleCount(2);
m->setRegressionAccuracy(0);
m->setUseSurrogates(false);
m->setPriors(Mat());
m->setCalculateVarImportance(true);
m->setActiveVarCount(0);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.0));
m->setMaxCategories(dataset.cat_num);
return m;
}
template<> Ptr<SVMSGD> tuneModel<SVMSGD>(const DatasetDesc &, Ptr<SVMSGD> m)
{
m->setSvmsgdType(SVMSGD::ASGD);
m->setMarginType(SVMSGD::SOFT_MARGIN);
m->setMarginRegularization(0.00001f);
m->setInitialStepSize(0.1f);
m->setStepDecreasingPower(0.75);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10000, 0.00001));
return m;
}
template <>
struct ModelFactory<Boost> : public IModelFactory
{
ModelFactory(int boostType_) : boostType(boostType_) {}
Ptr<StatModel> createNew(const DatasetDesc &) const CV_OVERRIDE
{
Ptr<Boost> m = Boost::create();
m->setBoostType(boostType);
m->setWeakCount(20);
m->setWeightTrimRate(0.95);
m->setMaxDepth(4);
m->setUseSurrogates(false);
m->setPriors(Mat());
return m;
}
Ptr<StatModel> loadFromFile(const string &filename) const { return Boost::load(filename); }
string name() const CV_OVERRIDE { return "Boost"; }
int boostType;
};
template <>
struct ModelFactory<SVM> : public IModelFactory
{
ModelFactory(int svmType_, int kernelType_, double gamma_, double c_, double nu_)
: svmType(svmType_), kernelType(kernelType_), gamma(gamma_), c(c_), nu(nu_) {}
Ptr<StatModel> createNew(const DatasetDesc &) const CV_OVERRIDE
{
Ptr<SVM> m = SVM::create();
m->setType(svmType);
m->setKernel(kernelType);
m->setDegree(0);
m->setGamma(gamma);
m->setCoef0(0);
m->setC(c);
m->setNu(nu);
m->setP(0);
return m;
}
Ptr<StatModel> loadFromFile(const string &filename) const { return SVM::load(filename); }
string name() const CV_OVERRIDE { return "SVM"; }
int svmType;
int kernelType;
double gamma;
double c;
double nu;
};
//==================================================================================================
struct ML_Params_t
{
Ptr<IModelFactory> factory;
string dataset;
float mean;
float sigma;
};
void PrintTo(const ML_Params_t & param, std::ostream *os)
{
*os << param.factory->name() << "_" << param.dataset;
}
ML_Params_t ML_Params_List[] = {
{ makePtr< ModelFactory<DTrees> >(), "mushroom", 0.027401f, 0.036236f },
{ makePtr< ModelFactory<DTrees> >(), "adult", 14.279000f, 0.354323f },
{ makePtr< ModelFactory<DTrees> >(), "vehicle", 29.761162f, 4.823927f },
{ makePtr< ModelFactory<DTrees> >(), "abalone", 7.297540f, 0.510058f },
{ makePtr< ModelFactory<Boost> >(Boost::REAL), "adult", 13.894001f, 0.337763f },
{ makePtr< ModelFactory<Boost> >(Boost::DISCRETE), "mushroom", 0.007274f, 0.029400f },
{ makePtr< ModelFactory<Boost> >(Boost::LOGIT), "ringnorm", 9.993943f, 0.860256f },
{ makePtr< ModelFactory<Boost> >(Boost::GENTLE), "spambase", 5.404347f, 0.581716f },
{ makePtr< ModelFactory<RTrees> >(), "waveform", 17.100641f, 0.630052f },
{ makePtr< ModelFactory<RTrees> >(), "mushroom", 0.006547f, 0.028248f },
{ makePtr< ModelFactory<RTrees> >(), "adult", 13.5129f, 0.266065f },
{ makePtr< ModelFactory<RTrees> >(), "abalone", 4.745199f, 0.282112f },
{ makePtr< ModelFactory<RTrees> >(), "vehicle", 24.964712f, 4.469287f },
{ makePtr< ModelFactory<RTrees> >(), "letter", 5.334999f, 0.261142f },
{ makePtr< ModelFactory<RTrees> >(), "ringnorm", 6.248733f, 0.904713f },
{ makePtr< ModelFactory<RTrees> >(), "twonorm", 4.506479f, 0.449739f },
{ makePtr< ModelFactory<RTrees> >(), "spambase", 5.243477f, 0.54232f },
};
typedef testing::TestWithParam<ML_Params_t> ML_Params;
TEST_P(ML_Params, accuracy)
{
const ML_Params_t & param = GetParam();
DatasetDesc &dataset = getDataset(param.dataset);
Ptr<TrainData> data = dataset.load();
ASSERT_TRUE(data);
ASSERT_TRUE(data->getNSamples() > 0);
Ptr<StatModel> m = param.factory->createNew(dataset);
ASSERT_TRUE(m);
ASSERT_TRUE(m->train(data, 0));
float err = m->calcError(data, true, noArray());
EXPECT_NEAR(err, param.mean, 4 * param.sigma);
}
INSTANTIATE_TEST_CASE_P(/**/, ML_Params, testing::ValuesIn(ML_Params_List));
//==================================================================================================
struct ML_SL_Params_t
{
Ptr<IModelFactory> factory;
string dataset;
};
void PrintTo(const ML_SL_Params_t & param, std::ostream *os)
{
*os << param.factory->name() << "_" << param.dataset;
}
ML_SL_Params_t ML_SL_Params_List[] = {
{ makePtr< ModelFactory<NormalBayesClassifier> >(), "waveform" },
{ makePtr< ModelFactory<KNearest> >(), "waveform" },
{ makePtr< ModelFactory<KNearest> >(), "abalone" },
{ makePtr< ModelFactory<SVM> >(SVM::C_SVC, SVM::LINEAR, 1, 0.5, 0), "waveform" },
{ makePtr< ModelFactory<SVM> >(SVM::NU_SVR, SVM::RBF, 0.00225, 62.5, 0.03), "poletelecomm" },
{ makePtr< ModelFactory<DTrees> >(), "mushroom" },
{ makePtr< ModelFactory<DTrees> >(), "abalone" },
{ makePtr< ModelFactory<Boost> >(Boost::REAL), "adult" },
{ makePtr< ModelFactory<RTrees> >(), "waveform" },
{ makePtr< ModelFactory<RTrees> >(), "abalone" },
{ makePtr< ModelFactory<SVMSGD> >(), "waveform" },
};
typedef testing::TestWithParam<ML_SL_Params_t> ML_SL_Params;
TEST_P(ML_SL_Params, save_load)
{
const ML_SL_Params_t & param = GetParam();
DatasetDesc &dataset = getDataset(param.dataset);
Ptr<TrainData> data = dataset.load();
ASSERT_TRUE(data);
ASSERT_TRUE(data->getNSamples() > 0);
Mat responses1, responses2;
string file1 = tempfile(".json.gz");
string file2 = tempfile(".json.gz");
{
Ptr<StatModel> m = param.factory->createNew(dataset);
ASSERT_TRUE(m);
ASSERT_TRUE(m->train(data, 0));
m->calcError(data, true, responses1);
m->save(file1 + "?base64");
}
{
Ptr<StatModel> m = param.factory->loadFromFile(file1);
ASSERT_TRUE(m);
m->calcError(data, true, responses2);
m->save(file2 + "?base64");
}
EXPECT_MAT_NEAR(responses1, responses2, 0.0);
{
ifstream f1(file1.c_str(), std::ios_base::binary);
ifstream f2(file2.c_str(), std::ios_base::binary);
ASSERT_TRUE(f1.is_open() && f2.is_open());
const size_t BUFSZ = 10000;
vector<char> buf1(BUFSZ, 0);
vector<char> buf2(BUFSZ, 0);
while (true)
{
f1.read(&buf1[0], BUFSZ);
f2.read(&buf2[0], BUFSZ);
EXPECT_EQ(f1.gcount(), f2.gcount());
EXPECT_EQ(f1.eof(), f2.eof());
if (!f1.good() || !f2.good() || f1.gcount() != f2.gcount())
break;
ASSERT_EQ(buf1, buf2);
}
}
remove(file1.c_str());
remove(file2.c_str());
}
INSTANTIATE_TEST_CASE_P(/**/, ML_SL_Params, testing::ValuesIn(ML_SL_Params_List));
//==================================================================================================
TEST(TrainDataGet, layout_ROW_SAMPLE) // Details: #12236
{
cv::Mat test = cv::Mat::ones(150, 30, CV_32FC1) * 2;
test.col(3) += Scalar::all(3);
cv::Mat labels = cv::Mat::ones(150, 3, CV_32SC1) * 5;
labels.col(1) += 1;
cv::Ptr<cv::ml::TrainData> train_data = cv::ml::TrainData::create(test, cv::ml::ROW_SAMPLE, labels);
train_data->setTrainTestSplitRatio(0.9);
Mat tidx = train_data->getTestSampleIdx();
EXPECT_EQ((size_t)15, tidx.total());
Mat tresp = train_data->getTestResponses();
EXPECT_EQ(15, tresp.rows);
EXPECT_EQ(labels.cols, tresp.cols);
EXPECT_EQ(5, tresp.at<int>(0, 0)) << tresp;
EXPECT_EQ(6, tresp.at<int>(0, 1)) << tresp;
EXPECT_EQ(6, tresp.at<int>(14, 1)) << tresp;
EXPECT_EQ(5, tresp.at<int>(14, 2)) << tresp;
Mat tsamples = train_data->getTestSamples();
EXPECT_EQ(15, tsamples.rows);
EXPECT_EQ(test.cols, tsamples.cols);
EXPECT_EQ(2, tsamples.at<float>(0, 0)) << tsamples;
EXPECT_EQ(5, tsamples.at<float>(0, 3)) << tsamples;
EXPECT_EQ(2, tsamples.at<float>(14, test.cols - 1)) << tsamples;
EXPECT_EQ(5, tsamples.at<float>(14, 3)) << tsamples;
}
TEST(TrainDataGet, layout_COL_SAMPLE) // Details: #12236
{
cv::Mat test = cv::Mat::ones(30, 150, CV_32FC1) * 3;
test.row(3) += Scalar::all(3);
cv::Mat labels = cv::Mat::ones(3, 150, CV_32SC1) * 5;
labels.row(1) += 1;
cv::Ptr<cv::ml::TrainData> train_data = cv::ml::TrainData::create(test, cv::ml::COL_SAMPLE, labels);
train_data->setTrainTestSplitRatio(0.9);
Mat tidx = train_data->getTestSampleIdx();
EXPECT_EQ((size_t)15, tidx.total());
Mat tresp = train_data->getTestResponses(); // always row-based, transposed
EXPECT_EQ(15, tresp.rows);
EXPECT_EQ(labels.rows, tresp.cols);
EXPECT_EQ(5, tresp.at<int>(0, 0)) << tresp;
EXPECT_EQ(6, tresp.at<int>(0, 1)) << tresp;
EXPECT_EQ(6, tresp.at<int>(14, 1)) << tresp;
EXPECT_EQ(5, tresp.at<int>(14, 2)) << tresp;
Mat tsamples = train_data->getTestSamples();
EXPECT_EQ(15, tsamples.cols);
EXPECT_EQ(test.rows, tsamples.rows);
EXPECT_EQ(3, tsamples.at<float>(0, 0)) << tsamples;
EXPECT_EQ(6, tsamples.at<float>(3, 0)) << tsamples;
EXPECT_EQ(6, tsamples.at<float>(3, 14)) << tsamples;
EXPECT_EQ(3, tsamples.at<float>(test.rows - 1, 14)) << tsamples;
}
}} // namespace

View File

@ -1,50 +0,0 @@
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include <opencv2/ts/cuda_test.hpp> // EXPECT_MAT_NEAR
#include "opencv2/ml.hpp"
#include <fstream>
using std::ifstream;
namespace opencv_test {
using namespace cv::ml;
#define CV_NBAYES "nbayes"
#define CV_KNEAREST "knearest"
#define CV_SVM "svm"
#define CV_EM "em"
#define CV_ANN "ann"
#define CV_DTREE "dtree"
#define CV_BOOST "boost"
#define CV_RTREES "rtrees"
#define CV_ERTREES "ertrees"
#define CV_SVMSGD "svmsgd"
using cv::Ptr;
using cv::ml::StatModel;
using cv::ml::TrainData;
using cv::ml::NormalBayesClassifier;
using cv::ml::SVM;
using cv::ml::KNearest;
using cv::ml::ParamGrid;
using cv::ml::ANN_MLP;
using cv::ml::DTrees;
using cv::ml::Boost;
using cv::ml::RTrees;
using cv::ml::SVMSGD;
void defaultDistribs( Mat& means, vector<Mat>& covs, int type=CV_32FC1 );
void generateData( Mat& data, Mat& labels, const vector<int>& sizes, const Mat& _means, const vector<Mat>& covs, int dataType, int labelType );
int maxIdx( const vector<int>& count );
bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap, bool checkClusterUniq=true );
bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent = true, bool checkClusterUniq=true );
// used in LR test
bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error);
} // namespace
#endif

View File

@ -1,119 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ML_RTrees, getVotes)
{
int n = 12;
int count, i;
int label_size = 3;
int predicted_class = 0;
int max_votes = -1;
int val;
// RTrees for classification
Ptr<ml::RTrees> rt = cv::ml::RTrees::create();
//data
Mat data(n, 4, CV_32F);
randu(data, 0, 10);
//labels
Mat labels = (Mat_<int>(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2);
rt->train(data, ml::ROW_SAMPLE, labels);
//run function
Mat test(1, 4, CV_32F);
Mat result;
randu(test, 0, 10);
rt->getVotes(test, result, 0);
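// for a classifier, getVotes() returns a matrix whose first row holds the class labels and whose
// following rows hold the per-class vote counts for each input sample, so row 1 is the votes for our single test sample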
//count vote amount and find highest vote
count = 0;
const int* result_row = result.ptr<int>(1);
for( i = 0; i < label_size; i++ )
{
val = result_row[i];
//predicted_class = max_votes < val? i;
if( max_votes < val )
{
max_votes = val;
predicted_class = i;
}
count += val;
}
EXPECT_EQ(count, (int)rt->getRoots().size());
EXPECT_EQ(result.at<int>(0, predicted_class), (int)rt->predict(test));
}
TEST(ML_RTrees, 11142_sample_weights_regression)
{
int n = 3;
// RTrees for regression
Ptr<ml::RTrees> rt = cv::ml::RTrees::create();
//simple regression problem of x -> 2x
Mat data = (Mat_<float>(n,1) << 1, 2, 3);
Mat values = (Mat_<float>(n,1) << 2, 4, 6);
Mat weights = (Mat_<float>(n, 1) << 10, 10, 10);
Ptr<TrainData> trainData = TrainData::create(data, ml::ROW_SAMPLE, values);
rt->train(trainData);
double error_without_weights = round(rt->getOOBError());
rt->clear();
Ptr<TrainData> trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, values, Mat(), Mat(), weights );
rt->train(trainDataWithWeights);
double error_with_weights = round(rt->getOOBError());
// error with weights should be larger than error without weights
EXPECT_GE(error_with_weights, error_without_weights);
}
TEST(ML_RTrees, 11142_sample_weights_classification)
{
int n = 12;
// RTrees for classification
Ptr<ml::RTrees> rt = cv::ml::RTrees::create();
Mat data(n, 4, CV_32F);
randu(data, 0, 10);
Mat labels = (Mat_<int>(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2);
Mat weights = (Mat_<float>(n, 1) << 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10);
rt->train(data, ml::ROW_SAMPLE, labels);
double error_without_weights = round(rt->getOOBError());
rt->clear();
Ptr<TrainData> trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, labels, Mat(), Mat(), weights );
rt->train(trainDataWithWeights);
double error_with_weights = round(rt->getOOBError());
std::cout << error_without_weights << std::endl;
std::cout << error_with_weights << std::endl;
// error with weights should be larger than error without weights
EXPECT_GE(error_with_weights, error_without_weights);
}
TEST(ML_RTrees, bug_12974_throw_exception_when_predict_different_feature_count)
{
int numFeatures = 5;
// create a 5 feature dataset and train the model
cv::Ptr<RTrees> model = RTrees::create();
Mat samples(10, numFeatures, CV_32F);
randu(samples, 0, 10);
Mat labels = (Mat_<int>(10,1) << 0,0,0,0,0,1,1,1,1,1);
cv::Ptr<TrainData> trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, labels);
model->train(trainData);
// try to predict on data which have fewer features - this should throw an exception
for(int i = 1; i < numFeatures - 1; ++i) {
Mat test(1, i, CV_32FC1);
ASSERT_THROW(model->predict(test), Exception);
}
// try to predict on data which have more features - this should also throw an exception
Mat test(1, numFeatures + 1, CV_32FC1);
ASSERT_THROW(model->predict(test), Exception);
}
}} // namespace

View File

@ -1,107 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
void randomFillCategories(const string & filename, Mat & input)
{
Mat catMap;
Mat catCount;
std::vector<uchar> varTypes;
FileStorage fs(filename, FileStorage::READ);
FileNode root = fs.getFirstTopLevelNode();
root["cat_map"] >> catMap;
root["cat_count"] >> catCount;
root["var_type"] >> varTypes;
int offset = 0;
int countOffset = 0;
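// cat_count holds the number of categories for each categorical variable, while cat_map concatenates
// the category values of all categorical variables; 'offset' walks the flat map and 'countOffset' indexes the per-variable counts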
uint var = 0, varCount = (uint)varTypes.size();
for (; var < varCount; ++var)
{
if (varTypes[var] == ml::VAR_CATEGORICAL)
{
int size = catCount.at<int>(0, countOffset);
for (int row = 0; row < input.rows; ++row)
{
int randomChosenIndex = offset + ((uint)cv::theRNG()) % size;
int value = catMap.at<int>(0, randomChosenIndex);
input.at<float>(row, var) = (float)value;
}
offset += size;
++countOffset;
}
}
}
//==================================================================================================
typedef tuple<string, string> ML_Legacy_Param;
typedef testing::TestWithParam< ML_Legacy_Param > ML_Legacy_Params;
TEST_P(ML_Legacy_Params, legacy_load)
{
const string modelName = get<0>(GetParam());
const string dataName = get<1>(GetParam());
const string filename = findDataFile("legacy/" + modelName + "_" + dataName + ".xml");
const bool isTree = modelName == CV_BOOST || modelName == CV_DTREE || modelName == CV_RTREES;
Ptr<StatModel> model;
if (modelName == CV_BOOST)
model = Algorithm::load<Boost>(filename);
else if (modelName == CV_ANN)
model = Algorithm::load<ANN_MLP>(filename);
else if (modelName == CV_DTREE)
model = Algorithm::load<DTrees>(filename);
else if (modelName == CV_NBAYES)
model = Algorithm::load<NormalBayesClassifier>(filename);
else if (modelName == CV_SVM)
model = Algorithm::load<SVM>(filename);
else if (modelName == CV_RTREES)
model = Algorithm::load<RTrees>(filename);
else if (modelName == CV_SVMSGD)
model = Algorithm::load<SVMSGD>(filename);
ASSERT_TRUE(model);
Mat input = Mat(isTree ? 10 : 1, model->getVarCount(), CV_32F);
cv::theRNG().fill(input, RNG::UNIFORM, 0, 40);
if (isTree)
randomFillCategories(filename, input);
Mat output;
EXPECT_NO_THROW(model->predict(input, output, StatModel::RAW_OUTPUT | (isTree ? DTrees::PREDICT_SUM : 0)));
// just check that no internal assertions or errors are thrown
}
ML_Legacy_Param param_list[] = {
ML_Legacy_Param(CV_ANN, "waveform"),
ML_Legacy_Param(CV_BOOST, "adult"),
ML_Legacy_Param(CV_BOOST, "1"),
ML_Legacy_Param(CV_BOOST, "2"),
ML_Legacy_Param(CV_BOOST, "3"),
ML_Legacy_Param(CV_DTREE, "abalone"),
ML_Legacy_Param(CV_DTREE, "mushroom"),
ML_Legacy_Param(CV_NBAYES, "waveform"),
ML_Legacy_Param(CV_SVM, "poletelecomm"),
ML_Legacy_Param(CV_SVM, "waveform"),
ML_Legacy_Param(CV_RTREES, "waveform"),
ML_Legacy_Param(CV_SVMSGD, "waveform"),
};
INSTANTIATE_TEST_CASE_P(/**/, ML_Legacy_Params, testing::ValuesIn(param_list));
/*TEST(ML_SVM, throw_exception_when_save_untrained_model)
{
Ptr<cv::ml::SVM> svm;
string filename = tempfile("svm.xml");
ASSERT_THROW(svm.save(filename.c_str()), Exception);
remove(filename.c_str());
}*/
}} // namespace

View File

@ -1,156 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
static const int TEST_VALUE_LIMIT = 500;
enum
{
UNIFORM_SAME_SCALE,
UNIFORM_DIFFERENT_SCALES
};
CV_ENUM(SVMSGD_TYPE, UNIFORM_SAME_SCALE, UNIFORM_DIFFERENT_SCALES)
typedef std::vector< std::pair<float,float> > BorderList;
static void makeData(RNG &rng, int samplesCount, const Mat &weights, float shift, const BorderList & borders, Mat &samples, Mat & responses)
{
int featureCount = weights.cols;
samples.create(samplesCount, featureCount, CV_32FC1);
for (int featureIndex = 0; featureIndex < featureCount; featureIndex++)
rng.fill(samples.col(featureIndex), RNG::UNIFORM, borders[featureIndex].first, borders[featureIndex].second);
responses.create(samplesCount, 1, CV_32FC1);
for (int i = 0 ; i < samplesCount; i++)
{
double res = samples.row(i).dot(weights) + shift;
responses.at<float>(i) = res > 0 ? 1.f : -1.f;
}
}
//==================================================================================================
typedef tuple<SVMSGD_TYPE, int, double> ML_SVMSGD_Param;
typedef testing::TestWithParam<ML_SVMSGD_Param> ML_SVMSGD_Params;
TEST_P(ML_SVMSGD_Params, scale_and_features)
{
const int type = get<0>(GetParam());
const int featureCount = get<1>(GetParam());
const double precision = get<2>(GetParam());
RNG &rng = cv::theRNG();
Mat_<float> weights(1, featureCount);
rng.fill(weights, RNG::UNIFORM, -1, 1);
const float shift = static_cast<float>(rng.uniform(-featureCount, featureCount));
BorderList borders;
float lowerLimit = -TEST_VALUE_LIMIT;
float upperLimit = TEST_VALUE_LIMIT;
if (type == UNIFORM_SAME_SCALE)
{
for (int featureIndex = 0; featureIndex < featureCount; featureIndex++)
borders.push_back(std::pair<float,float>(lowerLimit, upperLimit));
}
else if (type == UNIFORM_DIFFERENT_SCALES)
{
for (int featureIndex = 0; featureIndex < featureCount; featureIndex++)
{
int crit = rng.uniform(0, 2);
if (crit > 0)
borders.push_back(std::pair<float,float>(lowerLimit, upperLimit));
else
borders.push_back(std::pair<float,float>(lowerLimit/1000, upperLimit/1000));
}
}
ASSERT_FALSE(borders.empty());
Mat trainSamples;
Mat trainResponses;
int trainSamplesCount = 10000;
makeData(rng, trainSamplesCount, weights, shift, borders, trainSamples, trainResponses);
ASSERT_EQ(trainResponses.type(), CV_32FC1);
Mat testSamples;
Mat testResponses;
int testSamplesCount = 100000;
makeData(rng, testSamplesCount, weights, shift, borders, testSamples, testResponses);
ASSERT_EQ(testResponses.type(), CV_32FC1);
Ptr<TrainData> data = TrainData::create(trainSamples, cv::ml::ROW_SAMPLE, trainResponses);
ASSERT_TRUE(data);
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
ASSERT_TRUE(svmsgd);
svmsgd->train(data);
Mat responses;
svmsgd->predict(testSamples, responses);
ASSERT_EQ(responses.type(), CV_32FC1);
ASSERT_EQ(responses.rows, testSamplesCount);
int errCount = 0;
for (int i = 0; i < testSamplesCount; i++)
if (responses.at<float>(i) * testResponses.at<float>(i) < 0)
errCount++;
float err = (float)errCount / testSamplesCount;
EXPECT_LE(err, precision);
}
ML_SVMSGD_Param params_list[] = {
ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 2, 0.01),
ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 5, 0.01),
ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 100, 0.02),
ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 2, 0.01),
ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 5, 0.01),
ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 100, 0.01),
};
INSTANTIATE_TEST_CASE_P(/**/, ML_SVMSGD_Params, testing::ValuesIn(params_list));
//==================================================================================================
TEST(ML_SVMSGD, twoPoints)
{
Mat samples(2, 2, CV_32FC1);
samples.at<float>(0,0) = 0;
samples.at<float>(0,1) = 0;
samples.at<float>(1,0) = 1000;
samples.at<float>(1,1) = 1;
Mat responses(2, 1, CV_32FC1);
responses.at<float>(0) = -1;
responses.at<float>(1) = 1;
cv::Ptr<TrainData> trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);
Mat realWeights(1, 2, CV_32FC1);
realWeights.at<float>(0) = 1000;
realWeights.at<float>(1) = 1;
float realShift = -500000.5;
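// the expected hyperplane w.x + shift = 0 has w = (1000, 1) and passes through the midpoint (500, 0.5)
// of the two samples, hence shift = -(1000*500 + 1*0.5) = -500000.5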
float normRealWeights = static_cast<float>(cv::norm(realWeights)); // TODO cvtest
realWeights /= normRealWeights;
realShift /= normRealWeights;
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
svmsgd->setOptimalParameters();
svmsgd->train( trainData );
Mat foundWeights = svmsgd->getWeights();
float foundShift = svmsgd->getShift();
float normFoundWeights = static_cast<float>(cv::norm(foundWeights)); // TODO cvtest
foundWeights /= normFoundWeights;
foundShift /= normFoundWeights;
EXPECT_LE(cv::norm(Mat(foundWeights - realWeights)), 0.001); // TODO cvtest
EXPECT_LE(std::abs((foundShift - realShift) / realShift), 0.05);
}
}} // namespace

View File

@ -1,164 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
using cv::ml::SVM;
using cv::ml::TrainData;
static Ptr<TrainData> makeRandomData(int datasize)
{
cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
RNG &rng = cv::theRNG();
for (int i = 0; i < datasize; ++i)
{
int response = rng.uniform(0, 2); // Random from {0, 1}.
samples.at<float>( i, 0 ) = rng.uniform(0.f, 0.5f) + response * 0.5f;
samples.at<float>( i, 1 ) = rng.uniform(0.f, 0.5f) + response * 0.5f;
responses.at<int>( i, 0 ) = response;
}
return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
}
static Ptr<TrainData> makeCircleData(int datasize, float scale_factor, float radius)
{
// Populate samples with data that can be split into two concentric circles
cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
for (int i = 0; i < datasize; i+=2)
{
const float pi = 3.14159f;
const float angle_rads = (i/datasize) * pi;
const float x = radius * cos(angle_rads);
const float y = radius * cos(angle_rads);
// Larger circle
samples.at<float>( i, 0 ) = x;
samples.at<float>( i, 1 ) = y;
responses.at<int>( i, 0 ) = 0;
// Smaller circle
samples.at<float>( i + 1, 0 ) = x * scale_factor;
samples.at<float>( i + 1, 1 ) = y * scale_factor;
responses.at<int>( i + 1, 0 ) = 1;
}
return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
}
static Ptr<TrainData> makeRandomData2(int datasize)
{
cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
RNG &rng = cv::theRNG();
for (int i = 0; i < datasize; ++i)
{
int response = rng.uniform(0, 2); // Random from {0, 1}.
samples.at<float>( i, 0 ) = 0;
samples.at<float>( i, 1 ) = (0.5f - response) * rng.uniform(0.f, 1.2f) + response;
responses.at<int>( i, 0 ) = response;
}
return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
}
//==================================================================================================
TEST(ML_SVM, trainauto)
{
const int datasize = 100;
cv::Ptr<TrainData> data = makeRandomData(datasize);
ASSERT_TRUE(data);
cv::Ptr<SVM> svm = SVM::create();
ASSERT_TRUE(svm);
svm->trainAuto( data, 10 ); // 10-fold cross validation.
float test_data0[2] = {0.25f, 0.25f};
cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
float result0 = svm->predict( test_point0 );
float test_data1[2] = {0.75f, 0.75f};
cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
float result1 = svm->predict( test_point1 );
EXPECT_NEAR(result0, 0, 0.001);
EXPECT_NEAR(result1, 1, 0.001);
}
TEST(ML_SVM, trainauto_sigmoid)
{
const int datasize = 100;
const float scale_factor = 0.5;
const float radius = 2.0;
cv::Ptr<TrainData> data = makeCircleData(datasize, scale_factor, radius);
ASSERT_TRUE(data);
cv::Ptr<SVM> svm = SVM::create();
ASSERT_TRUE(svm);
svm->setKernel(SVM::SIGMOID);
svm->setGamma(10.0);
svm->setCoef0(-10.0);
svm->trainAuto( data, 10 ); // 10-fold cross validation.
float test_data0[2] = {radius, radius};
cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
EXPECT_FLOAT_EQ(svm->predict( test_point0 ), 0);
float test_data1[2] = {scale_factor * radius, scale_factor * radius};
cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
EXPECT_FLOAT_EQ(svm->predict( test_point1 ), 1);
}
TEST(ML_SVM, trainAuto_regression_5369)
{
const int datasize = 100;
Ptr<TrainData> data = makeRandomData2(datasize);
cv::Ptr<SVM> svm = SVM::create();
svm->trainAuto( data, 10 ); // 10-fold cross validation.
float test_data0[2] = {0.25f, 0.25f};
cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
float result0 = svm->predict( test_point0 );
float test_data1[2] = {0.75f, 0.75f};
cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
float result1 = svm->predict( test_point1 );
EXPECT_EQ(0., result0);
EXPECT_EQ(1., result1);
}
TEST(ML_SVM, getSupportVectors)
{
// Set up training data
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
Ptr<SVM> svm = SVM::create();
ASSERT_TRUE(svm);
svm->setType(SVM::C_SVC);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
// Test retrieval of SVs and compressed SVs on linear SVM
svm->setKernel(SVM::LINEAR);
svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat);
Mat sv = svm->getSupportVectors();
EXPECT_EQ(1, sv.rows); // by default compressed SV returned
sv = svm->getUncompressedSupportVectors();
EXPECT_EQ(3, sv.rows);
// Test retrieval of SVs and compressed SVs on non-linear SVM
svm->setKernel(SVM::POLY);
svm->setDegree(2);
svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat);
sv = svm->getSupportVectors();
EXPECT_EQ(3, sv.rows);
sv = svm->getUncompressedSupportVectors();
EXPECT_EQ(0, sv.rows); // inapplicable for non-linear SVMs
}
}} // namespace

View File

@ -1,189 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test {
void defaultDistribs( Mat& means, vector<Mat>& covs, int type)
{
float mp0[] = {0.0f, 0.0f}, cp0[] = {0.67f, 0.0f, 0.0f, 0.67f};
float mp1[] = {5.0f, 0.0f}, cp1[] = {1.0f, 0.0f, 0.0f, 1.0f};
float mp2[] = {1.0f, 5.0f}, cp2[] = {1.0f, 0.0f, 0.0f, 1.0f};
means.create(3, 2, type);
Mat m0( 1, 2, CV_32FC1, mp0 ), c0( 2, 2, CV_32FC1, cp0 );
Mat m1( 1, 2, CV_32FC1, mp1 ), c1( 2, 2, CV_32FC1, cp1 );
Mat m2( 1, 2, CV_32FC1, mp2 ), c2( 2, 2, CV_32FC1, cp2 );
means.resize(3), covs.resize(3);
Mat mr0 = means.row(0);
m0.convertTo(mr0, type);
c0.convertTo(covs[0], type);
Mat mr1 = means.row(1);
m1.convertTo(mr1, type);
c1.convertTo(covs[1], type);
Mat mr2 = means.row(2);
m2.convertTo(mr2, type);
c2.convertTo(covs[2], type);
}
// generate point sets from normal distributions
void generateData( Mat& data, Mat& labels, const vector<int>& sizes, const Mat& _means, const vector<Mat>& covs, int dataType, int labelType )
{
vector<int>::const_iterator sit = sizes.begin();
int total = 0;
for( ; sit != sizes.end(); ++sit )
total += *sit;
CV_Assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() );
CV_Assert( !data.empty() && data.rows == total );
CV_Assert( data.type() == dataType );
labels.create( data.rows, 1, labelType );
randn( data, Scalar::all(-1.0), Scalar::all(1.0) );
vector<Mat> means(sizes.size());
for(int i = 0; i < _means.rows; i++)
means[i] = _means.row(i);
vector<Mat>::const_iterator mit = means.begin(), cit = covs.begin();
int bi, ei = 0;
sit = sizes.begin();
for( int p = 0, l = 0; sit != sizes.end(); ++sit, ++mit, ++cit, l++ )
{
bi = ei;
ei = bi + *sit;
CV_Assert( mit->rows == 1 && mit->cols == data.cols );
CV_Assert( cit->rows == data.cols && cit->cols == data.cols );
for( int i = bi; i < ei; i++, p++ )
{
Mat r = data.row(i);
r = r * (*cit) + *mit;
if( labelType == CV_32FC1 )
labels.at<float>(p, 0) = (float)l;
else if( labelType == CV_32SC1 )
labels.at<int>(p, 0) = l;
else
{
CV_DbgAssert(0);
}
}
}
}
int maxIdx( const vector<int>& count )
{
int idx = -1;
int maxVal = -1;
vector<int>::const_iterator it = count.begin();
for( int i = 0; it != count.end(); ++it, i++ )
{
if( *it > maxVal)
{
maxVal = *it;
idx = i;
}
}
CV_Assert( idx >= 0);
return idx;
}
bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap, bool checkClusterUniq)
{
size_t total = 0, nclusters = sizes.size();
for(size_t i = 0; i < sizes.size(); i++)
total += sizes[i];
CV_Assert( !labels.empty() );
CV_Assert( labels.total() == total && (labels.cols == 1 || labels.rows == 1));
CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 );
bool isFlt = labels.type() == CV_32FC1;
labelsMap.resize(nclusters);
vector<bool> buzy(nclusters, false);
int startIndex = 0;
for( size_t clusterIndex = 0; clusterIndex < sizes.size(); clusterIndex++ )
{
vector<int> count( nclusters, 0 );
for( int i = startIndex; i < startIndex + sizes[clusterIndex]; i++)
{
int lbl = isFlt ? (int)labels.at<float>(i) : labels.at<int>(i);
CV_Assert(lbl < (int)nclusters);
count[lbl]++;
CV_Assert(count[lbl] < (int)total);
}
startIndex += sizes[clusterIndex];
int cls = maxIdx( count );
CV_Assert( !checkClusterUniq || !buzy[cls] );
labelsMap[clusterIndex] = cls;
buzy[cls] = true;
}
if(checkClusterUniq)
{
for(size_t i = 0; i < buzy.size(); i++)
if(!buzy[i])
return false;
}
return true;
}
bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent, bool checkClusterUniq)
{
err = 0;
CV_Assert( !labels.empty() && !origLabels.empty() );
CV_Assert( labels.rows == 1 || labels.cols == 1 );
CV_Assert( origLabels.rows == 1 || origLabels.cols == 1 );
CV_Assert( labels.total() == origLabels.total() );
CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 );
CV_Assert( origLabels.type() == labels.type() );
vector<int> labelsMap;
bool isFlt = labels.type() == CV_32FC1;
if( !labelsEquivalent )
{
if( !getLabelsMap( labels, sizes, labelsMap, checkClusterUniq ) )
return false;
for( int i = 0; i < labels.rows; i++ )
if( isFlt )
err += labels.at<float>(i) != labelsMap[(int)origLabels.at<float>(i)] ? 1.f : 0.f;
else
err += labels.at<int>(i) != labelsMap[origLabels.at<int>(i)] ? 1.f : 0.f;
}
else
{
for( int i = 0; i < labels.rows; i++ )
if( isFlt )
err += labels.at<float>(i) != origLabels.at<float>(i) ? 1.f : 0.f;
else
err += labels.at<int>(i) != origLabels.at<int>(i) ? 1.f : 0.f;
}
err /= (float)labels.rows;
return true;
}
bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error)
{
error = 0.0f;
float accuracy = 0.0f;
Mat _p_labels_temp;
Mat _o_labels_temp;
_p_labels.convertTo(_p_labels_temp, CV_32S);
_o_labels.convertTo(_o_labels_temp, CV_32S);
CV_Assert(_p_labels_temp.total() == _o_labels_temp.total());
CV_Assert(_p_labels_temp.rows == _o_labels_temp.rows);
accuracy = (float)countNonZero(_p_labels_temp == _o_labels_temp)/_p_labels_temp.rows;
error = 1 - accuracy;
return true;
}
} // namespace

View File

@ -501,8 +501,6 @@ public:
*/
virtual void copyTo(HOGDescriptor& c) const;
/**@example samples/cpp/train_HOG.cpp
*/
/** @brief Computes HOG descriptors of given image.
@param img Matrix of the type CV_8U containing an image where HOG features will be calculated.
@param descriptors Matrix of the type CV_32F

View File

@ -1,64 +0,0 @@
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
from numpy import random
import cv2 as cv
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for _ in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
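# a.T dot a is symmetric positive semi-definite; adding a scaled identity keeps the covariance well conditioned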
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
points = np.float32( np.vstack(points) )
return points, ref_distrs
from tests_common import NewOpenCVTests
class gaussian_mix_test(NewOpenCVTests):
def test_gaussian_mix(self):
np.random.seed(10)
cluster_n = 5
img_size = 512
points, ref_distrs = make_gaussians(cluster_n, img_size)
em = cv.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
#found_distrs = zip(means, covs)
matches_count = 0
meanEps = 0.05
covEps = 0.1
for i in range(cluster_n):
for j in range(cluster_n):
if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and
cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps):
matches_count += 1
self.assertEqual(matches_count, cluster_n)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -136,11 +136,6 @@ class Bindings(NewOpenCVTests):
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from StereoMatcher
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
def test_raiseGeneralException(self):
with self.assertRaises((cv.error,),
msg='C++ exception is not propagated to Python in the right way') as cm:
@ -820,16 +815,6 @@ class Arguments(NewOpenCVTests):
self.assertEqual(flag, cv.utils.nested.testEchoBooleanFunction(flag),
msg="Function in nested module returns wrong result")
def test_class_from_submodule_has_global_alias(self):
self.assertTrue(hasattr(cv.ml, "Boost"),
msg="Class is not registered in the submodule")
self.assertTrue(hasattr(cv, "ml_Boost"),
msg="Class from submodule doesn't have alias in the "
"global module")
self.assertEqual(cv.ml.Boost, cv.ml_Boost,
msg="Classes from submodules and global module don't refer "
"to the same type")
def test_inner_class_has_global_alias(self):
self.assertTrue(hasattr(cv.SimpleBlobDetector, "Params"),
msg="Class is not registered as inner class")

View File

@ -7,7 +7,6 @@ set(OPENCV_CPP_SAMPLES_REQUIRED_DEPS
opencv_imgcodecs
opencv_videoio
opencv_highgui
opencv_ml
opencv_video
opencv_objdetect
opencv_photo

View File

@ -1,367 +0,0 @@
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
#include <algorithm>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
const int SZ = 20; // size of each digit is SZ x SZ
const int CLASS_N = 10;
const char* DIGITS_FN = "digits.png";
static void help(char** argv)
{
cout <<
"\n"
"SVM and KNearest digit recognition.\n"
"\n"
"Sample loads a dataset of handwritten digits from 'digits.png'.\n"
"Then it trains a SVM and KNearest classifiers on it and evaluates\n"
"their accuracy.\n"
"\n"
"Following preprocessing is applied to the dataset:\n"
" - Moment-based image deskew (see deskew())\n"
" - Digit images are split into 4 10x10 cells and 16-bin\n"
" histogram of oriented gradients is computed for each\n"
" cell\n"
" - Transform histograms to space with Hellinger metric (see [1] (RootSIFT))\n"
"\n"
"\n"
"[1] R. Arandjelovic, A. Zisserman\n"
" \"Three things everyone should know to improve object retrieval\"\n"
" http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf\n"
"\n"
"Usage:\n"
<< argv[0] << endl;
}
static void split2d(const Mat& image, const Size cell_size, vector<Mat>& cells)
{
int height = image.rows;
int width = image.cols;
int sx = cell_size.width;
int sy = cell_size.height;
cells.clear();
for (int i = 0; i < height; i += sy)
{
for (int j = 0; j < width; j += sx)
{
cells.push_back(image(Rect(j, i, sx, sy)));
}
}
}
static void load_digits(const char* fn, vector<Mat>& digits, vector<int>& labels)
{
digits.clear();
labels.clear();
String filename = samples::findFile(fn);
cout << "Loading " << filename << " ..." << endl;
Mat digits_img = imread(filename, IMREAD_GRAYSCALE);
split2d(digits_img, Size(SZ, SZ), digits);
for (int i = 0; i < CLASS_N; i++)
{
for (size_t j = 0; j < digits.size() / CLASS_N; j++)
{
labels.push_back(i);
}
}
}
static void deskew(const Mat& img, Mat& deskewed_img)
{
Moments m = moments(img);
if (abs(m.mu02) < 0.01)
{
deskewed_img = img.clone();
return;
}
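// estimate the skew from image moments (mu11/mu02) and undo it with an affine shear;
// the -0.5*SZ*skew term keeps the digit centred, and WARP_INVERSE_MAP applies the inverse mapping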
float skew = (float)(m.mu11 / m.mu02);
float M_vals[2][3] = {{1, skew, -0.5f * SZ * skew}, {0, 1, 0}};
Mat M(Size(3, 2), CV_32F, &M_vals[0][0]);
warpAffine(img, deskewed_img, M, Size(SZ, SZ), WARP_INVERSE_MAP | INTER_LINEAR);
}
static void mosaic(const int width, const vector<Mat>& images, Mat& grid)
{
int mat_width = SZ * width;
int mat_height = SZ * (int)ceil((double)images.size() / width);
if (!images.empty())
{
grid = Mat(Size(mat_width, mat_height), images[0].type());
for (size_t i = 0; i < images.size(); i++)
{
Mat location_on_grid = grid(Rect(SZ * ((int)i % width), SZ * ((int)i / width), SZ, SZ));
images[i].copyTo(location_on_grid);
}
}
}
static void evaluate_model(const vector<float>& predictions, const vector<Mat>& digits, const vector<int>& labels, Mat& mos)
{
double err = 0;
for (size_t i = 0; i < predictions.size(); i++)
{
if ((int)predictions[i] != labels[i])
{
err++;
}
}
err /= predictions.size();
cout << cv::format("error: %.2f %%", err * 100) << endl;
int confusion[10][10] = {};
for (size_t i = 0; i < labels.size(); i++)
{
confusion[labels[i]][(int)predictions[i]]++;
}
cout << "confusion matrix:" << endl;
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 10; j++)
{
cout << cv::format("%2d ", confusion[i][j]);
}
cout << endl;
}
cout << endl;
vector<Mat> vis;
for (size_t i = 0; i < digits.size(); i++)
{
Mat img;
cvtColor(digits[i], img, COLOR_GRAY2BGR);
if ((int)predictions[i] != labels[i])
{
for (int j = 0; j < img.rows; j++)
{
for (int k = 0; k < img.cols; k++)
{
img.at<Vec3b>(j, k)[0] = 0;
img.at<Vec3b>(j, k)[1] = 0;
}
}
}
vis.push_back(img);
}
mosaic(25, vis, mos);
}
static void bincount(const Mat& x, const Mat& weights, const int min_length, vector<double>& bins)
{
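// weighted histogram: for every element x(i,j), add weights(i,j) to bins[x(i,j)];
// used below to accumulate gradient magnitudes per orientation bin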
double max_x_val = 0;
minMaxLoc(x, NULL, &max_x_val);
bins = vector<double>(max((int)max_x_val, min_length));
for (int i = 0; i < x.rows; i++)
{
for (int j = 0; j < x.cols; j++)
{
bins[x.at<int>(i, j)] += weights.at<float>(i, j);
}
}
}
static void preprocess_hog(const vector<Mat>& digits, Mat& hog)
{
int bin_n = 16;
int half_cell = SZ / 2;
double eps = 1e-7;
hog = Mat(Size(4 * bin_n, (int)digits.size()), CV_32F);
for (size_t img_index = 0; img_index < digits.size(); img_index++)
{
Mat gx;
Sobel(digits[img_index], gx, CV_32F, 1, 0);
Mat gy;
Sobel(digits[img_index], gy, CV_32F, 0, 1);
Mat mag;
Mat ang;
cartToPolar(gx, gy, mag, ang);
Mat bin(ang.size(), CV_32S);
for (int i = 0; i < ang.rows; i++)
{
for (int j = 0; j < ang.cols; j++)
{
bin.at<int>(i, j) = (int)(bin_n * ang.at<float>(i, j) / (2 * CV_PI));
}
}
Mat bin_cells[] = {
bin(Rect(0, 0, half_cell, half_cell)),
bin(Rect(half_cell, 0, half_cell, half_cell)),
bin(Rect(0, half_cell, half_cell, half_cell)),
bin(Rect(half_cell, half_cell, half_cell, half_cell))
};
Mat mag_cells[] = {
mag(Rect(0, 0, half_cell, half_cell)),
mag(Rect(half_cell, 0, half_cell, half_cell)),
mag(Rect(0, half_cell, half_cell, half_cell)),
mag(Rect(half_cell, half_cell, half_cell, half_cell))
};
vector<double> hist;
hist.reserve(4 * bin_n);
for (int i = 0; i < 4; i++)
{
vector<double> partial_hist;
bincount(bin_cells[i], mag_cells[i], bin_n, partial_hist);
hist.insert(hist.end(), partial_hist.begin(), partial_hist.end());
}
// transform to Hellinger kernel
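// L1-normalise the histogram, take element-wise square roots, then L2-normalise (RootSIFT-style),
// so that Euclidean distances between descriptors approximate Hellinger distances between the original histograms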
double sum = 0;
for (size_t i = 0; i < hist.size(); i++)
{
sum += hist[i];
}
for (size_t i = 0; i < hist.size(); i++)
{
hist[i] /= sum + eps;
hist[i] = sqrt(hist[i]);
}
double hist_norm = norm(hist);
for (size_t i = 0; i < hist.size(); i++)
{
hog.at<float>((int)img_index, (int)i) = (float)(hist[i] / (hist_norm + eps));
}
}
}
static void shuffle(vector<Mat>& digits, vector<int>& labels)
{
vector<int> shuffled_indexes(digits.size());
for (size_t i = 0; i < digits.size(); i++)
{
shuffled_indexes[i] = (int)i;
}
randShuffle(shuffled_indexes);
vector<Mat> shuffled_digits(digits.size());
vector<int> shuffled_labels(labels.size());
for (size_t i = 0; i < shuffled_indexes.size(); i++)
{
shuffled_digits[shuffled_indexes[i]] = digits[i];
shuffled_labels[shuffled_indexes[i]] = labels[i];
}
digits = shuffled_digits;
labels = shuffled_labels;
}
int main(int /* argc */, char* argv[])
{
help(argv);
vector<Mat> digits;
vector<int> labels;
load_digits(DIGITS_FN, digits, labels);
cout << "preprocessing..." << endl;
// shuffle digits
shuffle(digits, labels);
vector<Mat> digits2;
for (size_t i = 0; i < digits.size(); i++)
{
Mat deskewed_digit;
deskew(digits[i], deskewed_digit);
digits2.push_back(deskewed_digit);
}
Mat samples;
preprocess_hog(digits2, samples);
int train_n = (int)(0.9 * samples.rows);
Mat test_set;
vector<Mat> digits_test(digits2.begin() + train_n, digits2.end());
mosaic(25, digits_test, test_set);
imshow("test set", test_set);
Mat samples_train = samples(Rect(0, 0, samples.cols, train_n));
Mat samples_test = samples(Rect(0, train_n, samples.cols, samples.rows - train_n));
vector<int> labels_train(labels.begin(), labels.begin() + train_n);
vector<int> labels_test(labels.begin() + train_n, labels.end());
Ptr<ml::KNearest> k_nearest;
Ptr<ml::SVM> svm;
vector<float> predictions;
Mat vis;
cout << "training KNearest..." << endl;
k_nearest = ml::KNearest::create();
k_nearest->train(samples_train, ml::ROW_SAMPLE, labels_train);
// predict digits with KNearest
k_nearest->findNearest(samples_test, 4, predictions);
evaluate_model(predictions, digits_test, labels_test, vis);
imshow("KNearest test", vis);
k_nearest.release();
cout << "training SVM..." << endl;
svm = ml::SVM::create();
svm->setGamma(5.383);
svm->setC(2.67);
svm->setKernel(ml::SVM::RBF);
svm->setType(ml::SVM::C_SVC);
svm->train(samples_train, ml::ROW_SAMPLE, labels_train);
// predict digits with SVM
svm->predict(samples_test, predictions);
evaluate_model(predictions, digits_test, labels_test, vis);
imshow("SVM test", vis);
cout << "Saving SVM as \"digits_svm.yml\"..." << endl;
svm->save("digits_svm.yml");
svm.release();
waitKey();
return 0;
}

View File

@ -1,70 +0,0 @@
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
using namespace cv;
using namespace cv::ml;
int main( int /*argc*/, char** /*argv*/ )
{
const int N = 4;
const int N1 = (int)sqrt((double)N);
const Scalar colors[] =
{
Scalar(0,0,255), Scalar(0,255,0),
Scalar(0,255,255),Scalar(255,255,0)
};
int i, j;
int nsamples = 100;
Mat samples( nsamples, 2, CV_32FC1 );
Mat labels;
Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 );
Mat sample( 1, 2, CV_32FC1 );
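// view the sample matrix as 2-channel (x,y) points so that randn() fills each cluster block
// with a 2-D normal distribution; it is reshaped back to a single channel before training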
samples = samples.reshape(2, 0);
for( i = 0; i < N; i++ )
{
// form the training samples
Mat samples_part = samples.rowRange(i*nsamples/N, (i+1)*nsamples/N );
Scalar mean(((i%N1)+1)*img.rows/(N1+1),
((i/N1)+1)*img.rows/(N1+1));
Scalar sigma(30,30);
randn( samples_part, mean, sigma );
}
samples = samples.reshape(1, 0);
// cluster the data
Ptr<EM> em_model = EM::create();
em_model->setClustersNumber(N);
em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1));
em_model->trainEM( samples, noArray(), labels, noArray() );
// classify every image pixel
for( i = 0; i < img.rows; i++ )
{
for( j = 0; j < img.cols; j++ )
{
sample.at<float>(0) = (float)j;
sample.at<float>(1) = (float)i;
int response = cvRound(em_model->predict2( sample, noArray() )[1]);
Scalar c = colors[response];
circle( img, Point(j, i), 1, c*0.75, FILLED );
}
}
//draw the clustered samples
for( i = 0; i < nsamples; i++ )
{
Point pt(cvRound(samples.at<float>(i, 0)), cvRound(samples.at<float>(i, 1)));
circle( img, pt, 1, colors[labels.at<int>(i)], FILLED );
}
imshow( "EM-clustering result", img );
waitKey(0);
return 0;
}

View File

@ -1,558 +0,0 @@
#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"
#include <cstdio>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::ml;
static void help(char** argv)
{
printf("\nThe sample demonstrates how to train Random Trees classifier\n"
"(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
"\n"
"We use the sample database letter-recognition.data\n"
"from UCI Repository, here is the link:\n"
"\n"
"Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
"UCI Repository of machine learning databases\n"
"[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
"Irvine, CA: University of California, Department of Information and Computer Science.\n"
"\n"
"The dataset consists of 20000 feature vectors along with the\n"
"responses - capital latin letters A..Z.\n"
"The first 16000 (10000 for boosting)) samples are used for training\n"
"and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
"======================================================\n");
printf("\nThis is letter recognition sample.\n"
"The usage: %s [-data=<path to letter-recognition.data>] \\\n"
" [-save=<output XML file for the classifier>] \\\n"
" [-load=<XML file with the pre-trained classifier>] \\\n"
" [-boost|-mlp|-knearest|-nbayes|-svm] # to use boost/mlp/knearest/SVM classifier instead of default Random Trees\n", argv[0] );
}
// This function reads data and responses from the file <filename>
static bool
read_num_class_data( const string& filename, int var_count,
Mat* _data, Mat* _responses )
{
const int M = 1024;
char buf[M+2];
Mat el_ptr(1, var_count, CV_32F);
int i;
vector<int> responses;
_data->release();
_responses->release();
FILE* f = fopen( filename.c_str(), "rt" );
if( !f )
{
cout << "Could not read the database " << filename << endl;
return false;
}
for(;;)
{
char* ptr;
if( !fgets( buf, M, f ) || !strchr( buf, ',' ) )
break;
responses.push_back((int)buf[0]);
ptr = buf+2;
for( i = 0; i < var_count; i++ )
{
int n = 0;
sscanf( ptr, "%f%n", &el_ptr.at<float>(i), &n );
ptr += n + 1;
}
if( i < var_count )
break;
_data->push_back(el_ptr);
}
fclose(f);
Mat(responses).copyTo(*_responses);
cout << "The database " << filename << " is loaded.\n";
return true;
}
template<typename T>
static Ptr<T> load_classifier(const string& filename_to_load)
{
// load classifier from the specified file
Ptr<T> model = StatModel::load<T>( filename_to_load );
if( model.empty() )
cout << "Could not read the classifier " << filename_to_load << endl;
else
cout << "The classifier " << filename_to_load << " is loaded.\n";
return model;
}
static Ptr<TrainData>
prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
Mat sample_idx = Mat::zeros( 1, data.rows, CV_8U );
Mat train_samples = sample_idx.colRange(0, ntrain_samples);
train_samples.setTo(Scalar::all(1));
int nvars = data.cols;
Mat var_type( nvars + 1, 1, CV_8U );
var_type.setTo(Scalar::all(VAR_ORDERED));
var_type.at<uchar>(nvars) = VAR_CATEGORICAL;
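// all input features are ordered (numerical); the extra last entry marks the response as categorical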
return TrainData::create(data, ROW_SAMPLE, responses,
noArray(), sample_idx, noArray(), var_type);
}
inline TermCriteria TC(int iters, double eps)
{
return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}
static void test_and_save_classifier(const Ptr<StatModel>& model,
const Mat& data, const Mat& responses,
int ntrain_samples, int rdelta,
const string& filename_to_save)
{
int i, nsamples_all = data.rows;
double train_hr = 0, test_hr = 0;
// compute prediction error on train and test data
for( i = 0; i < nsamples_all; i++ )
{
Mat sample = data.row(i);
float r = model->predict( sample );
r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
if( i < ntrain_samples )
train_hr += r;
else
test_hr += r;
}
test_hr /= nsamples_all - ntrain_samples;
train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.;
printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
train_hr*100., test_hr*100. );
if( !filename_to_save.empty() )
{
model->save( filename_to_save );
}
}
static bool
build_rtrees_classifier( const string& data_filename,
const string& filename_to_save,
const string& filename_to_load )
{
Mat data;
Mat responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
Ptr<RTrees> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
// Create or load Random Trees classifier
if( !filename_to_load.empty() )
{
model = load_classifier<RTrees>(filename_to_load);
if( model.empty() )
return false;
ntrain_samples = 0;
}
else
{
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
// Params( int maxDepth, int minSampleCount,
// double regressionAccuracy, bool useSurrogates,
// int maxCategories, const Mat& priors,
// bool calcVarImportance, int nactiveVars,
// TermCriteria termCrit );
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = RTrees::create();
model->setMaxDepth(10);
model->setMinSampleCount(10);
model->setRegressionAccuracy(0);
model->setUseSurrogates(false);
model->setMaxCategories(15);
model->setPriors(Mat());
model->setCalculateVarImportance(true);
model->setActiveVarCount(4);
model->setTermCriteria(TC(100,0.01f));
model->train(tdata);
cout << endl;
}
test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save);
cout << "Number of trees: " << model->getRoots().size() << endl;
// Print variable importance
Mat var_importance = model->getVarImportance();
if( !var_importance.empty() )
{
double rt_imp_sum = sum( var_importance )[0];
printf("var#\timportance (in %%):\n");
int i, n = (int)var_importance.total();
for( i = 0; i < n; i++ )
printf( "%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i)/rt_imp_sum);
}
return true;
}
static bool
build_boost_classifier( const string& data_filename,
const string& filename_to_save,
const string& filename_to_load )
{
const int class_count = 26;
Mat data;
Mat responses;
Mat weak_responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
int i, j, k;
Ptr<Boost> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.5);
int var_count = data.cols;
// Create or load Boosted Tree classifier
if( !filename_to_load.empty() )
{
model = load_classifier<Boost>(filename_to_load);
if( model.empty() )
return false;
ntrain_samples = 0;
}
else
{
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// As the boosted tree classifier in the ML module can currently only be trained
// for 2-class problems, we transform the training database by
// "unrolling" each training sample as many times as the number of
// classes (26) that we have.
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Mat new_data( ntrain_samples*class_count, var_count + 1, CV_32F );
Mat new_responses( ntrain_samples*class_count, 1, CV_32S );
// 1. unroll the database type mask
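// each sample is replicated class_count times with the candidate class index appended as an extra
// feature; the binary response is 1 only when that index matches the sample's true letter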
printf( "Unrolling the database...\n");
for( i = 0; i < ntrain_samples; i++ )
{
const float* data_row = data.ptr<float>(i);
for( j = 0; j < class_count; j++ )
{
float* new_data_row = (float*)new_data.ptr<float>(i*class_count+j);
memcpy(new_data_row, data_row, var_count*sizeof(data_row[0]));
new_data_row[var_count] = (float)j;
new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j+'A';
}
}
Mat var_type( 1, var_count + 2, CV_8U );
var_type.setTo(Scalar::all(VAR_ORDERED));
var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count+1) = VAR_CATEGORICAL;
Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
noArray(), noArray(), noArray(), var_type);
vector<double> priors(2);
priors[0] = 1;
priors[1] = 26;
cout << "Training the classifier (may take a few minutes)...\n";
model = Boost::create();
model->setBoostType(Boost::GENTLE);
model->setWeakCount(100);
model->setWeightTrimRate(0.95);
model->setMaxDepth(5);
model->setUseSurrogates(false);
model->setPriors(Mat(priors));
model->train(tdata);
cout << endl;
}
Mat temp_sample( 1, var_count + 1, CV_32F );
float* tptr = temp_sample.ptr<float>();
// compute prediction error on train and test data
double train_hr = 0, test_hr = 0;
for( i = 0; i < nsamples_all; i++ )
{
int best_class = 0;
double max_sum = -DBL_MAX;
const float* ptr = data.ptr<float>(i);
for( k = 0; k < var_count; k++ )
tptr[k] = ptr[k];
for( j = 0; j < class_count; j++ )
{
tptr[var_count] = (float)j;
float s = model->predict( temp_sample, noArray(), StatModel::RAW_OUTPUT );
if( max_sum < s )
{
max_sum = s;
best_class = j + 'A';
}
}
double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
if( i < ntrain_samples )
train_hr += r;
else
test_hr += r;
}
test_hr /= nsamples_all-ntrain_samples;
train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.;
printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
train_hr*100., test_hr*100. );
cout << "Number of trees: " << model->getRoots().size() << endl;
// Save classifier to file if needed
if( !filename_to_save.empty() )
model->save( filename_to_save );
return true;
}
static bool
build_mlp_classifier( const string& data_filename,
const string& filename_to_save,
const string& filename_to_load )
{
const int class_count = 26;
Mat data;
Mat responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
Ptr<ANN_MLP> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
// Create or load MLP classifier
if( !filename_to_load.empty() )
{
model = load_classifier<ANN_MLP>(filename_to_load);
if( model.empty() )
return false;
ntrain_samples = 0;
}
else
{
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// MLP does not support categorical variables explicitly.
// So, instead of the output class label, we will use
// a binary vector of <class_count> components for training and,
// therefore, MLP will give us a vector of "probabilities" at the
// prediction stage
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Mat train_data = data.rowRange(0, ntrain_samples);
Mat train_responses = Mat::zeros( ntrain_samples, class_count, CV_32F );
// 1. unroll the responses
cout << "Unrolling the responses...\n";
for( int i = 0; i < ntrain_samples; i++ )
{
int cls_label = responses.at<int>(i) - 'A';
train_responses.at<float>(i, cls_label) = 1.f;
}
// 2. train classifier
int layer_sz[] = { data.cols, 100, 100, class_count };
int nlayers = (int)(sizeof(layer_sz)/sizeof(layer_sz[0]));
Mat layer_sizes( 1, nlayers, CV_32S, layer_sz );
#if 1
int method = ANN_MLP::BACKPROP;
double method_param = 0.001;
int max_iter = 300;
#else
int method = ANN_MLP::RPROP;
double method_param = 0.1;
int max_iter = 1000;
#endif
Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
cout << "Training the classifier (may take a few minutes)...\n";
model = ANN_MLP::create();
model->setLayerSizes(layer_sizes);
model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
model->setTermCriteria(TC(max_iter,0));
model->setTrainMethod(method, method_param);
model->train(tdata);
cout << endl;
}
test_and_save_classifier(model, data, responses, ntrain_samples, 'A', filename_to_save);
return true;
}
static bool
build_knearest_classifier( const string& data_filename, int K )
{
Mat data;
Mat responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
Ptr<KNearest> model = KNearest::create();
model->setDefaultK(K);
model->setIsClassifier(true);
model->train(tdata);
cout << endl;
test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
return true;
}
static bool
build_nbayes_classifier( const string& data_filename )
{
Mat data;
Mat responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
Ptr<NormalBayesClassifier> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = NormalBayesClassifier::create();
model->train(tdata);
cout << endl;
test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
return true;
}
static bool
build_svm_classifier( const string& data_filename,
const string& filename_to_save,
const string& filename_to_load )
{
Mat data;
Mat responses;
bool ok = read_num_class_data( data_filename, 16, &data, &responses );
if( !ok )
return ok;
Ptr<SVM> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
// Create or load Random Trees classifier
if( !filename_to_load.empty() )
{
model = load_classifier<SVM>(filename_to_load);
if( model.empty() )
return false;
ntrain_samples = 0;
}
else
{
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = SVM::create();
model->setType(SVM::C_SVC);
model->setKernel(SVM::LINEAR);
model->setC(1);
model->train(tdata);
cout << endl;
}
test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save);
return true;
}
int main( int argc, char *argv[] )
{
string filename_to_save = "";
string filename_to_load = "";
string data_filename;
int method = 0;
cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}"
"{mlp||}{knn knearest||}{nbayes||}{svm||}");
data_filename = samples::findFile(parser.get<string>("data"));
if (parser.has("save"))
filename_to_save = parser.get<string>("save");
if (parser.has("load"))
filename_to_load = samples::findFile(parser.get<string>("load"));
if (parser.has("boost"))
method = 1;
else if (parser.has("mlp"))
method = 2;
else if (parser.has("knearest"))
method = 3;
else if (parser.has("nbayes"))
method = 4;
else if (parser.has("svm"))
method = 5;
help(argv);
if( (method == 0 ?
build_rtrees_classifier( data_filename, filename_to_save, filename_to_load ) :
method == 1 ?
build_boost_classifier( data_filename, filename_to_save, filename_to_load ) :
method == 2 ?
build_mlp_classifier( data_filename, filename_to_save, filename_to_load ) :
method == 3 ?
build_knearest_classifier( data_filename, 10 ) :
method == 4 ?
build_nbayes_classifier( data_filename) :
method == 5 ?
build_svm_classifier( data_filename, filename_to_save, filename_to_load ):
-1) < 0)
return 0;
}

View File

@ -1,127 +0,0 @@
// Logistic Regression sample
// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
using namespace cv::ml;
static void showImage(const Mat &data, int columns, const String &name)
{
Mat bigImage;
for(int i = 0; i < data.rows; ++i)
{
bigImage.push_back(data.row(i).reshape(0, columns));
}
imshow(name, bigImage.t());
}
static float calculateAccuracyPercent(const Mat &original, const Mat &predicted)
{
return 100 * (float)countNonZero(original == predicted) / predicted.rows;
}
int main()
{
const String filename = samples::findFile("data01.xml");
cout << "**********************************************************************" << endl;
cout << filename
<< " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
cout << "Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix"
<< endl;
cout << "**********************************************************************" << endl;
Mat data, labels;
{
cout << "loading the dataset...";
FileStorage f;
if(f.open(filename, FileStorage::READ))
{
f["datamat"] >> data;
f["labelsmat"] >> labels;
f.release();
}
else
{
cerr << "file can not be opened: " << filename << endl;
return 1;
}
data.convertTo(data, CV_32F);
labels.convertTo(labels, CV_32F);
cout << "read " << data.rows << " rows of data" << endl;
}
Mat data_train, data_test;
Mat labels_train, labels_test;
for(int i = 0; i < data.rows; i++)
{
if(i % 2 == 0)
{
data_train.push_back(data.row(i));
labels_train.push_back(labels.row(i));
}
else
{
data_test.push_back(data.row(i));
labels_test.push_back(labels.row(i));
}
}
cout << "training/testing samples count: " << data_train.rows << "/" << data_test.rows << endl;
// display sample image
showImage(data_train, 28, "train data");
showImage(data_test, 28, "test data");
// simple case with batch gradient
cout << "training...";
//! [init]
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(0.001);
lr1->setIterations(10);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(1);
//! [init]
lr1->train(data_train, ROW_SAMPLE, labels_train);
cout << "done!" << endl;
cout << "predicting...";
Mat responses;
lr1->predict(data_test, responses);
cout << "done!" << endl;
// show prediction report
cout << "original vs predicted:" << endl;
labels_test.convertTo(labels_test, CV_32S);
cout << labels_test.t() << endl;
cout << responses.t() << endl;
cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl;
// save the classifier
const String saveFilename = "NewLR_Trained.xml";
cout << "saving the classifier to " << saveFilename << endl;
lr1->save(saveFilename);
// load the classifier onto new object
cout << "loading a new classifier from " << saveFilename << endl;
Ptr<LogisticRegression> lr2 = StatModel::load<LogisticRegression>(saveFilename);
// predict using loaded classifier
cout << "predicting the dataset using the loaded classifier...";
Mat responses2;
lr2->predict(data_test, responses2);
cout << "done!" << endl;
// calculate accuracy
cout << labels_test.t() << endl;
cout << responses2.t() << endl;
cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses2) << "%" << endl;
waitKey(0);
return 0;
}

View File

@ -1,65 +0,0 @@
#include <opencv2/ml/ml.hpp>
#include <iostream> // for std::cout
#include <cstdio>   // for printf
using namespace std;
using namespace cv;
using namespace cv::ml;
int main()
{
//create random training data
Mat_<float> data(100, 100);
randn(data, Mat::zeros(1, 1, data.type()), Mat::ones(1, 1, data.type()));
//half of the samples for each class
Mat_<float> responses(data.rows, 2);
for (int i = 0; i<data.rows; ++i)
{
if (i < data.rows/2)
{
responses(i, 0) = 1;
responses(i, 1) = 0;
}
else
{
responses(i, 0) = 0;
responses(i, 1) = 1;
}
}
/*
//example code for just a single response (regression)
Mat_<float> responses(data.rows, 1);
for (int i=0; i<responses.rows; ++i)
responses(i, 0) = i < responses.rows / 2 ? 0 : 1;
*/
//create the neural network
Mat_<int> layerSizes(1, 3);
layerSizes(0, 0) = data.cols;
layerSizes(0, 1) = 20;
layerSizes(0, 2) = responses.cols;
Ptr<ANN_MLP> network = ANN_MLP::create();
network->setLayerSizes(layerSizes);
network->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0.1, 0.1);
network->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
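// Wrap the samples and one-hot responses in a TrainData object and train the 100-20-2 network with backpropagation.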
Ptr<TrainData> trainData = TrainData::create(data, ROW_SAMPLE, responses);
network->train(trainData);
if (network->isTrained())
{
printf("Predict one-vector:\n");
Mat result;
network->predict(Mat::ones(1, data.cols, data.type()), result);
cout << result << endl;
printf("Predict training data:\n");
for (int i=0; i<data.rows; ++i)
{
network->predict(data.row(i), result);
cout << result << endl;
}
}
return 0;
}

View File

@ -1,399 +0,0 @@
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>
using namespace std;
using namespace cv;
using namespace cv::ml;
const Scalar WHITE_COLOR = Scalar(255,255,255);
const string winName = "points";
const int testStep = 5;
Mat img, imgDst;
RNG rng;
vector<Point> trainedPoints;
vector<int> trainedPointsMarkers;
const int MAX_CLASSES = 2;
vector<Vec3b> classColors(MAX_CLASSES);
int currentClass = 0;
vector<int> classCounters(MAX_CLASSES);
#define _NBC_ 1 // normal Bayesian classifier
#define _KNN_ 1 // k nearest neighbors classifier
#define _SVM_ 1 // support vector machine
#define _DT_ 1 // decision tree
#define _BT_ 1 // AdaBoost
#define _GBT_ 0 // gradient boosted trees
#define _RF_ 1 // random forest
#define _ANN_ 1 // artificial neural networks
#define _EM_ 1 // expectation-maximization
static void on_mouse( int event, int x, int y, int /*flags*/, void* )
{
if( img.empty() )
return;
int updateFlag = 0;
if( event == EVENT_LBUTTONUP )
{
trainedPoints.push_back( Point(x,y) );
trainedPointsMarkers.push_back( currentClass );
classCounters[currentClass]++;
updateFlag = true;
}
//draw
if( updateFlag )
{
img = Scalar::all(0);
// draw points
for( size_t i = 0; i < trainedPoints.size(); i++ )
{
Vec3b c = classColors[trainedPointsMarkers[i]];
circle( img, trainedPoints[i], 5, Scalar(c), -1 );
}
imshow( winName, img );
}
}
static Mat prepare_train_samples(const vector<Point>& pts)
{
Mat samples;
Mat(pts).reshape(1, (int)pts.size()).convertTo(samples, CV_32F);
return samples;
}
static Ptr<TrainData> prepare_train_data()
{
Mat samples = prepare_train_samples(trainedPoints);
return TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers));
}
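// Classify every testStep-th pixel of the canvas with the given model and paint it with its class colour.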
static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
{
Mat testSample( 1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
int response = (int)model->predict( testSample );
dst.at<Vec3b>(y, x) = classColors[response];
}
}
}
#if _NBC_
static void find_decision_boundary_NBC()
{
// learn classifier
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data());
predict_and_paint(normalBayesClassifier, imgDst);
}
#endif
#if _KNN_
static void find_decision_boundary_KNN( int K )
{
Ptr<KNearest> knn = KNearest::create();
knn->setDefaultK(K);
knn->setIsClassifier(true);
knn->train(prepare_train_data());
predict_and_paint(knn, imgDst);
}
#endif
#if _SVM_
static void find_decision_boundary_SVM( double C )
{
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::POLY); //SVM::LINEAR;
svm->setDegree(0.5);
svm->setGamma(1);
svm->setCoef0(1);
svm->setNu(0.5);
svm->setP(0);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
svm->setC(C);
svm->train(prepare_train_data());
predict_and_paint(svm, imgDst);
Mat sv = svm->getSupportVectors();
for( int i = 0; i < sv.rows; i++ )
{
const float* supportVector = sv.ptr<float>(i);
circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, Scalar(255,255,255), -1 );
}
}
#endif
#if _DT_
static void find_decision_boundary_DT()
{
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(8);
dtree->setMinSampleCount(2);
dtree->setUseSurrogates(false);
dtree->setCVFolds(0); // the number of cross-validation folds
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->train(prepare_train_data());
predict_and_paint(dtree, imgDst);
}
#endif
#if _BT_
static void find_decision_boundary_BT()
{
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::DISCRETE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
boost->train(prepare_train_data());
predict_and_paint(boost, imgDst);
}
#endif
#if _GBT_
static void find_decision_boundary_GBT()
{
GBTrees::Params params( GBTrees::DEVIANCE_LOSS, // loss_function_type
100, // weak_count
0.1f, // shrinkage
1.0f, // subsample_portion
2, // max_depth
false // use_surrogates )
);
Ptr<GBTrees> gbtrees = StatModel::train<GBTrees>(prepare_train_data(), params);
predict_and_paint(gbtrees, imgDst);
}
#endif
#if _RF_
static void find_decision_boundary_RF()
{
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(4);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0.f);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(1);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0));
rtrees->train(prepare_train_data());
predict_and_paint(rtrees, imgDst);
}
#endif
#if _ANN_
static void find_decision_boundary_ANN( const Mat& layer_sizes )
{
Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
{
trainClasses.at<float>(i, trainedPointsMarkers[i]) = 1.f;
}
Mat samples = prepare_train_samples(trainedPoints);
Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);
Ptr<ANN_MLP> ann = ANN_MLP::create();
ann->setLayerSizes(layer_sizes);
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1);
ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON));
ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001);
ann->train(tdata);
predict_and_paint(ann, imgDst);
}
#endif
#if _EM_
static void find_decision_boundary_EM()
{
img.copyTo( imgDst );
Mat samples = prepare_train_samples(trainedPoints);
int i, j, nmodels = (int)classColors.size();
vector<Ptr<EM> > em_models(nmodels);
Mat modelSamples;
for( i = 0; i < nmodels; i++ )
{
const int componentCount = 3;
modelSamples.release();
for( j = 0; j < samples.rows; j++ )
{
if( trainedPointsMarkers[j] == i )
modelSamples.push_back(samples.row(j));
}
// learn models
if( !modelSamples.empty() )
{
Ptr<EM> em = EM::create();
em->setClustersNumber(componentCount);
em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
em->trainEM(modelSamples, noArray(), noArray(), noArray());
em_models[i] = em;
}
}
// classify coordinate plane points using the Bayes classifier, i.e.
// y(x) = arg max_{i=1..nmodels} likelihoods_i(x)
Mat testSample(1, 2, CV_32FC1 );
Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX));
for( int y = 0; y < img.rows; y += testStep )
{
for( int x = 0; x < img.cols; x += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
for( i = 0; i < nmodels; i++ )
{
if( !em_models[i].empty() )
logLikelihoods.at<double>(i) = em_models[i]->predict2(testSample, noArray())[0];
}
Point maxLoc;
minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
imgDst.at<Vec3b>(y, x) = classColors[maxLoc.x];
}
}
}
#endif
int main()
{
cout << "Use:" << endl
<< " key '0' .. '1' - switch to class #n" << endl
<< " left mouse button - to add new point;" << endl
<< " key 'r' - to run the ML model;" << endl
<< " key 'i' - to init (clear) the data." << endl << endl;
cv::namedWindow( "points", 1 );
img.create( 480, 640, CV_8UC3 );
imgDst.create( 480, 640, CV_8UC3 );
imshow( "points", img );
setMouseCallback( "points", on_mouse );
classColors[0] = Vec3b(0, 255, 0);
classColors[1] = Vec3b(0, 0, 255);
for(;;)
{
char key = (char)waitKey();
if( key == 27 ) break;
if( key == 'i' ) // init
{
img = Scalar::all(0);
trainedPoints.clear();
trainedPointsMarkers.clear();
classCounters.assign(MAX_CLASSES, 0);
imshow( winName, img );
}
if( key == '0' || key == '1' )
{
currentClass = key - '0';
}
if( key == 'r' ) // run
{
double minVal = 0;
minMaxLoc(classCounters, &minVal, 0, 0, 0);
if( minVal == 0 )
{
printf("each class should have at least 1 point\n");
continue;
}
img.copyTo( imgDst );
#if _NBC_
find_decision_boundary_NBC();
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
find_decision_boundary_KNN( 3 );
imshow( "kNN", imgDst );
find_decision_boundary_KNN( 15 );
imshow( "kNN2", imgDst );
#endif
#if _SVM_
//(1)-(2)separable and not sets
find_decision_boundary_SVM( 1 );
imshow( "classificationSVM1", imgDst );
find_decision_boundary_SVM( 10 );
imshow( "classificationSVM2", imgDst );
#endif
#if _DT_
find_decision_boundary_DT();
imshow( "DT", imgDst );
#endif
#if _BT_
find_decision_boundary_BT();
imshow( "BT", imgDst);
#endif
#if _GBT_
find_decision_boundary_GBT();
imshow( "GBT", imgDst);
#endif
#if _RF_
find_decision_boundary_RF();
imshow( "RF", imgDst);
#endif
#if _ANN_
Mat layer_sizes1( 1, 3, CV_32SC1 );
layer_sizes1.at<int>(0) = 2;
layer_sizes1.at<int>(1) = 5;
layer_sizes1.at<int>(2) = (int)classColors.size();
find_decision_boundary_ANN( layer_sizes1 );
imshow( "ANN", imgDst );
#endif
#if _EM_
find_decision_boundary_EM();
imshow( "EM", imgDst );
#endif
}
}
return 0;
}

View File

@ -1,392 +0,0 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include <iostream>
#include <time.h>
using namespace cv;
using namespace cv::ml;
using namespace std;
vector< float > get_svm_detector( const Ptr< SVM >& svm );
void convert_to_ml( const std::vector< Mat > & train_samples, Mat& trainData );
void load_images( const String & dirname, vector< Mat > & img_lst, bool showImages );
void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size );
void computeHOGs( const Size wsize, const vector< Mat > & img_lst, vector< Mat > & gradient_lst, bool use_flip );
void test_trained_detector( String obj_det_filename, String test_dir, String videofilename );
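// get_svm_detector builds the single vector (primal weights followed by -rho) that HOGDescriptor::setSVMDetector() expects from a trained linear SVM.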
vector< float > get_svm_detector( const Ptr< SVM >& svm )
{
// get the support vectors
Mat sv = svm->getSupportVectors();
const int sv_total = sv.rows;
// get the decision function
Mat alpha, svidx;
double rho = svm->getDecisionFunction( 0, alpha, svidx );
CV_Assert( alpha.total() == 1 && svidx.total() == 1 && sv_total == 1 );
CV_Assert( (alpha.type() == CV_64F && alpha.at<double>(0) == 1.) ||
(alpha.type() == CV_32F && alpha.at<float>(0) == 1.f) );
CV_Assert( sv.type() == CV_32F );
vector< float > hog_detector( sv.cols + 1 );
memcpy( &hog_detector[0], sv.ptr(), sv.cols*sizeof( hog_detector[0] ) );
hog_detector[sv.cols] = (float)-rho;
return hog_detector;
}
/*
* Convert training/testing set to be used by OpenCV Machine Learning algorithms.
* TrainData is a matrix of size (#samples x max(#cols,#rows) per sample), in 32FC1.
* Transposition of samples is made if needed.
*/
void convert_to_ml( const vector< Mat > & train_samples, Mat& trainData )
{
//--Convert data
const int rows = (int)train_samples.size();
const int cols = (int)std::max( train_samples[0].cols, train_samples[0].rows );
Mat tmp( 1, cols, CV_32FC1 ); ///< used for transposition if needed
trainData = Mat( rows, cols, CV_32FC1 );
for( size_t i = 0 ; i < train_samples.size(); ++i )
{
CV_Assert( train_samples[i].cols == 1 || train_samples[i].rows == 1 );
if( train_samples[i].cols == 1 )
{
transpose( train_samples[i], tmp );
tmp.copyTo( trainData.row( (int)i ) );
}
else if( train_samples[i].rows == 1 )
{
train_samples[i].copyTo( trainData.row( (int)i ) );
}
}
}
void load_images( const String & dirname, vector< Mat > & img_lst, bool showImages = false )
{
vector< String > files;
glob( dirname, files );
for ( size_t i = 0; i < files.size(); ++i )
{
Mat img = imread( files[i] ); // load the image
if ( img.empty() )
{
cout << files[i] << " is invalid!" << endl; // invalid image, skip it.
continue;
}
if ( showImages )
{
imshow( "image", img );
waitKey( 1 );
}
img_lst.push_back( img );
}
}
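// Crop one random window of the requested size from each negative image that is larger than the window.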
void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size )
{
Rect box;
box.width = size.width;
box.height = size.height;
srand( (unsigned int)time( NULL ) );
for ( size_t i = 0; i < full_neg_lst.size(); i++ )
if ( full_neg_lst[i].cols > box.width && full_neg_lst[i].rows > box.height )
{
box.x = rand() % ( full_neg_lst[i].cols - box.width );
box.y = rand() % ( full_neg_lst[i].rows - box.height );
Mat roi = full_neg_lst[i]( box );
neg_lst.push_back( roi.clone() );
}
}
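// Compute a HOG descriptor for the centre crop of each image (and, if use_flip is set, of its horizontal flip as well).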
void computeHOGs( const Size wsize, const vector< Mat > & img_lst, vector< Mat > & gradient_lst, bool use_flip )
{
HOGDescriptor hog;
hog.winSize = wsize;
Mat gray;
vector< float > descriptors;
for( size_t i = 0 ; i < img_lst.size(); i++ )
{
if ( img_lst[i].cols >= wsize.width && img_lst[i].rows >= wsize.height )
{
Rect r = Rect(( img_lst[i].cols - wsize.width ) / 2,
( img_lst[i].rows - wsize.height ) / 2,
wsize.width,
wsize.height);
cvtColor( img_lst[i](r), gray, COLOR_BGR2GRAY );
hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) );
gradient_lst.push_back( Mat( descriptors ).clone() );
if ( use_flip )
{
flip( gray, gray, 1 );
hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) );
gradient_lst.push_back( Mat( descriptors ).clone() );
}
}
}
}
void test_trained_detector( String obj_det_filename, String test_dir, String videofilename )
{
cout << "Testing trained detector..." << endl;
HOGDescriptor hog;
hog.load( obj_det_filename );
vector< String > files;
glob( test_dir, files );
int delay = 0;
VideoCapture cap;
if ( videofilename != "" )
{
if ( videofilename.size() == 1 && isdigit( videofilename[0] ) )
cap.open( videofilename[0] - '0' );
else
cap.open( videofilename );
}
obj_det_filename = "testing " + obj_det_filename;
namedWindow( obj_det_filename, WINDOW_NORMAL );
for( size_t i=0;; i++ )
{
Mat img;
if ( cap.isOpened() )
{
cap >> img;
delay = 1;
}
else if( i < files.size() )
{
img = imread( files[i] );
}
if ( img.empty() )
{
return;
}
vector< Rect > detections;
vector< double > foundWeights;
hog.detectMultiScale( img, detections, foundWeights );
for ( size_t j = 0; j < detections.size(); j++ )
{
Scalar color = Scalar( 0, foundWeights[j] * foundWeights[j] * 200, 0 );
rectangle( img, detections[j], color, img.cols / 400 + 1 );
}
imshow( obj_det_filename, img );
if( waitKey( delay ) == 27 )
{
return;
}
}
}
int main( int argc, char** argv )
{
const char* keys =
{
"{help h| | show help message}"
"{pd | | path of directory contains positive images}"
"{nd | | path of directory contains negative images}"
"{td | | path of directory contains test images}"
"{tv | | test video file name}"
"{dw | | width of the detector}"
"{dh | | height of the detector}"
"{f |false| indicates if the program will generate and use mirrored samples or not}"
"{d |false| train twice}"
"{t |false| test a trained detector}"
"{v |false| visualize training steps}"
"{fn |my_detector.yml| file name of trained SVM}"
};
CommandLineParser parser( argc, argv, keys );
if ( parser.has( "help" ) )
{
parser.printMessage();
exit( 0 );
}
String pos_dir = parser.get< String >( "pd" );
String neg_dir = parser.get< String >( "nd" );
String test_dir = parser.get< String >( "td" );
String obj_det_filename = parser.get< String >( "fn" );
String videofilename = parser.get< String >( "tv" );
int detector_width = parser.get< int >( "dw" );
int detector_height = parser.get< int >( "dh" );
bool test_detector = parser.get< bool >( "t" );
bool train_twice = parser.get< bool >( "d" );
bool visualization = parser.get< bool >( "v" );
bool flip_samples = parser.get< bool >( "f" );
if ( test_detector )
{
test_trained_detector( obj_det_filename, test_dir, videofilename );
exit( 0 );
}
if( pos_dir.empty() || neg_dir.empty() )
{
parser.printMessage();
cout << "Wrong number of parameters.\n\n"
<< "Example command line:\n" << argv[0] << " -dw=64 -dh=128 -pd=/INRIAPerson/96X160H96/Train/pos -nd=/INRIAPerson/neg -td=/INRIAPerson/Test/pos -fn=HOGpedestrian64x128.xml -d\n"
<< "\nExample command line for testing trained detector:\n" << argv[0] << " -t -fn=HOGpedestrian64x128.xml -td=/INRIAPerson/Test/pos";
exit( 1 );
}
vector< Mat > pos_lst, full_neg_lst, neg_lst, gradient_lst;
vector< int > labels;
clog << "Positive images are being loaded..." ;
load_images( pos_dir, pos_lst, visualization );
if ( pos_lst.size() > 0 )
{
clog << "...[done] " << pos_lst.size() << " files." << endl;
}
else
{
clog << "no image in " << pos_dir <<endl;
return 1;
}
Size pos_image_size = pos_lst[0].size();
if ( detector_width && detector_height )
{
pos_image_size = Size( detector_width, detector_height );
}
else
{
for ( size_t i = 0; i < pos_lst.size(); ++i )
{
if( pos_lst[i].size() != pos_image_size )
{
cout << "All positive images should be same size!" << endl;
exit( 1 );
}
}
pos_image_size = pos_image_size / 8 * 8;
}
clog << "Negative images are being loaded...";
load_images( neg_dir, full_neg_lst, visualization );
clog << "...[done] " << full_neg_lst.size() << " files." << endl;
clog << "Negative images are being processed...";
sample_neg( full_neg_lst, neg_lst, pos_image_size );
clog << "...[done] " << neg_lst.size() << " files." << endl;
clog << "Histogram of Gradients are being calculated for positive images...";
computeHOGs( pos_image_size, pos_lst, gradient_lst, flip_samples );
size_t positive_count = gradient_lst.size();
labels.assign( positive_count, +1 );
clog << "...[done] ( positive images count : " << positive_count << " )" << endl;
clog << "Histogram of Gradients are being calculated for negative images...";
computeHOGs( pos_image_size, neg_lst, gradient_lst, flip_samples );
size_t negative_count = gradient_lst.size() - positive_count;
labels.insert( labels.end(), negative_count, -1 );
CV_Assert( positive_count < labels.size() );
clog << "...[done] ( negative images count : " << negative_count << " )" << endl;
Mat train_data;
convert_to_ml( gradient_lst, train_data );
clog << "Training SVM...";
Ptr< SVM > svm = SVM::create();
/* Default values to train SVM */
svm->setCoef0( 0.0 );
svm->setDegree( 3 );
svm->setTermCriteria( TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 1e-3 ) );
svm->setGamma( 0 );
svm->setKernel( SVM::LINEAR );
svm->setNu( 0.5 );
svm->setP( 0.1 ); // for EPSILON_SVR, epsilon in loss function?
svm->setC( 0.01 ); // From paper, soft classifier
svm->setType( SVM::EPS_SVR ); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
svm->train( train_data, ROW_SAMPLE, labels );
clog << "...[done]" << endl;
if ( train_twice )
{
clog << "Testing trained detector on negative images. This might take a few minutes...";
HOGDescriptor my_hog;
my_hog.winSize = pos_image_size;
// Set the trained svm to my_hog
my_hog.setSVMDetector( get_svm_detector( svm ) );
vector< Rect > detections;
vector< double > foundWeights;
for ( size_t i = 0; i < full_neg_lst.size(); i++ )
{
if ( full_neg_lst[i].cols >= pos_image_size.width && full_neg_lst[i].rows >= pos_image_size.height )
my_hog.detectMultiScale( full_neg_lst[i], detections, foundWeights );
else
detections.clear();
for ( size_t j = 0; j < detections.size(); j++ )
{
Mat detection = full_neg_lst[i]( detections[j] ).clone();
resize( detection, detection, pos_image_size, 0, 0, INTER_LINEAR_EXACT);
neg_lst.push_back( detection );
}
if ( visualization )
{
for ( size_t j = 0; j < detections.size(); j++ )
{
rectangle( full_neg_lst[i], detections[j], Scalar( 0, 255, 0 ), 2 );
}
imshow( "testing trained detector on negative images", full_neg_lst[i] );
waitKey( 5 );
}
}
clog << "...[done]" << endl;
gradient_lst.clear();
clog << "Histogram of Gradients are being calculated for positive images...";
computeHOGs( pos_image_size, pos_lst, gradient_lst, flip_samples );
positive_count = gradient_lst.size();
clog << "...[done] ( positive count : " << positive_count << " )" << endl;
clog << "Histogram of Gradients are being calculated for negative images...";
computeHOGs( pos_image_size, neg_lst, gradient_lst, flip_samples );
negative_count = gradient_lst.size() - positive_count;
clog << "...[done] ( negative count : " << negative_count << " )" << endl;
labels.clear();
labels.assign(positive_count, +1);
labels.insert(labels.end(), negative_count, -1);
clog << "Training SVM again...";
convert_to_ml( gradient_lst, train_data );
svm->train( train_data, ROW_SAMPLE, labels );
clog << "...[done]" << endl;
}
HOGDescriptor hog;
hog.winSize = pos_image_size;
hog.setSVMDetector( get_svm_detector( svm ) );
hog.save( obj_det_filename );
test_trained_detector( obj_det_filename, test_dir, videofilename );
return 0;
}

View File

@ -1,211 +0,0 @@
#include "opencv2/core.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/ml.hpp"
using namespace cv;
using namespace cv::ml;
struct Data
{
Mat img;
Mat samples; //Set of train samples. Contains points on image
Mat responses; //Set of responses for train samples
Data()
{
const int WIDTH = 841;
const int HEIGHT = 594;
img = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);
imshow("Train svmsgd", img);
}
};
//Train with SVMSGD algorithm
//(samples, responses) is a train set
//weights is a required vector for decision function of SVMSGD algorithm
bool doTrain(const Mat samples, const Mat responses, Mat &weights, float &shift);
//function finds two points for drawing line (wx = 0)
bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height);
// function finds cross point of line (wx = 0) and segment ( (y = HEIGHT, 0 <= x <= WIDTH) or (x = WIDTH, 0 <= y <= HEIGHT) )
bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair<Point,Point> &segment, Point &crossPoint);
//segments' initialization ( (y = HEIGHT, 0 <= x <= WIDTH) and (x = WIDTH, 0 <= y <= HEIGHT) )
void fillSegments(std::vector<std::pair<Point,Point> > &segments, int width, int height);
//redraw points' set and line (wx = 0)
void redraw(Data data, const Point points[2]);
//add point in train set, train SVMSGD algorithm and draw results on image
void addPointRetrainAndRedraw(Data &data, int x, int y, int response);
bool doTrain( const Mat samples, const Mat responses, Mat &weights, float &shift)
{
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();
cv::Ptr<TrainData> trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);
svmsgd->train( trainData );
if (svmsgd->isTrained())
{
weights = svmsgd->getWeights();
shift = svmsgd->getShift();
return true;
}
return false;
}
void fillSegments(std::vector<std::pair<Point,Point> > &segments, int width, int height)
{
std::pair<Point,Point> currentSegment;
currentSegment.first = Point(width, 0);
currentSegment.second = Point(width, height);
segments.push_back(currentSegment);
currentSegment.first = Point(0, height);
currentSegment.second = Point(width, height);
segments.push_back(currentSegment);
currentSegment.first = Point(0, 0);
currentSegment.second = Point(width, 0);
segments.push_back(currentSegment);
currentSegment.first = Point(0, 0);
currentSegment.second = Point(0, height);
segments.push_back(currentSegment);
}
bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair<Point,Point> &segment, Point &crossPoint)
{
int x = 0;
int y = 0;
int xMin = std::min(segment.first.x, segment.second.x);
int xMax = std::max(segment.first.x, segment.second.x);
int yMin = std::min(segment.first.y, segment.second.y);
int yMax = std::max(segment.first.y, segment.second.y);
CV_Assert(weights.type() == CV_32FC1);
CV_Assert(xMin == xMax || yMin == yMax);
if (xMin == xMax && weights.at<float>(1) != 0)
{
x = xMin;
y = static_cast<int>(std::floor( - (weights.at<float>(0) * x + shift) / weights.at<float>(1)));
if (y >= yMin && y <= yMax)
{
crossPoint.x = x;
crossPoint.y = y;
return true;
}
}
else if (yMin == yMax && weights.at<float>(0) != 0)
{
y = yMin;
x = static_cast<int>(std::floor( - (weights.at<float>(1) * y + shift) / weights.at<float>(0)));
if (x >= xMin && x <= xMax)
{
crossPoint.x = x;
crossPoint.y = y;
return true;
}
}
return false;
}
bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height)
{
if (weights.empty())
{
return false;
}
int foundPointsCount = 0;
std::vector<std::pair<Point,Point> > segments;
fillSegments(segments, width, height);
for (uint i = 0; i < segments.size(); i++)
{
if (findCrossPointWithBorders(weights, shift, segments[i], points[foundPointsCount]))
foundPointsCount++;
if (foundPointsCount >= 2)
break;
}
return true;
}
void redraw(Data data, const Point points[2])
{
data.img.setTo(0);
Point center;
int radius = 3;
Scalar color;
CV_Assert((data.samples.type() == CV_32FC1) && (data.responses.type() == CV_32FC1));
for (int i = 0; i < data.samples.rows; i++)
{
center.x = static_cast<int>(data.samples.at<float>(i,0));
center.y = static_cast<int>(data.samples.at<float>(i,1));
color = (data.responses.at<float>(i) > 0) ? Scalar(128,128,0) : Scalar(0,128,128);
circle(data.img, center, radius, color, 5);
}
line(data.img, points[0], points[1],cv::Scalar(1,255,1));
imshow("Train svmsgd", data.img);
}
void addPointRetrainAndRedraw(Data &data, int x, int y, int response)
{
Mat currentSample(1, 2, CV_32FC1);
currentSample.at<float>(0,0) = (float)x;
currentSample.at<float>(0,1) = (float)y;
data.samples.push_back(currentSample);
data.responses.push_back(static_cast<float>(response));
Mat weights(1, 2, CV_32FC1);
float shift = 0;
if (doTrain(data.samples, data.responses, weights, shift))
{
Point points[2];
findPointsForLine(weights, shift, points, data.img.cols, data.img.rows);
redraw(data, points);
}
}
static void onMouse( int event, int x, int y, int, void* pData)
{
Data &data = *(Data*)pData;
switch( event )
{
case EVENT_LBUTTONUP:
addPointRetrainAndRedraw(data, x, y, 1);
break;
case EVENT_RBUTTONDOWN:
addPointRetrainAndRedraw(data, x, y, -1);
break;
}
}
int main()
{
Data data;
setMouseCallback( "Train svmsgd", onMouse, &data );
waitKey();
return 0;
}

View File

@ -1,109 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <iostream> // for std::cout
using namespace cv;
class TravelSalesman
{
private :
const std::vector<Point>& posCity;
std::vector<int>& next;
RNG rng;
int d0,d1,d2,d3;
public:
TravelSalesman(std::vector<Point> &p, std::vector<int> &n) :
posCity(p), next(n)
{
rng = theRNG();
}
/** Give energy value for a state of system.*/
double energy() const;
/** Function which change the state of system (random perturbation).*/
void changeState();
/** Function to reverse to the previous state.*/
void reverseState();
};
void TravelSalesman::changeState()
{
d0 = rng.uniform(0,static_cast<int>(posCity.size()));
d1 = next[d0];
d2 = next[d1];
d3 = next[d2];
next[d0] = d2;
next[d2] = d1;
next[d1] = d3;
}
void TravelSalesman::reverseState()
{
next[d0] = d1;
next[d1] = d2;
next[d2] = d3;
}
double TravelSalesman::energy() const
{
double e = 0;
for (size_t i = 0; i < next.size(); i++)
{
e += norm(posCity[i]-posCity[next[i]]);
}
return e;
}
static void DrawTravelMap(Mat &img, std::vector<Point> &p, std::vector<int> &n)
{
for (size_t i = 0; i < n.size(); i++)
{
circle(img,p[i],5,Scalar(0,0,255),2);
line(img,p[i],p[n[i]],Scalar(0,255,0),2);
}
}
int main(void)
{
int nbCity=40;
Mat img(500,500,CV_8UC3,Scalar::all(0));
RNG rng(123456);
int radius=static_cast<int>(img.cols*0.45);
Point center(img.cols/2,img.rows/2);
std::vector<Point> posCity(nbCity);
std::vector<int> next(nbCity);
for (size_t i = 0; i < posCity.size(); i++)
{
double theta = rng.uniform(0., 2 * CV_PI);
posCity[i].x = static_cast<int>(radius*cos(theta)) + center.x;
posCity[i].y = static_cast<int>(radius*sin(theta)) + center.y;
next[i]=(i+1)%nbCity;
}
TravelSalesman ts_system(posCity, next);
DrawTravelMap(img,posCity,next);
imshow("Map",img);
waitKey(10);
double currentTemperature = 100.0;
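// Run the annealing solver round by round, letting the temperature decay, until ten rounds have applied no changes.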
for (int i = 0, zeroChanges = 0; zeroChanges < 10; i++)
{
int changesApplied = ml::simulatedAnnealingSolver(ts_system, currentTemperature, currentTemperature*0.97, 0.99, 10000*nbCity, &currentTemperature, rng);
img.setTo(Scalar::all(0));
DrawTravelMap(img, posCity, next);
imshow("Map", img);
int k = waitKey(10);
std::cout << "i=" << i << " changesApplied=" << changesApplied << " temp=" << currentTemperature << " result=" << ts_system.energy() << std::endl;
if (k == 27 || k == 'q' || k == 'Q')
return 0;
if (changesApplied == 0)
zeroChanges++;
}
std::cout << "Done" << std::endl;
waitKey(0);
return 0;
}

View File

@ -1,116 +0,0 @@
#include "opencv2/ml.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
#include <iostream> // for std::cout
#include <string>
#include <map>
using namespace cv;
using namespace cv::ml;
static void help(char** argv)
{
printf(
"\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n"
"Usage:\n\t%s [-r=<response_column>] [-ts=type_spec] <csv filename>\n"
"where -r=<response_column> specified the 0-based index of the response (0 by default)\n"
"-ts= specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n"
"<csv filename> is the name of training data file in comma-separated value format\n\n", argv[0]);
}
static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data)
{
bool ok = model->train(data);
if( !ok )
{
printf("Training failed\n");
}
else
{
printf( "train error: %f\n", model->calcError(data, false, noArray()) );
printf( "test error: %f\n\n", model->calcError(data, true, noArray()) );
}
}
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv, "{ help h | | }{r | 0 | }{ts | | }{@input | | }");
if (parser.has("help"))
{
help(argv);
return 0;
}
std::string filename = parser.get<std::string>("@input");
int response_idx;
std::string typespec;
response_idx = parser.get<int>("r");
typespec = parser.get<std::string>("ts");
if( filename.empty() || !parser.check() )
{
parser.printErrors();
help(argv);
return 0;
}
printf("\nReading in %s...\n\n",filename.c_str());
const double train_test_split_ratio = 0.5;
Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx+1, typespec);
if( data.empty() )
{
printf("ERROR: File %s can not be read\n", filename.c_str());
return 0;
}
data->setTrainTestSplitRatio(train_test_split_ratio);
std::cout << "Test/Train: " << data->getNTestSamples() << "/" << data->getNTrainSamples();
printf("======DTREE=====\n");
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(10);
dtree->setMinSampleCount(2);
dtree->setRegressionAccuracy(0);
dtree->setUseSurrogates(false);
dtree->setMaxCategories(16);
dtree->setCVFolds(0);
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->setPriors(Mat());
train_and_print_errs(dtree, data);
if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem
{
printf("======BOOST=====\n");
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
train_and_print_errs(boost, data);
}
printf("======RTREES=====\n");
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(10);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(true);
rtrees->setActiveVarCount(0);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0));
train_and_print_errs(rtrees, data);
cv::Mat ref_labels = data->getClassLabels();
cv::Mat test_data = data->getTestSampleIdx();
cv::Mat predict_labels;
rtrees->predict(data->getSamples(), predict_labels);
cv::Mat variable_importance = rtrees->getVarImportance();
std::cout << "Estimated variable importance" << std::endl;
for (int i = 0; i < variable_importance.rows; i++) {
std::cout << "Variable " << i << ": " << variable_importance.at<float>(i, 0) << std::endl;
}
return 0;
}

View File

@ -1,81 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
int main(int, char**)
{
// Set up training data
//! [setup1]
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
//! [setup1]
//! [setup2]
Mat trainingDataMat(4, 2, CV_32F, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
//! [setup2]
// Train the SVM
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
//! [init]
//! [train]
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
//! [train]
// Data for visual representation
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
// Show the decision regions given by the SVM
//! [show]
Vec3b green(0,255,0), blue(255,0,0);
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Mat sampleMat = (Mat_<float>(1,2) << j,i);
float response = svm->predict(sampleMat);
if (response == 1)
image.at<Vec3b>(i,j) = green;
else if (response == -1)
image.at<Vec3b>(i,j) = blue;
}
}
//! [show]
// Show the training data
//! [show_data]
int thickness = -1;
circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness );
circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness );
circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness );
circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness );
//! [show_data]
// Show support vectors
//! [show_vectors]
thickness = 2;
Mat sv = svm->getUncompressedSupportVectors();
for (int i = 0; i < sv.rows; i++)
{
const float* v = sv.ptr<float>(i);
circle(image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness);
}
//! [show_vectors]
imwrite("result.png", image); // save the image
imshow("SVM Simple Example", image); // show it to the user
waitKey();
return 0;
}

View File

@ -1,144 +0,0 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;
using namespace std;
static void help()
{
cout<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows Support Vector Machines for Non-Linearly Separable Data. " << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main()
{
help();
const int NTRAINING_SAMPLES = 100; // Number of training samples per class
const float FRAC_LINEAR_SEP = 0.9f; // Fraction of samples which compose the linearly separable part
// Data for visual representation
const int WIDTH = 512, HEIGHT = 512;
Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);
//--------------------- 1. Set up training data randomly ---------------------------------------
Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32F);
Mat labels (2*NTRAINING_SAMPLES, 1, CV_32S);
RNG rng(100); // Random value generation class
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
//! [setup1]
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT));
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT));
//! [setup1]
//------------------ Set up the non-linearly separable part of the training data ---------------
//! [setup2]
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange(nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT));
//! [setup2]
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2
//------------------------ 2. Set up the support vector machines parameters --------------------
cout << "Starting training process" << endl;
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(0.1);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
//! [init]
//------------------------ 3. Train the svm ----------------------------------------------------
//! [train]
svm->train(trainData, ROW_SAMPLE, labels);
//! [train]
cout << "Finished training process" << endl;
//------------------------ 4. Show the decision regions ----------------------------------------
//! [show]
Vec3b green(0,100,0), blue(100,0,0);
for (int i = 0; i < I.rows; i++)
{
for (int j = 0; j < I.cols; j++)
{
Mat sampleMat = (Mat_<float>(1,2) << j, i);
float response = svm->predict(sampleMat);
if (response == 1) I.at<Vec3b>(i,j) = green;
else if (response == 2) I.at<Vec3b>(i,j) = blue;
}
}
//! [show]
//----------------------- 5. Show the training data --------------------------------------------
//! [show_data]
int thick = -1;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; i++)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; i++)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick);
}
//! [show_data]
//------------------------- 6. Show support vectors --------------------------------------------
//! [show_vectors]
thick = 2;
Mat sv = svm->getUncompressedSupportVectors();
for (int i = 0; i < sv.rows; i++)
{
const float* v = sv.ptr<float>(i);
circle(I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick);
}
//! [show_vectors]
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user
waitKey();
return 0;
}

View File

@ -1,99 +0,0 @@
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.ml.Ml;
import org.opencv.ml.SVM;
public class IntroductionToSVMDemo {
public static void main(String[] args) {
// Load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// Set up training data
//! [setup1]
int[] labels = { 1, -1, -1, -1 };
float[] trainingData = { 501, 10, 255, 10, 501, 255, 10, 501 };
//! [setup1]
//! [setup2]
Mat trainingDataMat = new Mat(4, 2, CvType.CV_32FC1);
trainingDataMat.put(0, 0, trainingData);
Mat labelsMat = new Mat(4, 1, CvType.CV_32SC1);
labelsMat.put(0, 0, labels);
//! [setup2]
// Train the SVM
//! [init]
SVM svm = SVM.create();
svm.setType(SVM.C_SVC);
svm.setKernel(SVM.LINEAR);
svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100, 1e-6));
//! [init]
//! [train]
svm.train(trainingDataMat, Ml.ROW_SAMPLE, labelsMat);
//! [train]
// Data for visual representation
int width = 512, height = 512;
Mat image = Mat.zeros(height, width, CvType.CV_8UC3);
// Show the decision regions given by the SVM
//! [show]
byte[] imageData = new byte[(int) (image.total() * image.channels())];
Mat sampleMat = new Mat(1, 2, CvType.CV_32F);
float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())];
for (int i = 0; i < image.rows(); i++) {
for (int j = 0; j < image.cols(); j++) {
sampleMatData[0] = j;
sampleMatData[1] = i;
sampleMat.put(0, 0, sampleMatData);
float response = svm.predict(sampleMat);
if (response == 1) {
imageData[(i * image.cols() + j) * image.channels()] = 0;
imageData[(i * image.cols() + j) * image.channels() + 1] = (byte) 255;
imageData[(i * image.cols() + j) * image.channels() + 2] = 0;
} else if (response == -1) {
imageData[(i * image.cols() + j) * image.channels()] = (byte) 255;
imageData[(i * image.cols() + j) * image.channels() + 1] = 0;
imageData[(i * image.cols() + j) * image.channels() + 2] = 0;
}
}
}
image.put(0, 0, imageData);
//! [show]
// Show the training data
//! [show_data]
int thickness = -1;
int lineType = Imgproc.LINE_8;
Imgproc.circle(image, new Point(501, 10), 5, new Scalar(0, 0, 0), thickness, lineType, 0);
Imgproc.circle(image, new Point(255, 10), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
Imgproc.circle(image, new Point(501, 255), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
Imgproc.circle(image, new Point(10, 501), 5, new Scalar(255, 255, 255), thickness, lineType, 0);
//! [show_data]
// Show support vectors
//! [show_vectors]
thickness = 2;
Mat sv = svm.getUncompressedSupportVectors();
float[] svData = new float[(int) (sv.total() * sv.channels())];
sv.get(0, 0, svData);
for (int i = 0; i < sv.rows(); ++i) {
Imgproc.circle(image, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6,
new Scalar(128, 128, 128), thickness, lineType, 0);
}
//! [show_vectors]
Imgcodecs.imwrite("result.png", image); // save the image
HighGui.imshow("SVM Simple Example", image); // show it to the user
HighGui.waitKey();
System.exit(0);
}
}

View File

@ -1,186 +0,0 @@
import java.util.Random;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.ml.Ml;
import org.opencv.ml.SVM;
public class NonLinearSVMsDemo {
public static final int NTRAINING_SAMPLES = 100;
public static final float FRAC_LINEAR_SEP = 0.9f;
public static void main(String[] args) {
// Load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
System.out.println("\n--------------------------------------------------------------------------");
System.out.println("This program shows Support Vector Machines for Non-Linearly Separable Data. ");
System.out.println("--------------------------------------------------------------------------\n");
// Data for visual representation
int width = 512, height = 512;
Mat I = Mat.zeros(height, width, CvType.CV_8UC3);
// --------------------- 1. Set up training data randomly---------------------------------------
Mat trainData = new Mat(2 * NTRAINING_SAMPLES, 2, CvType.CV_32F);
Mat labels = new Mat(2 * NTRAINING_SAMPLES, 1, CvType.CV_32S);
Random rng = new Random(100); // Random value generation class
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
//! [setup1]
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
float[] cData = new float[(int) (c.total() * c.channels())];
double[] cDataDbl = rng.doubles(cData.length, 0, 0.4f * width).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
cData = new float[(int) (c.total() * c.channels())];
cDataDbl = rng.doubles(cData.length, 0, height).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
// Generate random points for the class 2
trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0, 1);
cData = new float[(int) (c.total() * c.channels())];
cDataDbl = rng.doubles(cData.length, 0.6 * width, width).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
cData = new float[(int) (c.total() * c.channels())];
cDataDbl = rng.doubles(cData.length, 0, height).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
//! [setup1]
// ------------------ Set up the non-linearly separable part of the training data ---------------
//! [setup2]
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0, 1);
cData = new float[(int) (c.total() * c.channels())];
cDataDbl = rng.doubles(cData.length, 0.4 * width, 0.6 * width).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
cData = new float[(int) (c.total() * c.channels())];
cDataDbl = rng.doubles(cData.length, 0, height).toArray();
for (int i = 0; i < cData.length; i++) {
cData[i] = (float) cDataDbl[i];
}
c.put(0, 0, cData);
//! [setup2]
// ------------------------- Set up the labels for the classes---------------------------------
labels.rowRange(0, NTRAINING_SAMPLES).setTo(new Scalar(1)); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(new Scalar(2)); // Class 2
// ------------------------ 2. Set up the support vector machines parameters--------------------
System.out.println("Starting training process");
//! [init]
SVM svm = SVM.create();
svm.setType(SVM.C_SVC);
svm.setC(0.1);
svm.setKernel(SVM.LINEAR);
svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, (int) 1e7, 1e-6));
//! [init]
// ------------------------ 3. Train the svm----------------------------------------------------
//! [train]
svm.train(trainData, Ml.ROW_SAMPLE, labels);
//! [train]
System.out.println("Finished training process");
// ------------------------ 4. Show the decision regions----------------------------------------
//! [show]
byte[] IData = new byte[(int) (I.total() * I.channels())];
Mat sampleMat = new Mat(1, 2, CvType.CV_32F);
float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())];
for (int i = 0; i < I.rows(); i++) {
for (int j = 0; j < I.cols(); j++) {
sampleMatData[0] = j;
sampleMatData[1] = i;
sampleMat.put(0, 0, sampleMatData);
float response = svm.predict(sampleMat);
if (response == 1) {
IData[(i * I.cols() + j) * I.channels()] = 0;
IData[(i * I.cols() + j) * I.channels() + 1] = 100;
IData[(i * I.cols() + j) * I.channels() + 2] = 0;
} else if (response == 2) {
IData[(i * I.cols() + j) * I.channels()] = 100;
IData[(i * I.cols() + j) * I.channels() + 1] = 0;
IData[(i * I.cols() + j) * I.channels() + 2] = 0;
}
}
}
I.put(0, 0, IData);
//! [show]
// ----------------------- 5. Show the training data--------------------------------------------
//! [show_data]
int thick = -1;
int lineType = Imgproc.LINE_8;
float px, py;
// Class 1
float[] trainDataData = new float[(int) (trainData.total() * trainData.channels())];
trainData.get(0, 0, trainDataData);
for (int i = 0; i < NTRAINING_SAMPLES; i++) {
px = trainDataData[i * trainData.cols()];
py = trainDataData[i * trainData.cols() + 1];
Imgproc.circle(I, new Point(px, py), 3, new Scalar(0, 255, 0), thick, lineType, 0);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i) {
px = trainDataData[i * trainData.cols()];
py = trainDataData[i * trainData.cols() + 1];
Imgproc.circle(I, new Point(px, py), 3, new Scalar(255, 0, 0), thick, lineType, 0);
}
//! [show_data]
// ------------------------- 6. Show support vectors--------------------------------------------
//! [show_vectors]
thick = 2;
Mat sv = svm.getUncompressedSupportVectors();
float[] svData = new float[(int) (sv.total() * sv.channels())];
sv.get(0, 0, svData);
for (int i = 0; i < sv.rows(); i++) {
Imgproc.circle(I, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, new Scalar(128, 128, 128),
thick, lineType, 0);
}
//! [show_vectors]
Imgcodecs.imwrite("result.png", I); // save the Image
HighGui.imshow("SVM for Non-Linear Training Data", I); // show it to the user
HighGui.waitKey();
System.exit(0);
}
}

View File

@ -1,194 +0,0 @@
#!/usr/bin/env python
'''
SVM and KNearest digit recognition.
The sample loads a dataset of handwritten digits from 'digits.png',
then trains SVM and KNearest classifiers on it and evaluates
their accuracy.
The following preprocessing is applied to the dataset:
- Moment-based image deskew (see deskew())
- Digit images are split into 4 10x10 cells and a 16-bin
histogram of oriented gradients is computed for each
cell
- Histograms are transformed to a space with a Hellinger metric (see [1] (RootSIFT))
[1] R. Arandjelovic, A. Zisserman
"Three things everyone should know to improve object retrieval"
http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
Usage:
digits.py
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
from multiprocessing.pool import ThreadPool
from numpy.linalg import norm
# local modules
from common import clock, mosaic
SZ = 20 # size of each digit is SZ x SZ
CLASS_N = 10
DIGITS_FN = 'digits.png'
def split2d(img, cell_size, flatten=True):
h, w = img.shape[:2]
sx, sy = cell_size
cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
cells = np.array(cells)
if flatten:
cells = cells.reshape(-1, sy, sx)
return cells
def load_digits(fn):
fn = cv.samples.findFile(fn)
print('loading "%s" ...' % fn)
digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
digits = split2d(digits_img, (SZ, SZ))
labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)
return digits, labels
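# Straighten a slanted digit by shearing it with the skew estimated from second-order image moments (mu11/mu02).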
def deskew(img):
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
return img
class KNearest(object):
def __init__(self, k = 3):
self.k = k
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
return results.ravel()
def load(self, fn):
self.model = cv.ml.KNearest_load(fn)
def save(self, fn):
self.model.save(fn)
class SVM(object):
def __init__(self, C = 1, gamma = 0.5):
self.model = cv.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setType(cv.ml.SVM_C_SVC)
def train(self, samples, responses):
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
return self.model.predict(samples)[1].ravel()
def load(self, fn):
self.model = cv.ml.SVM_load(fn)
def save(self, fn):
self.model.save(fn)
def evaluate_model(model, digits, samples, labels):
resp = model.predict(samples)
err = (labels != resp).mean()
print('error: %.2f %%' % (err*100))
confusion = np.zeros((10, 10), np.int32)
for i, j in zip(labels, resp):
confusion[i, int(j)] += 1
print('confusion matrix:')
print(confusion)
print()
vis = []
for img, flag in zip(digits, resp == labels):
img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
if not flag:
img[...,:2] = 0
vis.append(img)
return mosaic(25, vis)
def preprocess_simple(digits):
return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
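# Per digit: 16-bin gradient-orientation histograms over four 10x10 cells, concatenated into a 64-D vector and Hellinger-normalised (RootSIFT-style).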
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
mag, ang = cv.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
# transform to Hellinger kernel
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= norm(hist) + eps
samples.append(hist)
return np.float32(samples)
if __name__ == '__main__':
print(__doc__)
digits, labels = load_digits(DIGITS_FN)
print('preprocessing...')
# shuffle digits
rand = np.random.RandomState(321)
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
train_n = int(0.9*len(samples))
cv.imshow('test set', mosaic(25, digits[train_n:]))
digits_train, digits_test = np.split(digits2, [train_n])
samples_train, samples_test = np.split(samples, [train_n])
labels_train, labels_test = np.split(labels, [train_n])
print('training KNearest...')
model = KNearest(k=4)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv.imshow('KNearest test', vis)
print('training SVM...')
model = SVM(C=2.67, gamma=5.383)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv.imshow('SVM test', vis)
print('saving SVM as "digits_svm.dat"...')
model.save('digits_svm.dat')
cv.waitKey(0)
cv.destroyAllWindows()

View File

@ -1,132 +0,0 @@
#!/usr/bin/env python
'''
Digit recognition adjustment.
Grid search is used to find the best parameters for SVM and KNearest classifiers.
SVM adjustment follows the guidelines given in
http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
Usage:
digits_adjust.py [--model {svm|knearest}]
--model {svm|knearest} - select the classifier (SVM is the default)
'''
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
from digits import *
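# k-fold cross-validation: train on k-1 folds, measure the error on the held-out fold, and return the mean error over all folds.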
def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
n = len(samples)
folds = np.array_split(np.arange(n), kfold)
def f(i):
model = model_class(**params)
test_idx = folds[i]
train_idx = list(folds)
train_idx.pop(i)
train_idx = np.hstack(train_idx)
train_samples, train_labels = samples[train_idx], labels[train_idx]
test_samples, test_labels = samples[test_idx], labels[test_idx]
model.train(train_samples, train_labels)
resp = model.predict(test_samples)
score = (resp != test_labels).mean()
print(".", end='')
return score
if pool is None:
scores = list(map(f, range(kfold)))
else:
scores = pool.map(f, range(kfold))
return np.mean(scores)
class App(object):
def __init__(self):
self._samples, self._labels = self.preprocess()
def preprocess(self):
digits, labels = load_digits(DIGITS_FN)
shuffle = np.random.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
return samples, labels
def get_dataset(self):
return self._samples, self._labels
def run_jobs(self, f, jobs):
pool = ThreadPool(processes=cv.getNumberOfCPUs())
ires = pool.imap_unordered(f, jobs)
return ires
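# Exhaustive grid search over log2-spaced C and gamma values; the pair with the lowest cross-validation error is reported as best.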
def adjust_SVM(self):
Cs = np.logspace(0, 10, 15, base=2)
gammas = np.logspace(-7, 4, 15, base=2)
scores = np.zeros((len(Cs), len(gammas)))
scores[:] = np.nan
print('adjusting SVM (may take a long time) ...')
def f(job):
i, j = job
samples, labels = self.get_dataset()
params = dict(C = Cs[i], gamma=gammas[j])
score = cross_validate(SVM, params, samples, labels)
return i, j, score
ires = self.run_jobs(f, np.ndindex(*scores.shape))
for count, (i, j, score) in enumerate(ires):
scores[i, j] = score
print('%d / %d (best error: %.2f %%, last: %.2f %%)' %
(count+1, scores.size, np.nanmin(scores)*100, score*100))
print(scores)
print('writing score table to "svm_scores.npz"')
np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas)
i, j = np.unravel_index(scores.argmin(), scores.shape)
best_params = dict(C = Cs[i], gamma=gammas[j])
print('best params:', best_params)
print('best error: %.2f %%' % (scores.min()*100))
return best_params
def adjust_KNearest(self):
print('adjusting KNearest ...')
def f(k):
samples, labels = self.get_dataset()
err = cross_validate(KNearest, dict(k=k), samples, labels)
return k, err
best_err, best_k = np.inf, -1
for k, err in self.run_jobs(f, range(1, 9)):
if err < best_err:
best_err, best_k = err, k
print('k = %d, error: %.2f %%' % (k, err*100))
best_params = dict(k=best_k)
print('best params:', best_params, 'err: %.2f' % (best_err*100))
return best_params
if __name__ == '__main__':
import getopt
import sys
print(__doc__)
args, _ = getopt.getopt(sys.argv[1:], '', ['model='])
args = dict(args)
args.setdefault('--model', 'svm')
args.setdefault('--env', '')
if args['--model'] not in ['svm', 'knearest']:
print('unknown model "%s"' % args['--model'])
sys.exit(1)
t = clock()
app = App()
if args['--model'] == 'knearest':
app.adjust_KNearest()
else:
app.adjust_SVM()
print('work time: %f s' % (clock() - t))

Some files were not shown because too many files have changed in this diff.