From 6722d4a5247a0c599a7b91eb62d23d0f55189ef5 Mon Sep 17 00:00:00 2001
From: WU Jia <35290826+kaingwade@users.noreply.github.com>
Date: Tue, 27 Feb 2024 20:54:08 +0800
Subject: [PATCH] Merge pull request #25017 from kaingwade:ml_to_contrib

Move ml to opencv_contrib #25017

OpenCV cleanup: #24997
opencv_contrib: opencv/opencv_contrib#3636
---
 doc/opencv.bib | 60 -
 doc/py_tutorials/py_ml/images/knnicon.png | Bin 3424 -> 0 bytes
 doc/py_tutorials/py_ml/images/svmicon.png | Bin 13155 -> 0 bytes
 .../py_ml/py_knn/images/knn_icon1.jpg | Bin 2449 -> 0 bytes
 .../py_ml/py_knn/images/knn_icon2.jpg | Bin 4932 -> 0 bytes
 .../py_ml/py_knn/py_knn_index.markdown | 10 -
 .../py_knn_opencv/py_knn_opencv.markdown | 123 -
 .../images/knn_simple.png | Bin 7473 -> 0 bytes
 .../images/knn_theory.png | Bin 6700 -> 0 bytes
 .../py_knn_understanding.markdown | 150 --
 .../py_ml/py_svm/images/svm_icon1.jpg | Bin 3956 -> 0 bytes
 .../py_ml/py_svm/images/svm_icon2.jpg | Bin 4645 -> 0 bytes
 .../py_svm_basics/images/svm_basics1.png | Bin 7731 -> 0 bytes
 .../py_svm_basics/images/svm_basics2.png | Bin 7998 -> 0 bytes
 .../py_svm_basics/images/svm_basics3.png | Bin 10353 -> 0 bytes
 .../py_svm_basics/py_svm_basics.markdown | 134 -
 .../py_ml/py_svm/py_svm_index.markdown | 10 -
 .../py_svm/py_svm_opencv/images/deskew.jpg | Bin 11383 -> 0 bytes
 .../py_svm_opencv/py_svm_opencv.markdown | 56 -
 .../py_ml/py_table_of_contents_ml.markdown | 9 -
 .../others/_old/table_of_content_ml.markdown | 4 -
 .../others/barcode_detect_and_decode.markdown | 2 +-
 .../others/images/optimal-hyperplane.png | Bin 7998 -> 0 bytes
 .../others/images/sample-errors-dist.png | Bin 10353 -> 0 bytes
 .../others/images/separating-lines.png | Bin 7731 -> 0 bytes
 .../others/images/svm_intro_result.png | Bin 1886 -> 0 bytes
 .../others/images/svm_non_linear_result.png | Bin 14714 -> 0 bytes
 .../others/introduction_to_pca.markdown | 2 +-
 .../others/introduction_to_svm.markdown | 273 --
 doc/tutorials/others/non_linear_svms.markdown | 288 --
 .../others/table_of_content_other.markdown | 4 +-
 doc/tutorials/tutorials.markdown | 2 +-
 modules/CMakeLists.txt | 2 +-
 modules/ml/CMakeLists.txt | 2 -
 modules/ml/doc/ml_intro.markdown | 481 ----
 modules/ml/doc/pics/SVM_Comparison.png | Bin 94157 -> 0 bytes
 modules/ml/doc/pics/mlp.png | Bin 11382 -> 0 bytes
 modules/ml/doc/pics/neuron_model.png | Bin 10005 -> 0 bytes
 modules/ml/doc/pics/sigmoid_bipolar.png | Bin 7151 -> 0 bytes
 modules/ml/include/opencv2/ml.hpp | 1956 --------------
 modules/ml/include/opencv2/ml/ml.hpp | 48 -
 modules/ml/include/opencv2/ml/ml.inl.hpp | 60 -
 modules/ml/misc/java/test/MLTest.java | 42 -
 modules/ml/misc/objc/gen_dict.json | 9 -
 modules/ml/misc/python/pyopencv_ml.hpp | 22 -
 modules/ml/misc/python/test/test_digits.py | 201 --
 .../ml/misc/python/test/test_goodfeatures.py | 40 -
 modules/ml/misc/python/test/test_knearest.py | 13 -
 .../ml/misc/python/test/test_letter_recog.py | 171 --
 modules/ml/src/ann_mlp.cpp | 1534 -----------
 modules/ml/src/boost.cpp | 533 ----
 modules/ml/src/data.cpp | 1045 --------
 modules/ml/src/em.cpp | 859 ------
 modules/ml/src/gbt.cpp | 1373 ----------
 modules/ml/src/inner_functions.cpp | 222 --
 modules/ml/src/kdtree.cpp | 530 ----
 modules/ml/src/kdtree.hpp | 97 -
 modules/ml/src/knearest.cpp | 521 ----
 modules/ml/src/lr.cpp | 604 -----
 modules/ml/src/nbayes.cpp | 471 ----
 modules/ml/src/precomp.hpp | 400 ---
 modules/ml/src/rtrees.cpp | 531 ----
 modules/ml/src/svm.cpp | 2357 -----------------
 modules/ml/src/svmsgd.cpp | 524 ----
 modules/ml/src/testset.cpp | 113 -
 modules/ml/src/tree.cpp | 1990 --------------
 modules/ml/test/test_ann.cpp | 200 --
 modules/ml/test/test_bayes.cpp | 56 -
 modules/ml/test/test_em.cpp | 186 --
 modules/ml/test/test_kmeans.cpp | 53 -
 modules/ml/test/test_knearest.cpp | 112 -
 modules/ml/test/test_lr.cpp | 81 -
 modules/ml/test/test_main.cpp | 10 -
 modules/ml/test/test_mltests.cpp | 373 ---
 modules/ml/test/test_precomp.hpp | 50 -
 modules/ml/test/test_rtrees.cpp | 119 -
 modules/ml/test/test_save_load.cpp | 107 -
 modules/ml/test/test_svmsgd.cpp | 156 --
 modules/ml/test/test_svmtrainauto.cpp | 164 --
 modules/ml/test/test_utils.cpp | 189 --
 .../objdetect/include/opencv2/objdetect.hpp | 2 -
 modules/python/test/test_gaussian_mix.py | 64 -
 modules/python/test/test_misc.py | 15 -
 samples/cpp/CMakeLists.txt | 1 -
 samples/cpp/digits_svm.cpp | 367 ---
 samples/cpp/em.cpp | 70 -
 samples/cpp/letter_recog.cpp | 558 ----
 samples/cpp/logistic_regression.cpp | 127 -
 samples/cpp/neural_network.cpp | 65 -
 samples/cpp/points_classifier.cpp | 399 ---
 samples/cpp/train_HOG.cpp | 392 ---
 samples/cpp/train_svmsgd.cpp | 211 --
 samples/cpp/travelsalesman.cpp | 109 -
 samples/cpp/tree_engine.cpp | 116 -
 .../introduction_to_svm.cpp | 81 -
 .../ml/non_linear_svms/non_linear_svms.cpp | 144 -
 .../IntroductionToSVMDemo.java | 99 -
 .../ml/non_linear_svms/NonLinearSVMsDemo.java | 186 --
 samples/python/digits.py | 194 --
 samples/python/digits_adjust.py | 132 -
 samples/python/digits_video.py | 109 -
 samples/python/gaussian_mix.py | 69 -
 samples/python/letter_recog.py | 194 --
 .../introduction_to_svm.py | 62 -
 .../ml/non_linear_svms/non_linear_svms.py | 117 -
 .../tutorial_code/ml/py_svm_opencv/hogsvm.py | 73 -
 106 files changed, 5 insertions(+), 23685 deletions(-)
 delete mode 100644 doc/py_tutorials/py_ml/images/knnicon.png
 delete mode 100644 doc/py_tutorials/py_ml/images/svmicon.png
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/images/knn_icon1.jpg
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/images/knn_icon2.jpg
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/py_knn_index.markdown
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_simple.png
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png
 delete mode 100644 doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/images/svm_icon1.jpg
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/images/svm_icon2.jpg
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics1.png
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_index.markdown
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg
 delete mode 100644 doc/py_tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown
 delete mode 100644 doc/tutorials/others/_old/table_of_content_ml.markdown
 delete mode 100644 doc/tutorials/others/images/optimal-hyperplane.png
 delete mode 100644 doc/tutorials/others/images/sample-errors-dist.png
 delete mode 100644 doc/tutorials/others/images/separating-lines.png
 delete mode 100644 doc/tutorials/others/images/svm_intro_result.png
 delete mode 100644 doc/tutorials/others/images/svm_non_linear_result.png
 delete mode 100644 doc/tutorials/others/introduction_to_svm.markdown
 delete mode 100644 doc/tutorials/others/non_linear_svms.markdown
 delete mode 100644 modules/ml/CMakeLists.txt
 delete mode 100644 modules/ml/doc/ml_intro.markdown
 delete mode 100644 modules/ml/doc/pics/SVM_Comparison.png
 delete mode 100644 modules/ml/doc/pics/mlp.png
 delete mode 100644 modules/ml/doc/pics/neuron_model.png
 delete mode 100644 modules/ml/doc/pics/sigmoid_bipolar.png
 delete mode 100644 modules/ml/include/opencv2/ml.hpp
 delete mode 100644 modules/ml/include/opencv2/ml/ml.hpp
 delete mode 100644 modules/ml/include/opencv2/ml/ml.inl.hpp
 delete mode 100644 modules/ml/misc/java/test/MLTest.java
 delete mode 100644 modules/ml/misc/objc/gen_dict.json
 delete mode 100644 modules/ml/misc/python/pyopencv_ml.hpp
 delete mode 100644 modules/ml/misc/python/test/test_digits.py
 delete mode 100644 modules/ml/misc/python/test/test_goodfeatures.py
 delete mode 100644 modules/ml/misc/python/test/test_knearest.py
 delete mode 100644 modules/ml/misc/python/test/test_letter_recog.py
 delete mode 100644 modules/ml/src/ann_mlp.cpp
 delete mode 100644 modules/ml/src/boost.cpp
 delete mode 100644 modules/ml/src/data.cpp
 delete mode 100644 modules/ml/src/em.cpp
 delete mode 100644 modules/ml/src/gbt.cpp
 delete mode 100644 modules/ml/src/inner_functions.cpp
 delete mode 100644 modules/ml/src/kdtree.cpp
 delete mode 100644 modules/ml/src/kdtree.hpp
 delete mode 100644 modules/ml/src/knearest.cpp
 delete mode 100644 modules/ml/src/lr.cpp
 delete mode 100644 modules/ml/src/nbayes.cpp
 delete mode 100644 modules/ml/src/precomp.hpp
 delete mode 100644 modules/ml/src/rtrees.cpp
 delete mode 100644 modules/ml/src/svm.cpp
 delete mode 100644 modules/ml/src/svmsgd.cpp
 delete mode 100644 modules/ml/src/testset.cpp
 delete mode 100644 modules/ml/src/tree.cpp
 delete mode 100644 modules/ml/test/test_ann.cpp
 delete mode 100644 modules/ml/test/test_bayes.cpp
 delete mode 100644 modules/ml/test/test_em.cpp
 delete mode 100644 modules/ml/test/test_kmeans.cpp
 delete mode 100644 modules/ml/test/test_knearest.cpp
 delete mode 100644 modules/ml/test/test_lr.cpp
 delete mode 100644 modules/ml/test/test_main.cpp
 delete mode 100644 modules/ml/test/test_mltests.cpp
 delete mode 100644 modules/ml/test/test_precomp.hpp
 delete mode 100644 modules/ml/test/test_rtrees.cpp
 delete mode 100644 modules/ml/test/test_save_load.cpp
 delete mode 100644 modules/ml/test/test_svmsgd.cpp
 delete mode 100644 modules/ml/test/test_svmtrainauto.cpp
 delete mode 100644 modules/ml/test/test_utils.cpp
 delete mode 100644 modules/python/test/test_gaussian_mix.py
 delete mode 100644 samples/cpp/digits_svm.cpp
 delete mode 100644 samples/cpp/em.cpp
 delete mode 100644 samples/cpp/letter_recog.cpp
 delete mode 100644 samples/cpp/logistic_regression.cpp
 delete mode 100644 samples/cpp/neural_network.cpp
 delete mode 100644 samples/cpp/points_classifier.cpp
 delete mode 100644 samples/cpp/train_HOG.cpp
 delete mode 100644 samples/cpp/train_svmsgd.cpp
 delete mode 100644 samples/cpp/travelsalesman.cpp
 delete mode 100644 samples/cpp/tree_engine.cpp
 delete mode 100644 samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp
 delete mode 100644 samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
 delete mode 100644 samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java
 delete mode 100644 samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java
 delete mode 100755 samples/python/digits.py
 delete mode 100755 samples/python/digits_adjust.py
 delete mode 100755 samples/python/digits_video.py
 delete mode 100755 samples/python/gaussian_mix.py
 delete mode 100755 samples/python/letter_recog.py
 delete mode 100644 samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
 delete mode 100644 samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
 delete mode 100755 samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py

diff --git a/doc/opencv.bib b/doc/opencv.bib
index ada690497d..0991d7b171 100644
--- a/doc/opencv.bib
+++ b/doc/opencv.bib
@@ -175,13 +175,6 @@
   year = {1998},
   publisher = {Citeseer}
 }
-@book{Breiman84,
-  title = {Classification and regression trees},
-  author = {Breiman, Leo and Friedman, Jerome and Stone, Charles J and Olshen, Richard A},
-  year = {1984},
-  publisher = {CRC press},
-  url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223}
-}
 @incollection{Brox2004,
   author = {Brox, Thomas and Bruhn, Andres and Papenberg, Nils and Weickert, Joachim},
   title = {High accuracy optical flow estimation based on a theory for warping},
@@ -349,12 +342,6 @@
   publisher = {ACM},
   url = {https://www.researchgate.net/profile/Liyuan_Li/publication/221571587_Foreground_object_detection_from_videos_containing_complex_background/links/09e4150bdf566d110c000000/Foreground-object-detection-from-videos-containing-complex-background.pdf}
 }
-@article{FHT98,
-  author = {Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert},
-  title = {Additive Logistic Regression: a Statistical View of Boosting},
-  year = {1998},
-  url = {https://projecteuclid.org/download/pdf_1/euclid.aos/1016218223}
-}
 @inproceedings{FL02,
   author = {Fattal, Raanan and Lischinski, Dani and Werman, Michael},
   title = {Gradient domain high dynamic range compression},
@@ -521,16 +508,6 @@
   publisher = {IEEE},
   url = {http://www.openrs.org/photogrammetry/2015/SGM%202008%20PAMI%20-%20Stereo%20Processing%20by%20Semiglobal%20Matching%20and%20Mutual%20Informtion.pdf}
 }
-@article{HTF01,
-  author = {Trevor, Hastie and Robert, Tibshirani and Jerome, Friedman},
-  title = {The elements of statistical learning: data mining, inference and prediction},
-  year = {2001},
-  pages = {371--406},
-  journal = {New York: Springer-Verlag},
-  volume = {1},
-  number = {8},
-  url = {http://www.stat.auckland.ac.nz/~yee/784/files/ch09AdditiveModelsTrees.pdf}
-}
 @article{Hartley99,
   author = {Hartley, Richard I},
   title = {Theory and practice of projective rectification},
@@ -602,17 +579,6 @@
   number = {3},
   publisher = {Elsevier}
 }
-@article{Kirkpatrick83,
-  author = {Kirkpatrick, S. and Gelatt, C. D. Jr and Vecchi, M. P.},
-  title = {Optimization by Simulated Annealing},
-  year = {1983},
-  pages = {671--680},
-  journal = {Science},
-  volume = {220},
-  number = {4598},
-  publisher = {American Association for the Advancement of Science},
-  url = {http://sci2s.ugr.es/sites/default/files/files/Teaching/GraduatesCourses/Metaheuristicas/Bibliography/1983-Science-Kirkpatrick-sim_anneal.pdf}
-}
 @inproceedings{Kolmogorov03,
   author = {Kim, Junhwan and Kolmogorov, Vladimir and Zabih, Ramin},
   title = {Visual correspondence using energy minimization and mutual information},
@@ -657,16 +623,6 @@
   volume = {5},
   pages = {1530-1536}
 }
-@article{LibSVM,
-  author = {Chang, Chih-Chung and Lin, Chih-Jen},
-  title = {LIBSVM: a library for support vector machines},
-  year = {2011},
-  pages = {27},
-  journal = {ACM Transactions on Intelligent Systems and Technology (TIST)},
-  volume = {2},
-  number = {3},
-  publisher = {ACM}
-}
 @inproceedings{Lienhart02,
   author = {Lienhart, Rainer and Maydt, Jochen},
   title = {An extended set of haar-like features for rapid object detection},
@@ -905,14 +861,6 @@
   number = {1},
   publisher = {IEEE}
 }
-@inproceedings{RPROP93,
-  author = {Riedmiller, Martin and Braun, Heinrich},
-  title = {A direct adaptive method for faster backpropagation learning: The RPROP algorithm},
-  booktitle = {Neural Networks, 1993., IEEE International Conference on},
-  year = {1993},
-  pages = {586--591},
-  publisher = {IEEE}
-}
 @inproceedings{RRKB11,
   author = {Rublee, Ethan and Rabaud, Vincent and Konolige, Kurt and Bradski, Gary},
   title = {ORB: an efficient alternative to SIFT or SURF},
@@ -1235,14 +1183,6 @@
   year = {2007},
   publisher = {IEEE}
 }
-@incollection{bottou2010large,
-  title = {Large-scale machine learning with stochastic gradient descent},
-  author = {Bottou, L{\'e}on},
-  booktitle = {Proceedings of COMPSTAT'2010},
-  pages = {177--186},
-  year = {2010},
-  publisher = {Springer}
-}
 @inproceedings{Ke17,
   author = {Ke, Tong and Roumeliotis, Stergios},
   title = {An Efficient Algebraic Solution to the Perspective-Three-Point Problem},
diff --git a/doc/py_tutorials/py_ml/images/knnicon.png b/doc/py_tutorials/py_ml/images/knnicon.png
deleted file mode 100644
index 61e4dc040bee25e5a7ecfd443ee42d5302f7d53b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3424
[... base85-encoded binary image data omitted ...]
diff --git a/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png b/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/images/knn_theory.png
deleted file mode 100644
index 9d1abdded4f91831ca66f895f190105cabf10af2..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6700
[... base85-encoded binary image data omitted ...]

diff --git a/doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png b/doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics2.png
deleted file mode 100644
index d4522f047986799c7d327ce744f9931cda5e719b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 7998
[... base85-encoded binary image data omitted ...]

diff --git a/doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png b/doc/py_tutorials/py_ml/py_svm/py_svm_basics/images/svm_basics3.png
deleted file mode 100644
index 1379a56d52442a73edc1f43844e99017ab4475e9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10353
[... base85-encoded binary image data omitted ...]
zQkGZMiSm`%9m=D_3aDs)r|&b&$T4y%AQenVjI%Da)~xmGN|igYR|GX&_cFapsj|Hd znQ4ewEc?i@Bn>d6Of3NI{PupddVjst(vWv1irogiJ(CgpVUp+ILWv1rT}udOF!(Dc zD^vcezRj06jQLnI4P&QOu05aOK>|TliB|u@m=!`n3qV%%%d-L)$h;HIicpv*>Krco z(XNXjCdc@hxHEaJzwJ5GxxEtYy%gHH$&+O;s6Qlv;!n-ou$yYC?As*!fEQq5gVV_1 zWj0h~x0mUx_a!ZRZ}ooV=th2R_=kUqH%+RN3QDVViaHr5g~o;^?&!&1BVLpk2LlHy zVUQ)bdd5g?SD!~&t?sN7vM55KM0vV((eu=yYg|9PF?pGl)^~ zkaLg8=K-FCzcS{e(?Kjt&p2-1ldewAG>?WPaUKQB?LEe`&gBLAiJM zpa5%52nVrnj1s%8r8Bf9p*BJhbckxfwb0|(iF>O{3eHTot`xOjW5lc)b4;-tm%~_v zhm7JBIFvhs8eY6QunQeUCt1ri_nngIj}SLQH*t=~tkp_FKnLaUKAk^**wxMdz`7)O zMVbnHKdKls+bh5fCi>9b|5h`U9y;iOk}Gg#2*gsx4?srJ9^J~Ar@fZea7rjm+4)cU zUU9|qUNS+v(PSnyW#=ZIUbw4sY0eQc{0$dk1Jm~Ry4h*}GWzwM)lXtFO7J!(OcExN z|KJawRTB)6O-=vh0g3F|m(2}N?jM|_F`Ph;jfqit+0_s57GU71Df?VQ%G6CX$S3Hm z8y8cW-0$5I9k)d=(3p-BJ+N6{ucf#=@Yhzi@s^8^CrRfgzOnjzNJP4E&79+U>f|57 z#AL_Wt8(&~)4Krk?7T+_#MXV1Itwg2!hpM?i&KED>)6|H5ppsr?QheKL9^x4K$Gh+ zeV@(CRW)dEdg|uQ0|o|0pCmU*U|?XkQ~<$-fFY%=d*(s;-jQ(oY(D-o+k8dXzg?u@ z4ua>8w=YE+w`MX+b8OW8kkM0nUgdDeK%9L>HwC*JzF{}I{l=!Kq9&&Za2xqlFW-eX z$^MpO!{~c9V(ST?d)p-}rzJ}L zrNk5#oj+EReOHi1?U{cYz*tY=@un~^Ya4g(%v18t(w79{x^qA@o~%c57vbIrTg*(I z{kFMOoW)6as3G&Zc#&kHFa)T>DjW_pAaa3Br)qmJneyXH5!xxJK&C zoT2Uhbs@BOHX(NMtG2Q-jyjyX`?OEr1iHt3KsA4O^?Y%>@_ci2A^#qWN!_%XdOGt>LIMMdka zm#Ii77YJZJo-LuHql4%$0MQ|-zgZN;H@=-OSP{g zNd|@sm(-A@>0ZY(*&*yP88))5lT-3o@rke8+z2uX84V2$6&008Xr3bWBgixXAgw_} zZ;{AHoHvJFQ-=*$Yxe4wSQ-C&kfA&>YYv3X5%$!mm8qKqv4eJ2rOMAMJ8XKwaLU3D z=gOn{Ac%7&ijy%r*qb&3e0eM|+dotPJ*_5%W3EEgs9abJr1XtSZDs$cC}Pd$3(x+&iENKhfZN;`klu?XKCIHluu+uZp-A zW2aABOo|X2n1HD%!zeQaet>rvW{h&yq<>GXEMqDe%_ZgR+YcJni=DpoOO!zRuL`$M zLt@Et4EQFr?21#4{6_OMakXrW_&eEzJ;!G&Qdy#o(gEBfte%1aitW5z@3rLfu8WLi zi|e5VyS@$?Gv~l+ixN*9g^lr9ba$qogHvRF2gMnoBvn#r}(9?(cas*k~E?$(Cgdb~|gakrskN8Uhjy9g2=dajwIwB?RLL`{OM3 zaITu3)AQ3I8@(N$bLYA3Ht_ub2&Eb-)LU}6W}5M@U3|j}vP^TiZ=~IkSzU`+*^V_U zR65Z2=VNJy+=_4>Bs4XKFahE00$nc-?%(2q^8!pk{r|dN_2@%nHJ;9U8yKQ#2fq5u zT4W= zpzG_F>*GJcIU+Jfd?z6QeEZr6u3iRWaYs$Iv1tLj6Mg+r)s8~U;hxSAdHFwjzx4)^ zj91SsP|+9bJkwG$PV7K+@LxlchY0m1P?0ye=oeqmCf{O-Qne#*@IIuA{PhW9D0xyq z^znNehwF?X^z?8QS=L^{L|G;N?0fmgoAT+{v?PSpQ-#gOSWh?GH-T7QW3qDUu%`beZ#S28G8xjuM zp#hSIHN{=${^R8|iq^|fa!U>r2FM03HfwGwNY-*B{o;q|LIY90OK-CIDGfT(Y2~s~ z4hnbB{DIr~Z2wHBM%CkSs3<8nrC7f56mrQ_ zXQwWF_lV)6$<}yIHWuJU;= zQJ0C&3flVCP-35eWR@bd!>^ZCNYk&wj4` zTUC{$ulM2Ph!`O`Ig;z`=>}wx%}f@J%D^EE?eMN>_+ID8YnJ`xsxs-RLshcl8R@Ed zsdcKo@wq6m!m!UPbks3dC!T3L^c{k6$cpKc-%5OrMaPqiO@PUYUG8$r0Tld`pH@yyff-G5^Wq(i6|^(DRVsDwd}HBM+$Q1V*h7UIde$QbK$^r^yNa+R=lvexS3@>PPGf`Yv!R1=)NKs^{&>XM#~aj=0nI2Xg5LbizIqo|mGJ~EHER^EXQjgYulw$Z+ng$LnxbpQE(q9~)?2$EX4*id= z*RI+|94eo(eU`RXjBi&9WE{Z0ZKmTgw;V{F4`{i$ZG?B$c-Z4IFz%^CyxEYXXw3R; z>2m1=%w}XQe+ZKz7g?WB`df-}O z4=3}F39_kIuRdLejrZrp;b<2ffGtW+xp_v6=1H7?y`3|X?5SH1bA_-W4PcMIGHK8^ zQ&R@l)z{ly$V{*7ctYHX5a;x246_4b{&G3`D)4*=WT0r=z`S>QTE(PTtzTBPrGGpE z&Zy{>LIDJR%g{&t&)Q=(!Vp~UqBOTe0@K;pv_~997%sVRG51g6mgSX_!m2JOJFuP7 zpa%pIzj~0V{lp&?Ln(B9eO;XBb)Xztr-h>GEU-(akJ$71T z7N?UGP-0@DX=P(;YsS-2WPXy8W)o}Tupyx^m6e5{hdBAd1a2nK8Fr^+0Cs6#BKc8l z(%uB>ugYSNV5q9VnLF& zRF(nB{FtHg5Xi=+<^@(kMYA+_SHotj@~o<}=(qn*DRw?TJs^#mJ)(>%UU!eXabr}u zthI}$Z)T_JNY-~^YXIXhJw6=5HBieZoSdEelUA+yeZC6CH6)1hHpG=LU=n~;2WvRz z(095sLv@;0OLXWKIyDi5NLES;$+r=-o>FZ@c=kH;n2}QdF9L_>mf~0{jt}oMe5`dv zLqzxx0XZ;B6AnzWeoD|~_2|lfTj;k0f0}W@bL~w7l7iu8V)Puk98(RnFp^b>!(mL7 z9#}mYfnD`u)Xk5?n7~+{Ff9n0lQmTHIXoZV8!DShK`+89LJ%W&$4(Qwb~1iRGYqfB z7obvsc4eoQl$0QNCH6>!*ssA*US57tC?3>rI-iH#u5W2D=S8E>+4^vXH`b{w5JIGX zAJ3c#UCmB~EKEw2OV>aVy%#f#)X<3VGW>Ka&d5yueZa&(ssLl}zK}udxyk*JNw)Nk z0rhg|6;}fJp@EO*cokFtSWmi@TAdFH^HMB#`>sRh2RTwekb%c=tWX&e4Z8n$qARl+ 
[binary data omitted]
diff --git a/doc/py_tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown b/doc/py_tutorials/py_ml/py_svm/py_svm_basics/py_svm_basics.markdown
deleted file mode 100644
-If \f$f(X) > 0\f$, it belongs to the blue group, else it belongs to the red group. We can call
-this line the **Decision Boundary**. It is very simple and memory-efficient. Data which can be
-divided into two groups with a straight line (or a hyperplane in higher dimensions) is called
-**linearly separable**.
-
-In the above image you can see that plenty of such lines are possible. Which one should we take?
-Intuitively, the line should pass as far as possible from all the points, because there can be
-noise in the incoming data which should not affect the classification accuracy. A line far from
-all the samples therefore provides more immunity against noise. So what SVM does is find a
-straight line (or hyperplane) with the largest minimum distance to the training samples. See the
-bold line in the image below passing through the center.
-
-![image](images/svm_basics2.png)
-
-To find this decision boundary, you need training data. Do you need all of it? No. The samples
-close to the opposite group are sufficient. In our image, they are the one blue filled circle and
-the two red filled squares. We call them **Support Vectors**, and the lines passing through them
-are called **Support Planes**. They are adequate for finding the decision boundary; we need not
-worry about all the data. This helps in data reduction.
-
-First, the two hyperplanes which best represent the data are found. For example, blue data is
-represented by \f$w^Tx+b_0 > 1\f$ while red data is represented by \f$w^Tx+b_0 < -1\f$, where
-\f$w\f$ is the **weight vector** (\f$w=[w_1, w_2,..., w_n]\f$) and \f$x\f$ is the feature vector
-(\f$x = [x_1,x_2,..., x_n]\f$). \f$b_0\f$ is the **bias**. The weight vector decides the
-orientation of the decision boundary, while the bias decides its location. The decision boundary
-is defined to be midway between these hyperplanes, and is expressed as \f$w^Tx+b_0 = 0\f$. The
-minimum distance from a support vector to the decision boundary is given by
-\f$distance_{support \, vectors}=\frac{1}{||w||}\f$. The margin is twice this distance, and we
-need to maximize it, i.e. we need to minimize a new function \f$L(w, b_0)\f$ subject to some
-constraints, which can be expressed as:
-
-\f[\min_{w, b_0} L(w, b_0) = \frac{1}{2}||w||^2 \; \text{subject to} \; t_i(w^Tx+b_0) \geq 1 \; \forall i\f]
-
-where \f$t_i\f$ is the label of each class, \f$t_i \in \{-1,1\}\f$.
-
-### Non-Linearly Separable Data
-
-Consider some data which can't be divided into two groups with a straight line. For example,
-consider one-dimensional data where 'X' is at -3 & +3 and 'O' is at -1 & +1. Clearly this is not
-linearly separable, but there are methods to solve such problems. If we map this data set with
-the function \f$f(x) = x^2\f$, we get 'X' at 9 and 'O' at 1, which are linearly separable.
-
-Alternatively, we can convert this one-dimensional data to two-dimensional data, using the
-function \f$f(x)=(x,x^2)\f$ to map it. Then 'X' becomes (-3,9) and (3,9) while 'O' becomes (-1,1)
-and (1,1). This is also linearly separable.
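-
-As a quick illustration of this mapping idea, here is a minimal NumPy sketch; the points, labels
-and the separating threshold \f$x_2 = 5\f$ below are chosen only for illustration:
-
-@code{.py}
-import numpy as np
-
-x = np.array([-3, -1, 1, 3], dtype=np.float32)  # 'X' at -3 & +3, 'O' at -1 & +1
-labels = np.array([1, -1, -1, 1])               # 1 = 'X', -1 = 'O'
-
-# map the 1D points to 2D with f(x) = (x, x^2)
-mapped = np.stack([x, x**2], axis=1)
-
-# in the mapped space the horizontal line x_2 = 5 separates the two classes
-predicted = np.where(mapped[:, 1] > 5, 1, -1)
-print(np.array_equal(predicted, labels))        # True
-@endcode
-
-Any threshold between 1 and 9 would work equally well here; 5 is simply the midpoint.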
-In short, data that is not linearly separable in a lower-dimensional space has a higher chance of
-becoming linearly separable in a higher-dimensional space.
-
-In general, it is possible to map points in a d-dimensional space to some D-dimensional space
-\f$(D>d)\f$ to check the possibility of linear separability. There is an idea which helps to
-compute the dot product in the high-dimensional (kernel) space by performing computations in the
-low-dimensional input (feature) space. We can illustrate this with the following example.
-
-Consider two points in two-dimensional space, \f$p=(p_1,p_2)\f$ and \f$q=(q_1,q_2)\f$. Let
-\f$\phi\f$ be a mapping function which maps a two-dimensional point to three-dimensional space as
-follows:
-
-\f[\phi (p) = (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2)
-\phi (q) = (q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2)\f]
-
-Let us define a kernel function \f$K(p,q)\f$ which does a dot product between two points, shown
-below:
-
-\f[
-\begin{aligned}
-K(p,q) = \phi(p).\phi(q) &= \phi(p)^T \phi(q) \\
-         &= (p_{1}^2,p_{2}^2,\sqrt{2} p_1 p_2).(q_{1}^2,q_{2}^2,\sqrt{2} q_1 q_2) \\
-         &= p_{1}^2 q_{1}^2 + p_{2}^2 q_{2}^2 + 2 p_1 q_1 p_2 q_2 \\
-         &= (p_1 q_1 + p_2 q_2)^2 \\
-    \phi(p).\phi(q) &= (p.q)^2
-\end{aligned}
-\f]
-
-This means that a dot product in the three-dimensional space can be achieved using the squared
-dot product in the two-dimensional space. The same applies to higher-dimensional spaces, so we
-can calculate higher-dimensional features from the lower-dimensional data itself. (A quick
-numerical check of this identity is given at the end of this section.)
-
-In addition to all these concepts, there is the problem of misclassification. Just finding the
-decision boundary with the maximum margin is not sufficient; we also need to consider
-misclassification errors. Sometimes it may be possible to find a decision boundary with a smaller
-margin but fewer misclassifications. We therefore modify the model so that it finds the decision
-boundary with the maximum margin but with less misclassification. The minimization criterion is
-modified to:
-
-\f[min \; ||w||^2 + C(distance \; of \; misclassified \; samples \; to \; their \; correct \; regions)\f]
-
-The image below shows this concept. For each sample of the training data a new parameter
-\f$\xi_i\f$ is defined: the distance from the corresponding training sample to its correct
-decision region. Samples that are not misclassified fall on their corresponding support planes,
-so their distance is zero.
-
-![image](images/svm_basics3.png)
-
-So the new optimization problem is:
-
-\f[\min_{w, b_{0}} L(w,b_0) = ||w||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } y_{i}(w^{T} x_{i} + b_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f]
-
-How should the parameter C be chosen? The answer obviously depends on how the training data is
-distributed. Although there is no general answer, it is useful to take these rules into account:
-
-- Large values of C give solutions with fewer misclassification errors but a smaller margin.
-  In this case misclassification errors are considered expensive: since the aim of the
-  optimization is to minimize the argument, few misclassification errors are allowed.
-- Small values of C give solutions with a bigger margin and more classification errors. In this
-  case the minimization puts less weight on the sum term, so it focuses more on finding a
-  hyperplane with a big margin.
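-
-The kernel identity \f$\phi(p).\phi(q) = (p.q)^2\f$ derived above can be checked numerically;
-the two sample points below are arbitrary values chosen only for this sketch:
-
-@code{.py}
-import numpy as np
-
-def phi(v):
-    # explicit mapping to 3D: (v1^2, v2^2, sqrt(2)*v1*v2)
-    return np.array([v[0]**2, v[1]**2, np.sqrt(2) * v[0] * v[1]])
-
-p = np.array([1.0, 2.0])
-q = np.array([3.0, 0.5])
-
-lhs = np.dot(phi(p), phi(q))  # dot product in the mapped 3D space
-rhs = np.dot(p, q) ** 2       # squared dot product in the input 2D space
-print(np.isclose(lhs, rhs))   # True
-@endcode
-
-This is why the mapped points never have to be computed explicitly when training a kernel SVM.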
-
-Additional Resources
---------------------
-
--# [NPTEL notes on Statistical Pattern Recognition, Chapters 25-29](https://nptel.ac.in/courses/117108048)
-
-Exercises
----------
diff --git a/doc/py_tutorials/py_ml/py_svm/py_svm_index.markdown b/doc/py_tutorials/py_ml/py_svm/py_svm_index.markdown
deleted file mode 100644
index dc737e97a0..0000000000
--- a/doc/py_tutorials/py_ml/py_svm/py_svm_index.markdown
+++ /dev/null
@@ -1,10 +0,0 @@
-Support Vector Machines (SVM) {#tutorial_py_svm_index}
-=============================
-
-- @subpage tutorial_py_svm_basics
-
-    Get a basic understanding of what SVM is
-
-- @subpage tutorial_py_svm_opencv
-
-    Let's use SVM functionalities in OpenCV
diff --git a/doc/py_tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg b/doc/py_tutorials/py_ml/py_svm/py_svm_opencv/images/deskew.jpg
deleted file mode 100644
index 32c22b7afe9dcf7b00d2ae53afd000605c428708..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 11383
[binary data omitted]
diff --git a/doc/tutorials/others/images/optimal-hyperplane.png b/doc/tutorials/others/images/optimal-hyperplane.png
deleted file mode 100644
GIT binary patch
literal 0
HcmV?d00001

literal 7998
[binary data omitted]
diff --git a/doc/tutorials/others/images/sample-errors-dist.png b/doc/tutorials/others/images/sample-errors-dist.png
deleted file mode 100644
index 1379a56d52442a73edc1f43844e99017ab4475e9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10353
[binary data omitted]
diff --git a/doc/tutorials/others/images/separating-lines.png b/doc/tutorials/others/images/separating-lines.png
deleted file mode 100644
GIT binary patch
literal 0
HcmV?d00001

literal 7731
[binary data omitted]
diff --git a/doc/tutorials/others/images/svm_intro_result.png b/doc/tutorials/others/images/svm_intro_result.png
deleted file mode 100644
index 5f477a4f699af3776b36055f86229c96295c44e3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1886
[binary data omitted]
diff --git a/doc/tutorials/others/images/svm_non_linear_result.png b/doc/tutorials/others/images/svm_non_linear_result.png
deleted file mode 100644
index bfecae9a1b60e1344cf9fb9b89869b5cd4bf013e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 14714
diff --git a/doc/tutorials/others/introduction_to_svm.markdown b/doc/tutorials/others/introduction_to_svm.markdown
deleted file mode 100644
--- a/doc/tutorials/others/introduction_to_svm.markdown
+++ /dev/null
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
========================================

@tableofcontents

@next_tutorial{tutorial_non_linear_svms}

|    |    |
| -: | :- |
| Original author | Fernando Iglesias García |
| Compatibility | OpenCV >= 3.0 |

Goal
----

In this tutorial you will learn how to:

- Use the OpenCV functions @ref cv::ml::SVM::train to build a classifier based on SVMs and @ref
  cv::ml::SVM::predict to test its performance.

What is a SVM?
--------------

A Support Vector Machine (SVM) is a discriminative classifier formally defined by a separating
hyperplane. In other words, given labeled training data (*supervised learning*), the algorithm
outputs an optimal hyperplane which categorizes new examples.

In which sense is the hyperplane obtained optimal? Let's consider the following simple problem:

For a linearly separable set of 2D-points which belong to one of two classes, find a separating
straight line.

![](images/separating-lines.png)

@note In this example we deal with lines and points in the Cartesian plane instead of hyperplanes
and vectors in a high-dimensional space. This is a simplification of the problem. It is important to
understand that this is done only because our intuition is better built from examples that are easy
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.
In the above picture you can see that there exist multiple lines that solve the problem. Is any of
them better than the others? We can intuitively define a criterion to estimate the worth of the
lines: a line is bad if it passes too close to the points, because it will be noise sensitive and
will not generalize correctly. Therefore, our goal should be to find the line passing as far as
possible from all points.

The operation of the SVM algorithm is therefore based on finding the hyperplane that gives the
largest minimum distance to the training examples. Twice this distance is known as the **margin**
in SVM theory. The optimal separating hyperplane is the one that *maximizes* the margin of the
training data.

![](images/optimal-hyperplane.png)

How is the optimal hyperplane computed?
---------------------------------------

Let's introduce the notation used to define a hyperplane formally:

\f[f(x) = \beta_{0} + \beta^{T} x,\f]

where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bias*.

@note A more in-depth description of hyperplanes can be found in section 4.5 (*Separating
Hyperplanes*) of the book *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman (@cite HTF01).

The optimal hyperplane can be represented in an infinite number of different ways by scaling
\f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible representations
of the hyperplane, the one chosen is

\f[|\beta_{0} + \beta^{T} x| = 1,\f]

where \f$x\f$ symbolizes the training examples closest to the hyperplane. In general, the training
examples that are closest to the hyperplane are called **support vectors**. This representation is
known as the **canonical hyperplane**.

Now, we use the result of geometry that gives the distance between a point \f$x\f$ and a hyperplane
\f$(\beta, \beta_{0})\f$:

\f[\mathrm{distance} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||}.\f]

In particular, for the canonical hyperplane the numerator is equal to one, and the distance to the
support vectors is

\f[\mathrm{distance}_{\text{ support vectors}} = \frac{|\beta_{0} + \beta^{T} x|}{||\beta||} = \frac{1}{||\beta||}.\f]

Recall that the margin, here denoted as \f$M\f$, is twice the distance to the closest examples:

\f[M = \frac{2}{||\beta||}.\f]

Finally, the problem of maximizing \f$M\f$ is equivalent to the problem of minimizing a function
\f$L(\beta)\f$ subject to some constraints. The constraints model the requirement for the
hyperplane to classify all the training examples \f$x_{i}\f$ correctly. Formally,

\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i,\f]

where \f$y_{i}\f$ represents each of the labels of the training examples.

This is a problem of Lagrangian optimization that can be solved using Lagrange multipliers to
obtain the weight vector \f$\beta\f$ and the bias \f$\beta_{0}\f$ of the optimal hyperplane.
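Although the tutorial stops the derivation here, it may help to sketch, as an aside following
standard SVM textbooks rather than anything OpenCV-specific, how the multipliers enter. Introducing
one multiplier \f$\alpha_{i} \geq 0\f$ per constraint gives the Lagrangian

\f[\mathcal{L}(\beta, \beta_{0}, \alpha) = \frac{1}{2}||\beta||^{2} - \sum_{i} \alpha_{i} \left[ y_{i}(\beta^{T} x_{i} + \beta_{0}) - 1 \right], \quad \alpha_{i} \geq 0.\f]

Setting the derivatives with respect to \f$\beta\f$ and \f$\beta_{0}\f$ to zero yields

\f[\beta = \sum_{i} \alpha_{i} y_{i} x_{i}, \qquad \sum_{i} \alpha_{i} y_{i} = 0,\f]

so the optimal weight vector is a linear combination of the training examples with
\f$\alpha_{i} > 0\f$, which are exactly the support vectors.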
Source Code
-----------

@add_toggle_cpp
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)

- **Code at a glance:**
  @include samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp
@end_toggle

@add_toggle_java
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)

- **Code at a glance:**
  @include samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java
@end_toggle

@add_toggle_python
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)

- **Code at a glance:**
  @include samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
@end_toggle

Explanation
-----------

- **Set up the training data**

The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup1
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup1
@end_toggle

The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java setup2
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py setup2
@end_toggle

- **Set up SVM's parameters**

  In this tutorial we have introduced the theory of SVMs in the simplest case, when the training
  examples are spread into two classes that are linearly separable. However, SVMs can be used in a
  wide variety of problems (e.g. problems with non-linearly separable data, or an SVM using a
  kernel function to raise the dimensionality of the examples). As a consequence of this, we have
  to define some parameters before training the SVM. These parameters are stored in an object of
  the class @ref cv::ml::SVM.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java init
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py init
@end_toggle

Here:
- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
  n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
  with imperfect separation of classes (i.e. when the training data is non-linearly separable).
  This feature is not important here since the data is linearly separable; we chose this SVM type
  only because it is the most commonly used one.

- *Type of SVM kernel*.
  We have not talked about kernel functions since they are not relevant for the training data we
  are dealing with. Nevertheless, let's briefly explain the main idea behind a kernel function. It
  is a mapping applied to the training data to improve its resemblance to a linearly separable set
  of data. This mapping increases the dimensionality of the data and is done efficiently using a
  kernel function. We choose here the type @ref cv::ml::SVM::LINEAR "LINEAR", which means that no
  mapping is done. This parameter is defined using cv::ml::SVM::setKernel.

- *Termination criteria of the algorithm*. The SVM training procedure is implemented by solving a
  constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
  maximum number of iterations and a tolerance error so that the algorithm is allowed to finish in
  a smaller number of steps even if the optimal hyperplane has not been computed yet. This
  parameter is defined in a structure @ref cv::TermCriteria (a compact end-to-end sketch of the
  whole pipeline is given at the end of this tutorial).

- **Train the SVM**
  We call the method @ref cv::ml::SVM::train to build the SVM model.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java train
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py train
@end_toggle

- **Regions classified by the SVM**

  The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
  this example we have used this method in order to color the space depending on the prediction
  done by the SVM. In other words, an image is traversed interpreting its pixels as points of the
  Cartesian plane. Each of the points is colored depending on the class predicted by the SVM: in
  green if it is the class with label 1, and in blue if it is the class with label -1.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show
@end_toggle

- **Support vectors**

  We use here a couple of methods to obtain information about the support vectors. The method
  @ref cv::ml::SVM::getSupportVectors obtains all of the support vectors. We have used this method
  here to find the training examples that are support vectors and highlight them.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java show_vectors
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py show_vectors
@end_toggle

Results
-------

- The code opens an image and shows the training examples of both classes. The points of one class
  are represented with white circles and black ones are used for the other class.
- The SVM is trained and used to classify all the pixels of the image. This results in a division
  of the image in a blue region and a green region. The boundary between both regions is the
  optimal separating hyperplane.
- Finally the support vectors are shown using gray rings around the training examples.

![](images/svm_intro_result.png)
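For readers who want a single self-contained listing, the following is a minimal sketch of the
pipeline described above. It mirrors, but is not identical to, the referenced sample; the training
points and parameter values are illustrative.

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <iostream>

int main()
{
    using namespace cv;
    // Four 2D training points: one belongs to class 1, three to class -1.
    float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
    int labels[4] = { 1, -1, -1, -1 };
    Mat trainingDataMat(4, 2, CV_32F, trainingData);
    Mat labelsMat(4, 1, CV_32SC1, labels);

    // Configure a linear C-SVC exactly as discussed in the parameter section.
    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::C_SVC);
    svm->setKernel(ml::SVM::LINEAR);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));

    // Train on row-ordered samples and classify one new point.
    svm->train(trainingDataMat, ml::ROW_SAMPLE, labelsMat);
    Mat sample = (Mat_<float>(1, 2) << 300, 10);
    std::cout << "predicted label: " << svm->predict(sample) << std::endl;
    return 0;
}
@endcode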
diff --git a/doc/tutorials/others/non_linear_svms.markdown b/doc/tutorials/others/non_linear_svms.markdown
deleted file mode 100644
index 5bfd8aae4c..0000000000
--- a/doc/tutorials/others/non_linear_svms.markdown
+++ /dev/null
@@ -1,288 +0,0 @@
Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
=======================================================

@tableofcontents

@prev_tutorial{tutorial_introduction_to_svm}
@next_tutorial{tutorial_introduction_to_pca}

|    |    |
| -: | :- |
| Original author | Fernando Iglesias García |
| Compatibility | OpenCV >= 3.0 |

Goal
----

In this tutorial you will learn how to:

- Define the optimization problem for SVMs when it is not possible to separate the training data
  linearly.
- Configure the parameters to adapt your SVM to this class of problems.

Motivation
----------

Why is it interesting to extend the SVM optimization problem to handle non-linearly separable
training data? Most applications in which SVMs are used in computer vision require a more powerful
tool than a simple linear classifier. This stems from the fact that in these tasks __the training
data can rarely be separated using a hyperplane__.

Consider one of these tasks, for example, face detection. The training data in this case is
composed of a set of images that are faces and another set of images that are non-faces
(_everything else in the world except faces_). This training data is too complex to find a
representation of each sample (_feature vector_) that could make the whole set of faces linearly
separable from the whole set of non-faces.

Extension of the Optimization Problem
-------------------------------------

Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
samples. This _misclassification_ is a new variable in the optimization that must be taken into
account. The new model has to include both the old requirement of finding the hyperplane that
gives the biggest margin and the new one of generalizing the training data correctly by not
allowing too many classification errors.

We start here from the formulation of the optimization problem of finding the hyperplane which
maximizes the __margin__, explained in the previous tutorial (@ref tutorial_introduction_to_svm):

\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i.\f]

There are multiple ways in which this model can be modified so that it takes into account the
misclassification errors. For example, one could think of minimizing the same quantity plus a
constant times the number of misclassification errors in the training data, i.e.:

\f[\min ||\beta||^{2} + C \text{(misclassification errors)}\f]

However, this is not a very good solution since, among other reasons, it does not distinguish
between samples that are misclassified only slightly, close to their appropriate decision region,
and samples that are misclassified by a large distance.
Therefore, a better solution will take into account the _distance of the misclassified samples to
their correct decision regions_, i.e.:

\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]

For each sample of the training data a new parameter \f$\xi_{i}\f$ is defined. Each of these
parameters contains the distance from its corresponding training sample to its correct decision
region. The following picture shows non-linearly separable training data from two classes, a
separating hyperplane, and the distances to their correct regions of the samples that are
misclassified.

![](images/sample-errors-dist.png)

@note Only the distances of the samples that are misclassified are shown in the picture. The
distances of the rest of the samples are zero, since they already lie in their correct decision
region.

The red and blue lines that appear in the picture are the margins to each one of the decision
regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
misclassified training sample to the margin of its appropriate region.

Finally, the new formulation for the optimization problem is:

\f[\min_{\beta, \beta_{0}} L(\beta) = ||\beta||^{2} + C \sum_{i} {\xi_{i}} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 - \xi_{i} \text{ and } \xi_{i} \geq 0 \text{ } \forall i\f]

How should the parameter C be chosen? Obviously, the answer depends on how the training data is
distributed. Although there is no general answer, it is useful to keep in mind the following rules:

- Large values of C give solutions with _fewer misclassification errors_ but a _smaller margin_.
  In this case misclassification errors are expensive. Since the aim of the optimization is to
  minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with a _bigger margin_ and _more classification errors_. In
  this case the minimization does not weight the sum term that much, so it focuses more on finding
  a hyperplane with a big margin.
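As an illustrative sketch (the two values are arbitrary, not a recommendation), the trade-off
above is controlled through a single setter on the @ref cv::ml::SVM object:

@code{.cpp}
// A minimal sketch of how C enters the configuration; the values below are
// arbitrary and only meant to illustrate the trade-off discussed above.
cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
svm->setType(cv::ml::SVM::C_SVC);   // C is only meaningful for the C-SVC type
svm->setC(100.0);                   // large C: fewer misclassifications, smaller margin
// svm->setC(0.1);                  // small C: bigger margin, more misclassifications
@endcode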
Source Code
-----------

You may also find the source code in the `samples/cpp/tutorial_code/ml/non_linear_svms` folder of
the OpenCV source library or
[download it from here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).

@add_toggle_cpp
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)

- **Code at a glance:**
  @include samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
@end_toggle

@add_toggle_java
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)

- **Code at a glance:**
  @include samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java
@end_toggle

@add_toggle_python
- **Downloadable code**: Click
  [here](https://github.com/opencv/opencv/tree/5.x/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)

- **Code at a glance:**
  @include samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
@end_toggle

Explanation
-----------

- __Set up the training data__

The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes. To make the exercise more appealing, the training data is generated randomly
using uniform probability density functions (PDFs).

We have divided the generation of the training data into two main parts.

In the first part we generate data for both classes that is linearly separable.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup1
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup1
@end_toggle

In the second part we create data for both classes that is non-linearly separable, data that
overlaps.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java setup2
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py setup2
@end_toggle

- __Set up SVM's parameters__

@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
attributes of the class @ref cv::ml::SVM that we configure here before training the SVM.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java init
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py init
@end_toggle

There are just two differences between the configuration we do here and the one that was done in
the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.

- _C_. We chose here a small value of this parameter in order not to punish the misclassification
  errors too much in the optimization. The idea of doing this stems from the will of obtaining a
  solution close to the one intuitively expected. However, we recommend getting a better insight
  into the problem by making adjustments to this parameter.

  @note In this case there are just very few points in the overlapping region between classes. By
  giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be increased and the
  impact of the parameter _C_ explored more deeply.

- _Termination Criteria of the algorithm_. The maximum number of iterations has to be increased
  considerably in order to solve a problem with non-linearly separable training data correctly. In
  particular, we have increased this value by five orders of magnitude (see the short sketch right
  after the training step below).

- __Train the SVM__

We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out that the training
process may take quite a long time. Have patience when you run the program.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java train
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py train
@end_toggle
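To make the remark about the iteration budget concrete, here is a hedged one-liner; the exact
counts used by the sample may differ:

@code{.cpp}
// Illustrative only: raising the solver's iteration budget by several orders
// of magnitude compared with the ~100 iterations of the linear tutorial.
svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, (int)1e7, 1e-6));
@endcode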
- __Show the Decision Regions__

The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM: in
dark green if it is the class with label 1, and in dark blue if it is the class with label 2 (a
compact version of this loop is sketched at the end of this tutorial).

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show
@end_toggle

- __Show the training data__

The method @ref cv::circle is used to show the samples that compose the training data. The samples
of the class labeled with 1 are shown in light green, and the samples of the class labeled with 2
in light blue.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_data
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_data
@end_toggle

- __Support vectors__

We use here a couple of methods to obtain information about the support vectors. The method
@ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used this method here to
find the training examples that are support vectors and highlight them.

@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors
@end_toggle

@add_toggle_java
@snippet samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java show_vectors
@end_toggle

@add_toggle_python
@snippet samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py show_vectors
@end_toggle

Results
-------

- The code opens an image and shows the training examples of both classes. The points of one class
  are represented in light green; light blue is used for the other class.
- The SVM is trained and used to classify all the pixels of the image. This results in a division
  of the image in a blue region and a green region. The boundary between both regions is the
  separating hyperplane. Since the training data is non-linearly separable, some of the examples
  of both classes are misclassified: some green points lie in the blue region and some blue points
  lie in the green one.
- Finally the support vectors are shown using gray rings around the training examples.

![](images/svm_non_linear_result.png)

You may observe a runtime instance of this on [YouTube](https://www.youtube.com/watch?v=vFv2yPcSo-Q).

@youtube{vFv2yPcSo-Q}
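The region-coloring loop described in __Show the Decision Regions__ boils down to something like
the following sketch; the canvas size and the two colors are assumptions, not the sample's exact
values, and `svm` is assumed to be a trained model:

@code{.cpp}
// Classify every pixel of a 512x512 canvas and color it by the predicted
// class, which paints the decision regions of the trained SVM.
cv::Mat image = cv::Mat::zeros(512, 512, CV_8UC3);
for (int i = 0; i < image.rows; i++)
{
    for (int j = 0; j < image.cols; j++)
    {
        cv::Mat sample = (cv::Mat_<float>(1, 2) << (float)j, (float)i);
        float response = svm->predict(sample);
        image.at<cv::Vec3b>(i, j) = (response == 1) ? cv::Vec3b(0, 100, 0)   // dark green
                                                    : cv::Vec3b(100, 0, 0);  // dark blue (BGR)
    }
}
@endcode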
diff --git a/doc/tutorials/others/table_of_content_other.markdown b/doc/tutorials/others/table_of_content_other.markdown
index b4bbf62777..f6fe601fa2 100644
--- a/doc/tutorials/others/table_of_content_other.markdown
+++ b/doc/tutorials/others/table_of_content_other.markdown
@@ -1,4 +1,4 @@
-Other tutorials (ml, objdetect, photo, stitching, video) {#tutorial_table_of_content_other}
+Other tutorials (objdetect, photo, stitching, video) {#tutorial_table_of_content_other}
 ========================================================
 
 - photo. @subpage tutorial_hdr_imaging
@@ -9,6 +9,4 @@ Other tutorials (ml, objdetect, photo, stitching, video) {#tutorial_table_of_con
 - objdetect. @subpage tutorial_cascade_classifier
 - objdetect. @subpage tutorial_traincascade
 - objdetect. @subpage tutorial_barcode_detect_and_decode
-- ml. @subpage tutorial_introduction_to_svm
-- ml. @subpage tutorial_non_linear_svms
 - ml. @subpage tutorial_introduction_to_pca
diff --git a/doc/tutorials/tutorials.markdown b/doc/tutorials/tutorials.markdown
index c8aae6ab56..75b0f8fa43 100644
--- a/doc/tutorials/tutorials.markdown
+++ b/doc/tutorials/tutorials.markdown
@@ -10,7 +10,7 @@ OpenCV Tutorials {#tutorial_root}
 - @subpage tutorial_table_of_content_features2d - feature detectors, descriptors and matching framework
 - @subpage tutorial_table_of_content_dnn - infer neural networks using built-in _dnn_ module
 - @subpage tutorial_table_of_content_gapi - graph-based approach to computer vision algorithms building
-- @subpage tutorial_table_of_content_other - other modules (ml, objdetect, stitching, video, photo)
+- @subpage tutorial_table_of_content_other - other modules (objdetect, stitching, video, photo)
 - @subpage tutorial_table_of_content_ios - running OpenCV on an iDevice
 - @subpage tutorial_table_of_content_3d - 3d objects processing and visualisation
 @cond CUDA_MODULES
diff --git a/modules/CMakeLists.txt b/modules/CMakeLists.txt
index 10b72f8880..b61cf9deb1 100644
--- a/modules/CMakeLists.txt
+++ b/modules/CMakeLists.txt
@@ -20,7 +20,7 @@ foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MOD
 endforeach()
 ocv_list_sort(OPENCV_MODULES_MAIN)
 ocv_list_sort(OPENCV_MODULES_EXTRA)
-set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn ml flann photo stitching)
+set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video 3d stereo features2d calib objdetect dnn flann photo stitching)
 list(REMOVE_ITEM OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES})
 set(OPENCV_MODULES_MAIN ${FIXED_ORDER_MODULES} ${OPENCV_MODULES_MAIN})
diff --git a/modules/ml/CMakeLists.txt b/modules/ml/CMakeLists.txt
deleted file mode 100644
index e1d5f3100b..0000000000
--- a/modules/ml/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-set(the_description "Machine Learning")
-ocv_define_module(ml opencv_core WRAP java objc python)
diff --git a/modules/ml/doc/ml_intro.markdown b/modules/ml/doc/ml_intro.markdown
deleted file mode 100644
index f49e378e79..0000000000
--- a/modules/ml/doc/ml_intro.markdown
+++ /dev/null
@@ -1,481 +0,0 @@
Machine Learning Overview {#ml_intro}
=========================

[TOC]

Training Data {#ml_intro_data}
=============

In machine learning algorithms there is a notion of training data. Training data includes several
components:

- A set of training samples. Each training sample is a vector of values (in computer vision it is
  sometimes referred to as a feature vector). Usually all the vectors have the same number of
  components (features); the OpenCV ml module assumes that. Each feature can be ordered (i.e. its
  values are floating-point numbers that can be compared with each other and strictly ordered,
  i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be
  integers, strings, etc.).
- An optional set of responses corresponding to the samples. Training data with no responses is
  used in unsupervised learning algorithms that learn the structure of the supplied data based on
  distances between different samples. Training data with responses is used in supervised learning
  algorithms, which learn the function mapping samples to responses. Usually the responses are
  scalar values, ordered (when we deal with a regression problem) or categorical (when we deal
  with a classification problem; in this case the responses are often called "labels"). Some
  algorithms, most notably neural networks, can handle not only scalar but also multi-dimensional
  or vector responses.
- Another optional component is the mask of missing measurements. Most algorithms require all the
  components in all the training samples to be valid, but some others, such as decision trees, can
  handle cases of missing measurements.
- In the case of a classification problem the user may want to give different weights to different
  classes. This is useful, for example, when:
  - the user wants to shift the prediction accuracy towards a lower false-alarm rate or a higher
    hit-rate;
  - the user wants to compensate for significantly different amounts of training samples from
    different classes.
- In addition to that, each training sample may be given a weight, if the user wants the algorithm
  to pay special attention to certain training samples and adjust the training model accordingly.
- Also, the user may wish not to use the whole training data at once, but rather use parts of it,
  e.g. to do parameter optimization via a cross-validation procedure.

As you can see, training data can have a rather complex structure; besides, it may be very big
and/or not entirely available, so there is a need for an abstraction of this concept. In OpenCV ml
there is the cv::ml::TrainData class for that (a construction sketch is shown below).

@sa cv::ml::TrainData
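As a hedged illustration (the matrix contents are made-up placeholders), training data with
responses can be wrapped for any of the ml algorithms like this:

@code{.cpp}
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

// Wrap 10 samples with 4 ordered features each, plus one integer class label
// per sample, into a TrainData object (values here are random placeholders).
cv::Mat samples(10, 4, CV_32F);
cv::Mat responses(10, 1, CV_32S);
cv::randu(samples, 0.0, 1.0);
cv::randu(responses, 0, 2);  // two classes: 0 and 1

cv::Ptr<cv::ml::TrainData> data =
    cv::ml::TrainData::create(samples, cv::ml::ROW_SAMPLE, responses);

// An 80/20 train/test split, shuffling the samples first.
data->setTrainTestSplitRatio(0.8, /*shuffle=*/true);
@endcode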
Normal Bayes Classifier {#ml_intro_bayes}
=======================

This simple classification model assumes that feature vectors from each class are normally
distributed (though not necessarily independently distributed). So, the whole data distribution
function is assumed to be a Gaussian mixture, one component per class. Using the training data the
algorithm estimates mean vectors and covariance matrices for every class, and then it uses them
for prediction.

@sa cv::ml::NormalBayesClassifier

K-Nearest Neighbors {#ml_intro_knn}
===================

The algorithm caches all training samples and predicts the response for a new sample by analyzing
a certain number (__K__) of the nearest neighbors of the sample, using voting, calculating a
weighted sum, and so on. The method is sometimes referred to as "learning by example" because for
prediction it looks for the feature vector with a known response that is closest to the given
vector.

@sa cv::ml::KNearest
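A minimal sketch of the voting prediction, assuming the `samples` and `responses` matrices from
the earlier sketch (K = 3 is arbitrary):

@code{.cpp}
// Train a K-nearest-neighbors model and query the 3 nearest neighbors of one sample.
cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
knn->train(samples, cv::ml::ROW_SAMPLE, responses);

cv::Mat query = samples.row(0);  // any 1x4 float row vector
cv::Mat votedResponse, neighborResponses;
knn->findNearest(query, 3, votedResponse, neighborResponses);
// votedResponse(0,0) now holds the majority label among the 3 nearest neighbors.
@endcode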
Support Vector Machines {#ml_intro_svm}
=======================

Originally, support vector machines (SVM) were a technique for building an optimal binary
(2-class) classifier. Later the technique was extended to regression and clustering problems. SVM
is a particular case of kernel-based methods. It maps feature vectors into a higher-dimensional
space using a kernel function and builds an optimal linear discriminating function in this space,
or an optimal hyperplane that fits the training data. In the case of SVM, the kernel is not
defined explicitly. Instead, a distance between any 2 points in the hyper-space needs to be
defined.

The solution is optimal, which means that the margin between the separating hyperplane and the
nearest feature vectors from both classes (in the case of a 2-class classifier) is maximal. The
feature vectors that are the closest to the hyperplane are called _support vectors_, which means
that the position of the other vectors does not affect the hyperplane (the decision function).

The SVM implementation in OpenCV is based on @cite LibSVM .

@sa cv::ml::SVM

Prediction with SVM {#ml_intro_svm_predict}
-------------------

StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to
get the raw response from the SVM (in the case of regression, 1-class or 2-class classification
problems).

Decision Trees {#ml_intro_trees}
==============

The ML classes discussed in this section implement Classification and Regression Tree algorithms
described in @cite Breiman84 .

The class cv::ml::DTrees represents a single decision tree or a collection of decision trees. It
is also a base class for RTrees and Boost.

A decision tree is a binary tree (a tree where each non-leaf node has two child nodes). It can be
used either for classification or for regression. For classification, each tree leaf is marked
with a class label; multiple leaves may have the same label. For regression, a constant is also
assigned to each tree leaf, so the approximation function is piecewise constant.

@sa cv::ml::DTrees

Predicting with Decision Trees {#ml_intro_trees_predict}
------------------------------

To reach a leaf node and to obtain a response for the input feature vector, the prediction
procedure starts with the root node. From each non-leaf node the procedure goes to the left
(selects the left child node as the next observed node) or to the right, based on the value of a
certain variable whose index is stored in the observed node. The following variables are possible:

- __Ordered variables.__ The variable value is compared with a threshold that is also stored in
  the node. If the value is less than the threshold, the procedure goes to the left. Otherwise, it
  goes to the right. For example, if the weight is less than 1 kilogram, the procedure goes to the
  left, else to the right.

- __Categorical variables.__ A discrete variable value is tested to see whether it belongs to a
  certain subset of values (also stored in the node) from a limited set of values the variable
  could take. If it does, the procedure goes to the left. Otherwise, it goes to the right. For
  example, if the color is green or red, go to the left, else to the right.

So, in each node, a pair of entities (variable_index, `decision_rule (threshold/subset)`) is used.
This pair is called a _split_ (split on the variable variable_index). Once a leaf node is reached,
the value assigned to this node is used as the output of the prediction procedure.

Sometimes, certain features of the input vector are missing (for example, in the darkness it is
difficult to determine the object color), and the prediction procedure may get stuck in a certain
node (in the mentioned example, if the node is split by color). To avoid such situations, decision
trees use so-called _surrogate splits_. That is, in addition to the best "primary" split, every
tree node may also be split on one or more other variables with nearly the same results.
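A hedged sketch of training a single tree on the `data` wrapper introduced earlier (the
depth and sample-count limits are arbitrary):

@code{.cpp}
// Train a single decision tree on the TrainData wrapper from above and
// predict the class of one sample; parameter values are illustrative.
cv::Ptr<cv::ml::DTrees> dtree = cv::ml::DTrees::create();
dtree->setMaxDepth(8);          // stop splitting when a branch reaches depth 8
dtree->setMinSampleCount(2);    // do not split nodes with fewer than 2 samples
dtree->setCVFolds(0);           // disable built-in cross-validation pruning
dtree->train(data);

float label = dtree->predict(samples.row(0));
@endcode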
Training Decision Trees {#ml_intro_trees_train}
-----------------------

The tree is built recursively, starting from the root node. All training data (feature vectors and
responses) is used to split the root node. In each node the optimum decision rule (the best
"primary" split) is found based on some criterion: Gini "purity" is used for classification, and
the sum of squared errors for regression. Then, if necessary, the surrogate splits are found. They
resemble the results of the primary split on the training data. All the data is divided using the
primary and the surrogate splits (as is done in the prediction procedure) between the left and the
right child node. Then, the procedure recursively splits both left and right nodes. At each node
the recursive procedure may stop (that is, stop splitting the node further) in one of the
following cases:

- The depth of the constructed tree branch has reached the specified maximum value.
- The number of training samples in the node is less than the specified threshold, so it is not
  statistically representative to split the node further.
- All the samples in the node belong to the same class or, in case of regression, the variation is
  too small.
- The best split found does not give any noticeable improvement compared to a random choice.

When the tree is built, it may be pruned using a cross-validation procedure, if necessary. That
is, some branches of the tree that may lead to the model overfitting are cut off. Normally, this
procedure is only applied to standalone decision trees. Tree ensembles usually build trees that
are small enough and use their own protection schemes against overfitting.

Variable Importance {#ml_intro_trees_var}
-------------------

Besides prediction, which is an obvious use of decision trees, the tree can also be used for
various data analyses. One of the key properties of the constructed decision tree algorithms is
the ability to compute the importance (relative decisive power) of each variable. For example, in
a spam filter that uses the set of words occurring in a message as a feature vector, the variable
importance rating can be used to determine the most "spam-indicating" words and thus help keep the
dictionary size reasonable.

The importance of each variable is computed over all the splits on this variable in the tree, both
primary and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits
must be enabled in the training parameters, even if there is no missing data.
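As a sketch of how this looks through the API (using the random-trees ensemble described further
below, since that is where the importance computation is exposed):

@code{.cpp}
// Ask a random-trees ensemble to accumulate variable importance during
// training, then read out one importance value per feature.
cv::Ptr<cv::ml::RTrees> rtrees = cv::ml::RTrees::create();
rtrees->setCalculateVarImportance(true);
rtrees->train(data);

cv::Mat importance = rtrees->getVarImportance();  // one float per variable
@endcode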
Boosting {#ml_intro_boost}
========

A common machine learning task is supervised learning. In supervised learning, the goal is to
learn the functional relationship \f$F: y = F(x)\f$ between the input \f$x\f$ and the output
\f$y\f$. Predicting the qualitative output is called _classification_, while predicting the
quantitative output is called _regression_.

Boosting is a powerful learning concept that provides a solution to the supervised classification
learning task. It combines the performance of many "weak" classifiers to produce a powerful
committee @cite HTF01 . A weak classifier is only required to be better than chance, and thus can
be very simple and computationally inexpensive. However, many of them smartly combined result in a
strong classifier that often outperforms most "monolithic" strong classifiers such as SVMs and
neural networks.

Decision trees are the most popular weak classifiers used in boosting schemes. Often the simplest
decision trees with only a single split node per tree (called stumps) are sufficient.

The boosted model is based on \f$N\f$ training examples \f$\{(x_i,y_i)\}_{i=1}^{N}\f$ with
\f$x_i \in R^K\f$ and \f$y_i \in \{-1, +1\}\f$. \f$x_i\f$ is a \f$K\f$-component vector. Each
component encodes a feature relevant to the learning task at hand. The desired two-class output is
encoded as -1 and +1.

Different variants of boosting are known as Discrete AdaBoost, Real AdaBoost, LogitBoost, and
Gentle AdaBoost @cite FHT98 . All of them are very similar in their overall structure. Therefore,
this chapter focuses only on the standard two-class Discrete AdaBoost algorithm, outlined below.
Initially the same weight is assigned to each sample (step 2). Then, a weak classifier
\f$f_m(x)\f$ is trained on the weighted training data (step 3a). Its weighted training error and
scaling factor \f$c_m\f$ are computed (step 3b). The weights are increased for training samples
that have been misclassified (step 3c). All weights are then normalized, and the process of
finding the next weak classifier continues \f$M-1\f$ more times. The final classifier \f$F(x)\f$
is the sign of the weighted sum over the individual weak classifiers (step 4).

__Two-class Discrete AdaBoost Algorithm__

- Set \f$N\f$ examples \f$\{(x_i,y_i)\}_{i=1}^{N}\f$ with \f$x_i \in R^K, y_i \in \{-1, +1\}\f$.

- Assign weights as \f$w_i = 1/N, i = 1,...,N\f$.

- Repeat for \f$m = 1,2,...,M\f$:

  - Fit the classifier \f$f_m(x) \in \{-1,1\}\f$, using weights \f$w_i\f$ on the training data.

  - Compute \f$err_m = E_w [1_{(y \neq f_m(x))}], \quad c_m = \log((1 - err_m)/err_m)\f$.

  - Set \f$w_i \Leftarrow w_i \exp[c_m 1_{(y_i \neq f_m(x_i))}], i = 1,2,...,N,\f$ and
    renormalize so that \f$\sum_i w_i = 1\f$.

- Classify new samples _x_ using the formula: \f$\textrm{sign} \left( \sum_{m=1}^{M} c_m f_m(x) \right)\f$.

@note Similar to the classical boosting methods, the current implementation supports two-class
classifiers only. For M \> 2 classes, there is the __AdaBoost.MH__ algorithm (described in
@cite FHT98) that reduces the problem to the two-class problem, yet with a much larger training
set.

To reduce computation time for boosted models without substantially losing accuracy, the influence
trimming technique can be employed. As the training algorithm proceeds and the number of trees in
the ensemble is increased, a larger number of the training samples are classified correctly and
with increasing confidence, so those samples receive smaller weights on subsequent iterations.
Examples with a very low relative weight have a small impact on weak classifier training. Thus,
such examples may be excluded during the weak classifier training without having much effect on
the induced classifier. This process is controlled with the weight_trim_rate parameter. Only
examples with the summary fraction weight_trim_rate of the total weight mass are used in the weak
classifier training. Note that the weights for __all__ training examples are recomputed at each
training iteration. Examples deleted at a particular iteration may be used again for learning some
of the weak classifiers further on @cite FHT98 .

@sa cv::ml::Boost

Prediction with Boost {#ml_intro_boost_predict}
---------------------
StatModel::predict(samples, results, flags) should be used. Pass flags=StatModel::RAW_OUTPUT to
get the raw sum from the Boost classifier.
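A hedged configuration sketch (the weak-classifier count and trim rate are arbitrary; a two-class
`data` object, as built earlier, is assumed):

@code{.cpp}
// Boost a committee of 100 depth-1 trees (stumps) with influence trimming.
cv::Ptr<cv::ml::Boost> boost = cv::ml::Boost::create();
boost->setBoostType(cv::ml::Boost::DISCRETE);  // the Discrete AdaBoost variant above
boost->setWeakCount(100);                      // M, the number of weak classifiers
boost->setMaxDepth(1);                         // stumps
boost->setWeightTrimRate(0.95);                // keep 95% of the weight mass
boost->train(data);

// Raw weighted sum instead of the sign, as described above:
cv::Mat rawSum;
boost->predict(samples.row(0), rawSum, cv::ml::StatModel::RAW_OUTPUT);
@endcode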
Random Trees {#ml_intro_rtrees}
============

Random trees were introduced by Leo Breiman and Adele Cutler. The algorithm can deal with both
classification and regression problems. Random trees is a collection (ensemble) of tree predictors
that is called a _forest_ further in this section (the term was also introduced by L. Breiman).

The classification works as follows: the random trees classifier takes the input feature vector,
classifies it with every tree in the forest, and outputs the class label that received the
majority of "votes". In case of regression, the classifier response is the average of the
responses over all the trees in the forest.

All the trees are trained with the same parameters but on different training sets. These sets are
generated from the original training set using the bootstrap procedure: for each training set, you
randomly select the same number of vectors as in the original set ( =N ). The vectors are chosen
with replacement. That is, some vectors will occur more than once and some will be absent. At each
node of each trained tree, not all the variables are used to find the best split, but a random
subset of them. With each node a new subset is generated. However, its size is fixed for all the
nodes and all the trees. It is a training parameter set to \f$\sqrt{number\_of\_variables}\f$ by
default. None of the built trees are pruned.

In random trees there is no need for any accuracy estimation procedure, such as cross-validation
or bootstrap, or a separate test set to get an estimate of the training error. The error is
estimated internally during the training. When the training set for the current tree is drawn by
sampling with replacement, some vectors are left out (the so-called _oob (out-of-bag) data_). The
size of the oob data is about N/3. The classification error is estimated using this oob data as
follows:

- Get a prediction for each vector that is oob relative to the i-th tree using that very i-th
  tree.

- After all the trees have been trained, for each vector that has ever been oob, find the
  class-winner for it (the class that got the majority of votes in the trees where the vector was
  oob) and compare it to the ground-truth response.

- Compute the classification error estimate as the ratio of the number of misclassified oob
  vectors to all the vectors in the original data. In case of regression, the oob error is
  computed as the mean squared difference between the oob predictions and the true responses.

For a random trees usage example, please see the letter_recog.cpp sample in the OpenCV
distribution.

@sa cv::ml::RTrees

__References:__

- _Machine Learning_, Wald I, July 2002.
- _Looking Inside the Black Box_, Wald II, July 2002.
- _Software for the Masses_, Wald III, July 2002.
- And other articles from Leo Breiman's Random Forests web site.
Expectation Maximization {#ml_intro_em}
========================

The Expectation Maximization (EM) algorithm estimates the parameters of a multivariate probability
density function in the form of a Gaussian mixture distribution with a specified number of mixture
components.

Consider the set of the N feature vectors { \f$x_1, x_2,...,x_{N}\f$ } from a d-dimensional
Euclidean space drawn from a Gaussian mixture:

\f[p(x;a_k,S_k, \pi _k) = \sum _{k=1}^{m} \pi _kp_k(x), \quad \pi _k \geq 0, \quad \sum _{k=1}^{m} \pi _k=1,\f]

\f[p_k(x)= \varphi (x;a_k,S_k)= \frac{1}{(2\pi)^{d/2}\mid{S_k}\mid^{1/2}} \exp \left \{ - \frac{1}{2} (x-a_k)^TS_k^{-1}(x-a_k) \right \} ,\f]

where \f$m\f$ is the number of mixtures, \f$p_k\f$ is the normal distribution density with the
mean \f$a_k\f$ and covariance matrix \f$S_k\f$, and \f$\pi_k\f$ is the weight of the k-th mixture.
Given the number of mixtures \f$m\f$ and the samples \f$x_i\f$, \f$i=1..N\f$, the algorithm finds
the maximum-likelihood estimates (MLE) of all the mixture parameters, that is, \f$a_k\f$,
\f$S_k\f$ and \f$\pi_k\f$:

\f[L(x, \theta )=\log p(x, \theta )= \sum _{i=1}^{N}\log \left ( \sum _{k=1}^{m} \pi _kp_k(x) \right ) \to \max _{ \theta \in \Theta },\f]

\f[\Theta = \left \{ (a_k,S_k, \pi _k): a_k \in \mathbbm{R} ^d,S_k=S_k^T>0,S_k \in \mathbbm{R} ^{d \times d}, \pi _k \geq 0, \sum _{k=1}^{m} \pi _k=1 \right \} .\f]

The EM algorithm is an iterative procedure. Each iteration includes two steps. At the first step
(Expectation step or E-step), you find a probability \f$p_{i,k}\f$ (denoted \f$\alpha_{ki}\f$ in
the formula below) of sample i belonging to mixture k using the currently available mixture
parameter estimates:

\f[\alpha _{ki} = \frac{\pi_k\varphi(x;a_k,S_k)}{\sum\limits_{j=1}^{m}\pi_j\varphi(x;a_j,S_j)} .\f]

At the second step (Maximization step or M-step), the mixture parameter estimates are refined
using the computed probabilities:

\f[\pi _k= \frac{1}{N} \sum _{i=1}^{N} \alpha _{ki}, \quad a_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}x_i}{\sum\limits_{i=1}^{N}\alpha_{ki}} , \quad S_k= \frac{\sum\limits_{i=1}^{N}\alpha_{ki}(x_i-a_k)(x_i-a_k)^T}{\sum\limits_{i=1}^{N}\alpha_{ki}}\f]

Alternatively, the algorithm may start with the M-step when the initial values for \f$p_{i,k}\f$
can be provided. Another alternative, when \f$p_{i,k}\f$ are unknown, is to use a simpler
clustering algorithm to pre-cluster the input samples and thus obtain initial \f$p_{i,k}\f$.
Often the k-means algorithm is used for that purpose.

One of the main problems of the EM algorithm is the large number of parameters to estimate. The
majority of the parameters reside in covariance matrices, which are \f$d \times d\f$ elements
each, where \f$d\f$ is the feature space dimensionality. However, in many practical problems the
covariance matrices are close to diagonal or even to \f$\mu_k I\f$, where \f$I\f$ is an identity
matrix and \f$\mu_k\f$ is a mixture-dependent "scale" parameter. So, a robust computation scheme
could start with harder constraints on the covariance matrices and then use the estimated
parameters as an input for a less constrained optimization problem (often a diagonal covariance
matrix is already a good enough approximation).

@sa cv::ml::EM

References:
- Bilmes98 J. A. Bilmes. _A Gentle Tutorial of the EM Algorithm and its Application to Parameter
  Estimation for Gaussian Mixture and Hidden Markov Models_. Technical Report TR-97-021,
  International Computer Science Institute and Computer Science Division, University of California
  at Berkeley, April 1998.
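As a hedged usage sketch (three components chosen arbitrarily, and the data is a random
placeholder):

@code{.cpp}
// Fit a 3-component Gaussian mixture to Nxd float samples and read back the
// per-sample component labels; the covariance model is left at its default.
cv::Mat points(100, 2, CV_32F);
cv::randu(points, 0.0, 10.0);  // placeholder data

cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
em->setClustersNumber(3);
cv::Mat logLikelihoods, emLabels, probs;
em->trainEM(points, logLikelihoods, emLabels, probs);

cv::Mat means = em->getMeans();  // one row per mixture component
@endcode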
Neural Networks {#ml_intro_ann}
===============

ML implements feed-forward artificial neural networks or, more particularly, multi-layer
perceptrons (MLP), the most commonly used type of neural networks. An MLP consists of an input
layer, an output layer, and one or more hidden layers. Each layer of the MLP includes one or more
neurons directionally linked with the neurons from the previous and the next layer. The example
below represents a 3-layer perceptron with three inputs, two outputs, and a hidden layer including
five neurons:

![image](pics/mlp.png)

All the neurons in an MLP are similar. Each of them has several input links (it takes the output
values from several neurons in the previous layer as input) and several output links (it passes
its response to several neurons in the next layer). The values retrieved from the previous layer
are summed up with certain weights, individual for each neuron, plus a bias term. The sum is
transformed using an activation function \f$f\f$, which may also differ between neurons.

![image](pics/neuron_model.png)

In other words, given the outputs \f$x_j\f$ of the layer \f$n\f$, the outputs \f$y_i\f$ of the
layer \f$n+1\f$ are computed as:

\f[u_i = \sum _j (w^{(n+1)}_{i,j} x_j) + w^{(n+1)}_{i,bias}\f]

\f[y_i = f(u_i)\f]

Different activation functions may be used. ML implements three standard functions:

- Identity function ( cv::ml::ANN_MLP::IDENTITY ): \f$f(x)=x\f$

- Symmetrical sigmoid ( cv::ml::ANN_MLP::SIGMOID_SYM ):
  \f$f(x)=\beta (1-e^{-\alpha x})/(1+e^{-\alpha x})\f$, which is the default choice for MLP. The
  standard sigmoid with \f$\beta =1, \alpha =1\f$ is shown below:

  ![image](pics/sigmoid_bipolar.png)

- Gaussian function ( cv::ml::ANN_MLP::GAUSSIAN ): \f$f(x)=\beta e^{-\alpha x^2}\f$, which is not
  completely supported at the moment.

In ML, all the neurons have the same activation function, with the same free parameters
( \f$\alpha, \beta\f$ ) that are specified by the user and are not altered by the training
algorithms.

So, the whole trained network works as follows:

1. Take the feature vector as input. The vector size is equal to the size of the input layer.
2. Pass the values as input to the first hidden layer.
3. Compute the outputs of the hidden layer using the weights and the activation functions.
4. Pass the outputs further downstream until you compute the output layer.

So, to compute the network you need to know all the weights \f$w^{(n+1)}_{i,j}\f$. The weights are
computed by the training algorithm. The algorithm takes a training set (multiple input vectors
with the corresponding output vectors) and iteratively adjusts the weights to enable the network
to give the desired response to the provided input vectors.

The larger the network size (the number of hidden layers and their sizes) is, the greater the
potential network flexibility. The error on the training set could be made arbitrarily small. But
at the same time the learned network also "learns" the noise present in the training set, so the
error on the test set usually starts increasing after the network size reaches a limit. Besides,
larger networks take much longer to train than smaller ones, so it is reasonable to pre-process
the data, using cv::PCA or a similar technique, and train a smaller network on only the essential
features.

Another MLP feature is an inability to handle categorical data as is. However, there is a
workaround. If a certain feature in the input or output (in case of an n-class classifier for
\f$n>2\f$) layer is categorical and can take \f$M>2\f$ different values, it makes sense to
represent it as a binary tuple of M elements, where the i-th element is 1 if and only if the
feature is equal to the i-th value out of the M possible. This increases the size of the
input/output layer but speeds up the training algorithm convergence and at the same time enables
"fuzzy" values of such variables, that is, a tuple of probabilities instead of a fixed value.

ML implements two algorithms for training MLPs. The first is a classical random sequential
back-propagation algorithm. The second (default) one is a batch RPROP algorithm.

@sa cv::ml::ANN_MLP
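A hedged sketch of the configuration described above (the layer sizes, activation parameters, and
training data are illustrative):

@code{.cpp}
// A 2-8-1 perceptron with the symmetric sigmoid, trained with the default
// RPROP algorithm on placeholder regression data in [-1, 1].
cv::Mat layerSizes = (cv::Mat_<int>(3, 1) << 2, 8, 1);

cv::Ptr<cv::ml::ANN_MLP> mlp = cv::ml::ANN_MLP::create();
mlp->setLayerSizes(layerSizes);
mlp->setActivationFunction(cv::ml::ANN_MLP::SIGMOID_SYM, 1.0, 1.0);  // alpha, beta
mlp->setTrainMethod(cv::ml::ANN_MLP::RPROP);
mlp->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 300, 1e-4));

cv::Mat annSamples(20, 2, CV_32F), annTargets(20, 1, CV_32F);
cv::randu(annSamples, -1.0, 1.0);
cv::randu(annTargets, -1.0, 1.0);  // placeholder targets, one per output neuron
mlp->train(annSamples, cv::ml::ROW_SAMPLE, annTargets);
@endcode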
Logistic Regression {#ml_intro_lr}
===================

ML implements logistic regression, which is a probabilistic classification technique. Logistic
regression is a binary classification algorithm which is closely related to Support Vector
Machines (SVM). Like SVM, logistic regression can be extended to multi-class classification
problems like digit recognition (i.e. recognizing digits like 0, 1, 2, 3, ... from given images).
This implementation of logistic regression supports both binary and multi-class classification
(for multi-class it creates multiple 2-class classifiers). In order to train the logistic
regression classifier, Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used.
Logistic regression is a discriminative classifier. It is implemented as a C++ class in
LogisticRegression.

In logistic regression, we try to optimize the training parameter \f$\theta\f$ such that the
hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(\theta^T x)\f$
with \f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "logistic" in
logistic regression refers to this function. For given data of a binary classification problem of
classes 0 and 1, one can determine that a given data instance belongs to class 1 if
\f$h_\theta(x) \geq 0.5\f$ or to class 0 if \f$h_\theta(x) < 0.5\f$.

In logistic regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy (a configuration sketch follows the list):

- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate
  "setLearningRate" method. It determines how fast we approach the solution. It is a positive real
  number.

- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are
  supported in LogisticRegression. It is important to set the number of iterations these
  optimization algorithms have to run, which can be done with @ref
  cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of as
  the number of steps taken, while the learning rate specifies whether each is a long step or a
  short one. These two parameters together define how fast we arrive at a possible solution.

- In order to compensate for overfitting, regularization is performed, which can be enabled with
  @ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
  kind of regularization has to be performed by passing one of the @ref
  cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.

- The logistic regression implementation provides a choice of two training methods, Batch Gradient
  Descent and Mini-Batch Gradient Descent. To specify this, call @ref
  cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
  cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
  cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training method
  is set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch
  has to be set to a positive integer with @ref cv::ml::LogisticRegression::setMiniBatchSize
  "setMiniBatchSize".
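A hedged sketch of this configuration (the parameter values are illustrative, not normative, and
the data is a random placeholder):

@code{.cpp}
// Configure and train a mini-batch logistic regression classifier; note that
// this class expects the labels as CV_32F values.
cv::Ptr<cv::ml::LogisticRegression> lr = cv::ml::LogisticRegression::create();
lr->setLearningRate(0.001);
lr->setIterations(100);
lr->setRegularization(cv::ml::LogisticRegression::REG_L2);
lr->setTrainMethod(cv::ml::LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(10);

cv::Mat lrSamples(40, 2, CV_32F), lrLabels(40, 1, CV_32S);
cv::randu(lrSamples, 0.0, 1.0);
cv::randu(lrLabels, 0, 2);             // integer 0/1 labels
lrLabels.convertTo(lrLabels, CV_32F);  // LogisticRegression expects float labels
lr->train(lrSamples, cv::ml::ROW_SAMPLE, lrLabels);
@endcode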
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
@snippet samples/cpp/logistic_regression.cpp init

@sa cv::ml::LogisticRegression
diff --git a/modules/ml/doc/pics/SVM_Comparison.png b/modules/ml/doc/pics/SVM_Comparison.png
deleted file mode 100644
index 4bb3dababc22b65525dc486593574de011a922b7..0000000000000000000000000000000000000000
GIT binary patch
[94157 bytes of base85-encoded PNG data omitted]
ztxuR*aV5k^l;O|A?Sixrf=JX5H$h@0s6LE8Z-t>wC1k54a#60wPKa^kAQ?lj!0*GU zI^FRLe{o5@;m`|U&~eeVr8XJQWDGAB!Pb@#{Rq4BDofUL_e`G?YMj)iODQ^dCS(H zoRyn)bK~vY!}rvb?BwP>ok`cb%b>{|#i8p8)@Huic%#Qpoc|%cnQ6T59$B{?L2y{&I2A^y%$+p?9LDb^Y%*O?^oppO?fxtD_Lv=DCLW3y?nY zklab-TQdmIM@;gE-lvA9yw%hHQWd?P{>6~%;TD2M)O2}&_sW*7Q0vQV@Vb~v^g7Ag z?;X!owV$=DNXyHgHeCB{h2d!d@IkRGg@D8S@_LlTv}x&ad7+1U*98v;r@?VcOjT9Y z^ZBR@;_+XZFqidEzz)%d(ce`1)U>n)|2J<;EG+F_f1%L~7P1FjQh4x+LqA#oyh9J) zdlfSf9JWmBfd!NQ$9eV&d$}~}pZ;>uTk!XDp`ntbBurRX7@lCjZueZ3W|Pl@J;>R`rJ}O3xTz^6G&EF&?|pY9 zWh{#$Lg2hDWWm7a?DfiH)d{Yy=h?cqR~LZv^K^+tYQY!2Qu!BBWF$+EJTq*FMhmt2 z)jG}5%{B{-E(a6B1J|cZO}WB;*-K3p36^7dU;$Qh3xn2kebRu}s%ou<1l3I!>FNWC zV$dUm;|I+>@!WhLk%Nd#RA%T9kc~D?lM+dQQPn&_C zQZU&7bmhaW)Q@kySpxly-}b*neQVHvQ=Kv~@)wQmxfFoqVQ9)#K8A&DdTU$ZV5 zb1Fom&{b44Q!$XP&a;N=<8*@m09AtE%*DHVr{D0w{gW@1&RoL$6_8dlAebrRw@vwMX#ZCq!+>zrC(c~VSn}y%!bX&Q*^&D7<_*l zRk)_*ib8=37G3dD<-HczhE9iI2uz_xIAxbHqNqodfj^7f>MW!v-JCYw+}WQXxC(j# zHiM+MH@`+z+U@OarpWsQyLZo(xeyE=%GQtf1v@I9~>@qODL;(dyt4f+w{;ONF z-iE*-QlHr6Wt~!`f{D(Ho|u)h{WXt`z00kBP=5;+0^j7^T;hDMeF9lIxufIbzLPuW+0htLHYlH1y*$!$^eYjTLu^ylMSez#gbfwm;ecYgMXvN*k0>k{u=Qa#5 z9GP$8LB3D?{=T`6uCCYplEu?5-4_BE6d+DacDS8*zTGVVUt8SXo>f-hpY7m(lXI(a6*(WV9XI@{+W1&zDfK0`@;UgXhOH>MBKKUVR#0Gm6g#VxU>;znS(@z)WR;LRAG=gl|~#A4@zEUDkE%6S`0-^$+|t&I8WCtg2D)VqV<&CND*t;0uP70}S=WiCA_F%fdP!3S<`+Cc6`b9}s{ zOlKAXH!t+`pDqoy>E)|*hPxafKAwAgpslp%57B*j5+L<|Jix|QzwdNVQzHcSAPwNs zNb2AA3Ie_m&+|DN;oRMAd>=%_la$PHY!}q35syK{-BT(9EI=YK?{t64_-y^-N}4d8 zFA#2IRx{+Vya|KD-tV7}-M%bS@&!va{;@Q3FGJ!j3t}_~6{B^{o#;PTjKO816cWmd zjz%=_KB9TQGyK5m-3YMy@FZzNc+7qhe1l$`kld40i^S0i6bK4nhZ`eNFwUSpK0M;@$@nYPI%pCz2W8SVUuvt4bw zy4J^(hndeYyZpW>#6@912oI6xniD91^F*ZFsI0)cG&P?t-^RgQ>U&KS=8mrUsFTBd!X59+^@T6OGV_{Z=9DxchG1xD^*@Ijv=<3IJn}l z0bcXyoe2Sji{UEf=Z^k~YrU6y@2Jx*YjUB@%j21ynF*`yl##QcVQa7vp<&&ASXj;$ z(_h1OF^M`QW_zK9@`^;n7fqIV{bj&JI9;IYh z5~YAYg>b7901(cbJWoY`nkI6&?1aYe_lX9m^E;N9rjvWX;1Uzly70_}tIUs%i&UNV# z@L(7!rs4lQD`jbUsEfwK)tt{|^Uk7HCM<4lewQfMM*v&$v)S=(?qcgtws zc3J?Mnmj6tXnbK$nlg_eP!9~p2{ELgddOxNF+Lti88L)@V;=3z3rih%k!jaVGGQ+~ z8hgZ{UZr<_>2x&5sv2qXC_IOZ*Y^@lq%@!3jk{=!YPf~amF@;U2uvW4l7{X3SFz`N zqWclQaANc)7IS+?=!e*AF`JzZfye3wMG3S{6<4)0N7SG{Tq*3px$aUh5@LBck*o^r;2(6FD>l$G1YriB}JMUBni zDVRSrAspD=KO22kwLK-=)S!m`Ej-NDVb5wae{}Pa0CRc|@>qZRyrBQU{q!^!a{k5R zoWAFIu3&0Ce|UeqH8&t&tFK@166{i~rK0c&x~67oi4TOpDIY%{Ip9$eiD>BooyB*; z$kp6_y41}dP4QJ3uFGnDJn+d2{*$QKO^9oipANRyDO#Siqj>)R8v70$QI4S`YK*M{ zziNtVTt^wMDb6)o5(vSlHqQD4Z=^M3GTc2%77AL%&lTjTXHM2OE_|+*Y}(pxs~#H#ZTs1Ii?*y5sQ>V?krzVs`2z-F5cpi zTBi{&+|FQt_zxw1TTEo%G35DhV3jG<V}#135M`WYT@(V(-oiBh#$)i=JL5?g7i+C4Pb|cl9-@*asMCGY9&$Z$hdH{G?*ZUw9;vdZmqW zNF`q>-2x9NpP+HDA=L5Fabv3Y?K>4TJT7{WE_wsnvB18Y4nZ%08G%jUBMK62)Ym`6 z+3L7?BVisu2VrQd^Y1md0k?Ju#9?qQ2TFm489HY3DHs{e3O-6>KpE=`x z-AD84H=O1m<5LXyqSX|CE#ISsf>SpXKc5T^<4e!r{>OxzQF3bNluLQ-#P_3_kdiVU zeaDZ92~Tc^o~(kmQ)BnjHr&7vxQy%Wi#Iac77!74m!JPeD703i#QR3mkUHcUdOkvp zuw@ly?-#_d-Y^ovw7GWMFRl5F6O^3D$gb2yH<$yw1=c`&>xX40Rs&f?(mxiaeGB6h zWQ`o^sf1k!=qp(LN?1-w5Du*PC+G;&-zRmW#||I<-yyjd`ffSDAd`Gf8Qzsx;Y|XI z5c6JySJ{UZ^Qri&Pvie&FV(PtFPjx8)@etLAd8^77t%rz@5hH(nveeXvrWD5#26Uy z@4_%;OUDqmPiDV&oQkcCnqlE#?JToJ#e#{-njF3yWyuI(#>{MvRIcIq zeh$wK5DBh+koiF+&@t0GWR36B@wAp21{agJgKn-X9r@uVA+j?M_kmA@NyfxqX7sL( z)aqAdag=2$`EEZj>UoxIn8K<;Hb~XvjrUqvG3mAX3vfhFIA3n6PUL1q8M}f6Fq#}7 ziudz=2#mO3Qbx2&<*^n+f0gfn=jFk*qb1c%2`>#xIMB?V~_i_nSr zikFLs^1!q%YHhZTP>E=Eotq@HI^4L_%AJJ7OUQbR?y27jlTxb4AJ$(imX}pM-=BJc zOm=Twjf<03Z#hk&SIayX0M`~F&csk=1$%WxTHCgbKUk{G2P7W zKxYqC5Z=}1ow$AsenBI_VC3W=_zPu^iuY-&p|>}O&B8e~eMKCHh55{Sz67srRMtu7 z)gPu+pNp81(w52zVI3rFg|Uy3kuz%Aj!r){7EmY2_KqY(bzQIHPjr2r$b<`%zo~@I 
zE>&#Eaq*~lK_b)v!S=QBx1SmpR%oPtIH)^vBDhjj8Z%@5D?@K@U0Px*1nuwlq40o@ z=Vr9m{fUm%Q7iy@2O&*9@p8)7at&bxEl3F21s~=&GwEZ%Q9NfRvTP8Igoz1U!P8jN zfBA6Z{l4jS3k?mR;&`_f(zCQD*E~ML`ubcdH~xUw*{OeoE+AP<=q+}ON}C71zFMoQ zuAA+T&F1p5p(W7ehlVO5L-`8B&CPjr%MRV~%6+LShc`y15T%Gnk`RuD{%YEXVC2GK zyY|`N0$|!{js9ygWReG2iLkp`3t=hf5$CYOM9E}o4(uJ9?EOWn0OH8aevU~nXrdq> zR=X*>K`Zq_nUOr1$`J^jle)SzP$%_PlnnL@o0WBibz6EJd+L| z9vHngP~e5IwG1?Tg?zt+E3XZ|cfSMCfBG~4zYOL@QtRKA(e^su%-t&|$j}O^LtQ94J%pxMqo6b2I5mGZbE;eniKrURcr~C(I@s^{^dMh{S^52lCk5ibXs!c`dqIQ zQrY_lUyoU<4INw^pXb%$6CN6EmxQx()aaS=Oq0R5 zoB9$M)n6&&MV&h}$R!~%C=hr}m)Lkz4Ue}oetbrW{jVz#06$FJ@QJ+H=xg(?W){8h z0Ed-LRErbH>8&D@6 zx1D#`;fE_~FUb#()e|ntdZf=wj~nqgB39_K<2ivQDDQJ!{lG8#4kPh`Fw&$0Th*U_ zW)@Kf0{kH2TM=%uVxOuhvffZaPO4bh3w%$|HoSZD)Hl?v;s7>=wCOx}q3 zu@SxC``vCl(=z!LFC%<{N`0;Qx{{qU&+Oeo?>GuiS5$3+;o!;bEi5jJFgOHFcx33y zJ0(569pDkq+22x99b)jrW&b|n`bbM3nv5iikLCG%w%$yIn>4?f&a+7*EkJ1a47)B7 z4Usw(K$v|Mh_)C(Jrb{H`ircPZHx&yG!3%6kkx8N{&MT31c>cUx+tLc;xd z(FrYjSN~1u`e+&bT287TQ2+&(916`7@I32hYCvPQ{EMB`YnQHCuRZ5zc69i!s#ibl zIJ$Y*(X8?LY7$JC9&&OrzgCManSz2yGdKULMj z!tYo2^EYC-M4=%es$eJM;0+`+$MURJh<)&V;Y8D-Tbx$;x~#-l%{=Ob#2n zykj*5Lq^&9)6U*m9U*AQJ4 zMgMBO9OOY}XWb4WFs7penZoz|rWb-U-}`t}U1J|Axm+_@63t;-R>o@Y4^bs1!V`+zNplSQ75$q8@%pFUaMgvpR|Xf z4*n??OK!w0{0;julaiTL(Qp^`Jy0Ebgvh8s(Ir2N+I)LpK(V&NI)hJCCVBntWP^gf zA}`=6LyDe(;rOBL=_Vn$z)v(Q6igm1co6Xh2xK1yL8v;~4C6S{L?FBD^yJSILV>_OCkti)gdzHvv0 z1(Ol)o}PwHq*)n}rZXFiB#wHnz+lBTUute%R43QZg^c?HGG4MwcG2(V+W3@|!W>p+Kg{<`36j@TMUmr!LS1?Q62tI|I!GlJANZz9nSsUzRd^HL`5(OfqC0*aftvOXRYdJlF- zyo7`e8%$ULR}`;yYUpiFRuBg%tj+j*NDKT0?z1(zzk zS&$W2$`_#WY%nggd`C)6k3WBM=u{AU+6W+XKZC5(yu*VD!{|rp0$$pwakUa`)+2iuTxxPTAEE(bWij5LC)6=8$%hH*pS%avo<6uDtvC9$IIk3|=) zd)jQt)D&cjuIRfI`%9_POF?OOlhLCV`k!{Wb8=DzXE=g8#8tL%}OcmMA0HZa8A5NWshKb4F{{C#oXxNkRee~XG zcUa86_l>Kz+Z_%yVp5$J`zhz&RFHNg*<6o7!5&C1QLl7*-dACs3l)44!U3?lv@(l% zu2_w*Bwb;JOk8~5vY+IU|9$7YS#rucWuJXDAluG4EV2*`>)*9=T00O_f^J41X_Ki> zPs~wOP*kWnqT+>W6-dkHK5`#m)HAPLPaaEA`uUQfDC(b=*3;;Zl}uTjz<7#QJe44`@-=TO7$ee`tXHCY*}wn}#76d=;k0W=+Vc47DfwwpV*&A5C)M2E~G ziGw9dgeX8sDWl}D9|ZRvxNrj04^TNwh0ni#hC*&$$fMrUCbK&TIkGtZTx#uqOXNc7 z(^u|=y8j@RMe9%2(MIC3bYzE%Z7xO7vT!s!jEu!s8BZDSHBKoBf2OEIeRgYNow&fH z6}>RAcLo4*T53iCpbTQBp_%;=NlxnmPpN7e;x(gsQ)kbv6*D5+G>$aRk+6|K?VAqg zfBa8%zHZ~Iz5RGOQzEiBwunikv+HhTxnV=3LJoKb!@0$DXwX6r zEsR=>fkHI$BH)7uVAFd19MAPm*ZWhkJr60`j%Yn!>^x7|C|cdzJht$NAZyCfjYm%&xpZP`o>p)9WOD{cUWrHF%inM&?{_bzns1+3BEGzhG2a0yERn8)8*$k|~rb$_@eih1x6QoT6E zpp}xdw3VE$Y~#rafUXMaRy|jAg>|=<B-3<5%U9dt>Ylhwa`#}oCA|AOql-`w1)G@E=XPZupN=7 zb<25(H(jpnh}+vrb_|lU z6)wb-L35WSD8TodgDW z6ciR_f~p{cfr$?vIF2d`CAO76aGq^bB0ni`0q(rGr;b2{tJX(i(1=9a^tnJt67<3K zf3xxOJ5D8TvP>#>z`+rU`fgoWlI+C4b+_=7dH1XpiD0z@=l&vs$R1r4ACGR}CJc|c z+WP)*N<*t%bvS>61zK}~CirDSU~jp_KFwqoP&wYl(16dK9`k=nqeuQ%5BW(%dfmw5 z)SKgTg_wrIcj)PlWvYnrR&$tsMey(aPp1Y<>F+4g)e|u%xB1k+R8exHe%6kY)porz zMs4TBuC9j3unKsi*lN$lCgXeCD>VUk+uP#U8jZGh@5h6H^o4*|D#2$G>DUNyU85k1 ze7FP7;c=YpyQBng<9F5KvdYR%H46(l&PxTJ5wD57loJIdyqmKYyT-}4hkmBk$iMeD z1tBX;J)SlTe?#7uCjUO^=ne|L{ACcEK_$U-CRD+cqORl}_~-L-<~)H#M@|KG-X;#4 zv~5CB(SSc#gH9!F>|0r5MnOOcX!M@PqO!6~P%103d>0(>ld}h{QH-0SHng0b1-9xH zRa6j)5&>~w(EU9%At8Ht-DGJ6ecps3-e^X^a4^i!^j*UwS2es5GC6~s@8Nm3CU#R< zesJshaeKlztjoT11h1d>A@tXUy;m3*%yvBk)X8@``RUDd@>OJF5GfR={{6#C=Xl3k z9t$>Rae5vK*-Pf9@Eo50StX>Ujynw%Vmoe?i{+IqB*Va$^Ar$fr@s7{a6#fg3pd|I zkKgsny02R5-xh8*1hiXSedZ^9(D(1fxX*85UkwBF)d&4vsHK0_DEvnSS`7iyZ+*+E za?`I}yGR%}+dwo$CaYYI4(@~Q-$MUz&XC(jvcoJDjV{r}P-O^ZWmADSR#k-q>JoN8 zZvx>0l2!qT1iJXuRcj#8w~YZH@DDsh*AZ>owX90r{l$e0$W0%+J>Z$n3*P@x{PD8C 
zr^n}wf?h^Ct2V~OW0M@8VkGWKWm8A((sMpASytr~l@uGx@z*n)vMz(aw}3}6}MF|I+%EZo9Wc%Y@Mwm!GK+_ylsD*XS{TtTMLF|1T+!Q6R z(u~PIeCRKDr!HL+5&4|z`?4a&I+=@+{Mb*1sI6ZQ>bYaGw8!0R@SsU)@=U0x(&ij6sq8sGsi8uC3l)IeR!MVgLm(?2W@ADc@Oug^TpB&@j zFymI15hsNiN*DAycLgx9w&V4EI6{ciWJkN37JJu z5GoL5N_dnW&zyn}xrwL$)U{Q@#onrXU|^i}J=SOzRMn^?l(*RX@B;NVoi-2g{UWd` z(l?dIYYJJ(MH}uNqOxNT&z7;-uH4;ayRqsPMCIfVmzM6RsXhPDsD)7mvIS-nKF-xi zxY;;l=y^}WM{MoNj0{=eUk6-(!u!Oo?rvTry<(sZIk^yuTzIK3i+&DL=YdnCYo0Vp zyp5sXl6oyAA}#D{61y`DahvNK8giz-zQa+tI&{~EgS=i+{_fvP z7EhYS-T7p$g6VON4Qbgh?A80Ez;~eJPq|>x5J}k{(3=_ff&_^W(McxXYxpy!oiQm$ zov9q^J**)LzJ)Tah_SLASk7#Vi99TuaWkqQj7B_#=U0D}g2^Cb5j8c~xVR&Lx&xS% zs~3_`G2G?lWUa18RtA7%5?&YBzexaQ92_1VR23sZX~yumgyXJE8W2ecK9?tEJNSyr z7yI`T-|VIon{#o(TR}NFS#z}>DhlZJhD0vii%q$y92NULW#{~~j;fW*PA3tNP`E`g zZS=bkVw3T3jV##s9dg|RoX^N`mwM}Y$j?j-cr~Sn4yPzRdGw?RncmP=f#`jTdqn-? z-Ha8+7K)Vl-=PAnEHJU}~eZ-TjxkU%O zy8r}xPxO}Z)utor}JEV@F4$TV7uc(u48y9C%9H+5Wt#Rdn1Pyy1gA`U$ zDaHC}$bM-?ppAm$k=l{&lKT>xkFQ40BhPg?1dirzbet!K;O=T@@W`DrF^3^~cgLbSxM;U!wKlbm^LhLk+$uSZres<`QBj z1zkr}vp44#t7VH8L}kM$8mAM<;6^BD<*7wrTo(y@yHQdz>nW4J8SemJ##C;G^IG zf`^=~mtXN9rrL1huW5qJzZ0^LPY^f&16ZLCaLX!FU!hPV&=+c%DRleecK?p15C?ZK zm#!WT%wPoDHX%I4(GG`Cr#wn-y2~je4s$iPRte+lI%WrSWBCw>ZX%oNdVlUfObzb) zj#{8pEdjtFY4np_G(evTh>4H^62`Y@CSv2F(1LRMH!KERVhACbglWmWiL6*2_()gJB@}ZrhgRF^M#k-^iO&Uw;(j>>*8;QNDz8cV#gdhU z+hX55J%h=)2S*Xgog+xjoY6JUsnx1zy+$g`SoQOFikK$o3$%YsEf0r-0TR#pC$!}l zmOh672H8cINtxyJqTOyeMclr{<>i`klwXxA?P`=sQpqcyk~x>R&>L~jR(MUr6Z-&H zDd^754tk@e`oxl9l<0M1G)aoHiy=@C;Q{dxE+NGmxRFZ+KZF&r5oFaHT++eznv80M zRu54D=Sxu7#E}n(ker`y83~Cvc)gsN!@~CTy!H$nNle zl4FdCz2QhYSeYm}mQzAp9CxLq#^QG{7NBQKOqyQrCqBJ9hK9U1{`fPYSVkF$6?bg? zxP?aYdf$p-1{pnx6%&=br0o$>+`O@ul~|2KUVG2L$pD z63d~ZRGpsIRZ28{g}BG9r?=u_6c{V3%PH|>&5OGQ8f$J$ecvKDe`?Pyn0Db9N!1Yv zsZ)Mpu|!c}vdV-3B8TC2b-uRD9Na_HP84>P`)Rx0(9RaI8Lcu9zng!JhfL8ADu~_< zfB)0zJ94Q6gWorkA1wFh-O10-!~g*17trS=Hb6w20EF)DrHVpQGBOwo2Az3Twa&w^hpcwU0svyEO7ZcaC`11!Sh;eFP zcWdiXtTv5?(W^#pUWvwg3xKW7SeBgg#o@wK%a=9D?SWVKKXq6}vv_PE%$#sFUD_M8_n zi60)`zx93jmntUaYMWjjaEyh;d6Wqdm8y6DiNa$aNGq?zv_39uM&Qo|mx+@@3)eZ8 zTh8zp`S(hDSc#=xGaJE}d?VTJV7(^M*LPRU%N{+N<*>4n`Oz&zki#URql2YklPCf< z|Lmh7!ZF({fmO+BGCvMyo_S2VFPLZh?qG2{`b?eq#ambzrc3FQddN!NzsW^DJumg* z6hK|}jTc|gArz+VPglcB(3uFr_RB8>C)5rUddW+~ODu0X>XnXW0j@C-Q8XtfF;HEn z;xRp^?x)StDlC zQ)-gHzj#K5kY=Fk88+Z3 z^O7Tm)pO|8^HAZ7jC5SdILRz*KxAOMBs%9bgdtoIiD+lXPWJ1UD}yhS-R4=~CI)MC z5PjiZNIF%n44G`eCB^Zhf1!Ln3={J6=cyS2WrSGnljk>FfMw@(GGQJ{U;&>gk`uS& zTV#xnSE$IVT}YPq?Qf`;Fq<*6<|S45;6atO(?H}&Cq_o(pR-XprA!6ZQwIKs$O+;D z3mdT{OILpxgCdn6UN;n1?=jtXiU?u|RHAYRz>qi@B)-i0>V*)OK<^OL%0~Eixk0_E zBh1JK3p7S8kj+B$B!J-L0%h!LGo7gDEn&%W2g|6u9TO5-6Vpp)=Wy^@dke;4t2AUP zTCnIs$54HOQDtiTrGG8f>Q_F{$C4sb>py!!AGTaXY#%w6YGAt|-|)47?7%XSqk*Yu zg;@neuq7pt3}1f*o8V(+re%3urD1LFuyj3ER*!9G^uDq?^mtL}>to>!HkuCc1E;^3 zon1zO1EhoYnr|MQG6tWhDaCE7B4NWRkRCet2|k2KDT4faPaQ0cphX#A&cvrCAS{Bg zOKy$t$5BsOSlIsuMV7gsUF1F^~J_7IYT z&QLE^6}QyEzb61;Smgh;0PB2DdXX#}ddBqwYz<+ZHMBgAR0y(fDj|c?$xGHEU4Yp` zAUBD?pQ)#>HOJOh$Xs1ws#$M8;a-UU12aXQ1u(Emyd2v_s=f**U(q1AbBm4FZ9OTV z+Xr^P44%H6ImlUOovno3t~n#E`qY&KK3ye*X`yI_xwMzESxmSw8rq|yIvkv}jy@|q zB^R(%*wuC_My`yc;Gvojz#mMwy^wqk5Dseqd$y%p*`co^Pvi>^R@R=Evoqg|S_+Q0c$5uE5(|EDj&(go11gJR#_>87^ zMBcEXw;qNp{{zoJFuxYzu#mO>eC;$%NL_uskUCxOejUjO(b*}M9>b7xdA;3!*^&OC z>7a!`D+hKV$tMiNeA|VXa|hv2dPK$ZgVsyr=;%mf_n}pko+`r9OZ74gOtWq?R%-yi z-%ed!4xOEe=hd$6r`lnvXnH?OKbxuc%{3>~#D4ahC?o(&O->Ln)kqr>Fk%_O=w??0 zuqW)m)xc_#MQvedX(;@NJR5KBe<9oEao};2+#NMvN23*Z##9HZ&GQa|BH+uw^}wGH z=kjYIG!;&FLHSP5-Ue?e%qjw_O$KwnUkKTC*TJGia;o3|ek4htt#ozW#leIBM022gI!#l3@W}6R$3TP1yxWO4P_2-#z-CG@9`SIDbNcMuMoHS*zY&j 
zf|13|JLdd!g%FX5;?n23+Xo5A8bU;+rb&dR#nMd}fkrftxfur zrU|)=1Zn8H{EZI8i}=@_58%+u;l9iiAxh|Lne=f%52-LZHDprCbh4>q7NzI4x-Og? zW+5m2a|xbT^ZKH>IgOqH714LXFbUmKlbx3IdLW#@GGGDYZ|WT?(vYGKh{}U*s*@ul zsfqyhzzpDXi2vP2vx5vJw6p>EG4Lv4#=XFkMiAI-va~=yB*`#IU>id=LM57gz=HUs zUI|y+95gK5C76;*UFe2u4Qx!~Vfr3g% z4E5988W>)1`XN&TmnQz2PEA@Ue1Y2(3Ux1vBkHTuX}SZ95)5PD1w-^c(S%d--&v`z z&!V9r6Q|QletsoMNu53Y&ZA}2MswkER(@>cH|s;}o?aXjGn@Chnj+4UN-Jr!5Jsbh z91gIQAg-u6Gd#eVv;kV&g4m#!n0@zb)WtgylkXZvW>Ot5Xh@ov(Sag>J)lR8jNxIh z>ae-&iTM5v7N!)8+9%GG?iQQmabwhwF&J3|GE3k-0~VPN-!II|_98y3PMI+H9B?Ce ze+Bz~2}e&rFf0SZZ+|Pkcoh{xO9I_L-UR*uw;Q^;b`2Pu;`d8cki&;B9K@oaFj1Id zK3(Z!{5@90*mM%;Xh2+h0uT<$2wmBU4Bax7$i%LmDMPJ0~1|N(!KT z5rlfBVJp|b;RQVaAt!jPQA_ZM1u)rc+h{`64oBOCIQFalHG{1RlIy_k0jCc#{g9zU zx+c1fZ2(#lodzWo8oCS4ux2*B+nnFu!a&n{-gAovZnuNOhZl0_&=T_Usz^?5CpFb1 zfYCgw)l~jYh-P9K-Ep%Hb6peZ&jYztNYe+&qlzUQB$+m>1kL>o5^*z|(^Sz(3u%O~ z;KLPl5uqWFz{vX%eZO~I-mtj>8;gkE(S+CJ8A*x&jxNq3QMX&nVR<;V(15uO2L-_2 z0RNA<`gjj93zrRY+-*o8i5Jp;4jbNsG`|e;k(iPavmg6_JAr>P2ewX89HMz%ON%)9 zUYyCH68MW*s^-E@dBI@c)Gi7Nv4WdSYU3J*wfr2!7_}b=dgT3N^ZTbD$)Li(X>&Ac zIt5yV83}1aoa-}95*W!ntC>Sli@DD10M0?U5kfVbPc;_t+p?sZgeTAZ(oA!kBU4Bx&`en9xNo!3zEz*;cE8qT;Np>PyGYC{T6 zbaLK;q~(IwYNfhn3Uzhac)iIqHu|Wp&LJbC0Y_YraL6Qnr9d%~rKBA~VQKO_v-wla zk7^LLwbNX8L<@%znZ-tcX_S*oEgA+6yyq!g8qKU_C+#HAPBK31RFg|P$<&fVE1Bns zoQrmxc<3M*KXFmyFl1iIE_1%pd-aS`^Svw0xj7bLm=AQjxd^+s=T8y9KFoR-;{WF# z0ihcX?7*$S!*ol!ol&cjxlu!ZZ)P?NSpWaC_ukQwr003>uR5ZUa~c4H$$58ncQyxZ zAeZEdqAA+4WSN$OB#V@eDC>$AbxmE%l6@3t@tP7TOVqV!n-=4WNlPxdTyk^X2|GDr zFvvM{H-OGn_mB6j?gFNf8(3hco^$%l0I066uKK?3eZJ>SVgdS|gde>Q;iQBO4(OtP z*>U*S_4i)jg?Wb6s^d-UBo-GgHaE7jemzJ+NW^iG?%lk)6>K3F*RR|gzV!9WuQ*SnI@Qd6UHvXa0+@o z@@)42l?Ip|A!b8;^+Y1&G&T-VQ#-k^S7dJHNvs{ID&w}D$EX;e#=MXte)ct2 zB95m+N}>F~PTh-i>ljbyxU-eP83!TulA)LxOb2a~$a)hGQ7VXHGJuJPU33s9$RLee zV=L2?Vqmh1%M=mk3cI+#UZ$whv)Nv~24%0&JOiVhwb%Yv3wvc)eR9eTMF3lMfcBDW zxqLk`EGh9~NsCgu4ytQhzkjrcen#2+H<9Fz2{j4-URRaBMCt>g1Gi4)z3<9rMBjqH zOu*y7AmS^Ui+yVGcM}x>40V1}qqhK{y7cCXx8M!KjAe$yy@e3M@3cV$C(TrG{h#0IrjPydU{&9`<7QJyZID;etiRdgh3IG*=PnuNhz#6wl<1f~5mq`Scvfg)254*<~! zBt|oo8c|9T(+pw&J~9~}Bf|}h%nXyh;>XaEbT;v;K&rg#Q8}Z+K~GBbRj1_77N?W? z8k9Xs)|)(`@2}CX*Pwfut{ywoy=2a9?}s9}sV9Aub! 
zQiSwd){v9R?J;f3q9HbRb8fp+NMUfp5WpJXSCJCTKT)bRxTc@@C=z&=Q4;qevtt9( zND=5y>7YEWQ{;CLXTi_t^msuDU@`D9;2}NNXLLW+4}+P7zl=2x@eAq=xmShHXLv{U zCUxOrqoa5UW+$k^R+$>flnO9WT?wZ1Onciy+^&42V$Ly@YOup9=4_(>7e8T5vJQu_v7DJTQK5fX+@Jhmlx4lybhF zhh6m`bz9H@_s@}-z_>257nGSbAT=U>M)&dM`ZrraTp_js{|0G6{HmT3q(-#`JfSOs zg$oxhHVEqVWhtpkB6%Sxhl#LXrD{k^=2`3w2l;G51hlxG>t!S=x|JTvh$;cJWvD4u z)`4J+R1tq1nxfEdiT&P4f=!#_=d0?z?*s2?2v`XYauySV;6wsrVg<|XnB)I;9-}Wu zu*xz~j>L~)0l`!fZz@etWrq8ho%3oZ9lb`HAj6a~%{H!)#7`Mh)H8yg3>h6$hmnZ6 za#rS$^e{Wtwo=F7T$Ji(wWyhTQ!Q=2R1fF>LnLUUmukkTVuI&*h%hFljBz`!)4&kj zwDBf)kR+fsxqkx_Kod#Anm{s^SEp9qFa&T&^|lC-uR2!nPk2bD(@Uz_y@mwe6(K>> z^Mw#QeXo8U(r+NiDA)8se-wDX(%6%TBj`UMVd0-va=8MROh!M>j_tN|O#PnzeOOtt z8`|f>#fF2EyjAKn4eS3`>H)3Lm!(Es%Ecz|lEJ{jBA~RhZ3CGP157W}dqk8pHsFJ5 z%dNSo%P>+8W7SYM2E`<2H6L3wKbTX=!H8INh!G-#SLx_hHf{olO_`hZ zDXl&R_l6;WVc>7bWhE7c0D6(uZmqy?A^PG$W#}*KCwX)qJ$JXu^Xr3Lw_kj~ol#;RQ)sJ6F7-O%VZcyJ-;+Qe8aSEWjaN%M> zOd-ktmvo!BTUVIi3dmqwx1D{uN1ULFX^NE;_<-A#1a6chkTUtUwLxJu8MUB{M=BAQa;)G)qi0qMm) z#tX#o(M_+gxSTFYy!~e90F)JqOpJkMOLg}i4!~~ zQ}H&WoS=O=+bf)A;ZU7Q#C!cNwHAM+AICf^PbHGuawqT<@Kx42o5qC;7xV1jx|RHO z>dzjX_5=E|E1TWN;(A>|SFjVhWTb#39)@TjML>UR7pU1_2$@iBZG4MXh15mq18Wk( z)etVt&@)}j`M$$6rsA~FO*LcGAY?E|X2~vM14!ecpE@qGhYof!LOpScnWlu39HNF% zs+lCL)~HR#NE&epjj3wZO)FQ}iG>fy5TTqNT42DXrdeJ{x&g!p za)QI0=8$e$TJ_%s9mf&<_H8QS+*oi4;EG`Zrx6#dg=FQP;CkQd`;eeI1Bu)F5)zU4 zybiqQb@%bFkpz$*03T^zag75+5y+3gYpbHo4jVYw@!ZF0C2z8|KsKMN`F$rKv5NdoITLH8mPCX*e+T~mZ8c1E;%f!r9ZK}~mF*8&UrkQ@K z7$-!W7W$|nlHJeGw@mlb#LL`ECp$0~?S8Q>1y1TtLE%h{tQq6@n=^cIeC zJ3};PspkyETww=0=%9_kEGZpE)aOPD7k)X{!I@DrGc?Z_cJ}_Tk;X$O?VR8iX0oZ$ zHchS3|A&!qhP%iXadZiwi^Y*a(n^MP=$*|oC4J;FEQ-k`!Ve&R+9&k?rQ|w^1Q(dv zh7Q!9)&D<(h~hIMJYLorSq^*>_%QGt{fP7PoN^qgQu7^h4Nw=tm~vJV$#rpi`MPki zuIN|dcNK}%V*$hK)oJ~fY98~&YW3;%bnsRv)#=6Ha< zIk&LC39fC0j1LQw7^TE0WsEv{X(d2{07>?8fxCE>3Z}?lWK}pEgngPKF0qG;?8U-Q zu3(_hswv93$X>S6O&k3Ts*GTlM3w^0MO4#GsCG9rxv&J<8+i_K*c?;FHs9x+Lwv7kC4ZXzn`KiJ1G{f6l0ea8eig99#-!9IN7H4E?l@+zisCU#P^V3>9J6D;#@vAEutGY@?AuyqNSNs;^)7|06VVg`JpL0-1Y%EUN-u zp_QR*gwe2WNSau_3YgMO%~eE6oIo_@K_q^)QZ40Gy=^(u=3$gdhNx$dMh0lcz+yjV zsb!Q7cGAHPa>bLKnd^R}5Kg<&R^x_TPi`OrXa_CF*71`TbZ2|oIqT^ z9lAn!)vyLJxylaCaez2M)nqC+u%R?Ij}b3J67ITy(NY+%0?RkgPCZ8mXDHvy(8)}Wte zPH_{Jyhw@W3)?E-4we@SIDw=}p3$)#Cs*$}s=wQZIGV=w8Y7f2Mh%^8qlXqIsAh^% z?fYvnLLKFV8K;I+wnBC>v-L?2vwHX4ptwN@U^9~DnnB#A>m9T5NHWMHx+8gf z2?P*C`OAY=M9t11?%D6HBWL!gWwF#cnYeJ_VwtOicM%#h%F-KEdVncHln}#IgKEyA z0=c-qlp2FBni-&pYQ{Ovt&CBt3rZ~;yG7O}Dn(F)2&q0GVh*#L5?&GxDr4rpL zh#^Q4;xfBwqlW`p>0&vtfEJ=Sb|E3^-OP?V^*Bi2Iknba#Le498z;G$F=`p1o&+K6 z7)QIE+XS`h_O+X0`w-V}y}QWVm;~@cz<<_3{0StZHnst-YZP($Rw1>`SJ{yF1Hd2Z zX?_Rz8Y^!UQitTPfQXKZm3wJH!V^J~PP*O6RVZDf2)itUTXE-|O#aU5R&~Bw#wOw_ zOR;zrFb^YMKp@rvokZ!g)M!%Wo{#bKvauHIE!|#r}8;=0mxk1yqlkpbdT!lgp@F$2W zc@Vfor%2z*F3oOdXg2^2tdQp#NCL@!(Y4<|I@ZM(c+b0mKh`zlQ%Kcq7pq1S@USj( z&oX~>sEf^yrMPu%^PVOb1n-&me13dnn4*{j9x`|^9P5{*sEzy}1Y=8Ggcg)?G1kgfKTp$ew^Ti5K`H;Alw7#cU*N%)k!B&Hi zlD|*uN7VMn3e~###+>q2_$eopAJ&TDEIRsK|D0$;ayBo?l`6n zspd3_xbAAXMhju0`0$gWmsZ~72w`g3@NI!SCiphlf@1+QMGy;cy{>D_Au|FmY5b(9 zVw^*qVQ1t#>5B<^l5Y`agi&g^%x+a~?d?KF7w4q@o2HE(j_@Y!jI&k8-83_JIYWdp zO`fnvT|gIfQ^#tOb~?G0W0Voe=F{#_s#vkWSGH4$wvco4hV=@gzz`N0QUtNsLawcG z29r|y3Gx&p)G$ahaf+Cvk|7$kP`Q;Qh*eB5NhPst98*NbOd z@>eMMS-FtwVsl|B`L#_*Ca>+|h zF_%eF#28^p@Q_rhSjvV@vKX(1?ly6!`qCT!VS9k-Vi?;AshjlvW*oFWql7D_#FTs6kL3sMEpb7vW%pGijxX()`lD3SPcvzC19owkvs5#~6t{7fBOE1|C)T)| zojlGXe4qOmCPf*Y^wU6uMnrdPW<#rh6V-5O7fh8vWq9L1#F+q`dOuwHWw35`*o&x= zKuc}RD7h;Bqof&Nh6;MAV4708X~R#NYiuP#1u;UZ{@H}hPJXD;?c_8tK_A7$Jp?MPVFew{TgZeRkqQY4W)xqr~T 
z_sJ!YzXqKwMZl9R9=X*6{H;EYRl1kIg;dh?0xvFdI=i5fSBcvpwJIy&pa+0|ha{pr zshil-?&w)9`jE1Vu1DR)+*RqcW1nR!e|>D>1s>225NW2kkJH@C2~zwXf5c%)w38e86RT61aPS3`_DX|b_yqn_zM$6&q{SdvM*|C)sb3}`n&ionRHPVa# zO(e-Mg@-|!@L=il_BcjEGtxDxirLC-5m3VnZJfbJ4VQSB4w~`u8u#)|KF(BDMVu>y zNE3LI3}MDNPlz(2?52}(7EerC+IrW>7x)$SaA6h!9HfD-@-g1zUdMfTS?`M{O8_kl z5)_djh?J7P!=*Vc0bC8zNSV}M(uHs->8-y2{4P29?dvS=#Iq+^((1^4# zjv;=e-vi!@WEz}j<#qlHcf_m(T=M549|^pE%2`AxCrL4OVVMlIOp>CF9)k0%F%im{ z%$CXUkEE?^{HQ-CSY^S2)Lk1+HUSuO;Icb%o>IhQbsU`fG`_z7~s~OCc#gaec~DEZq6TVDZi#KC(VjU3CX*|~i`Y~x@kn`+U0%)kwUOWii${N@NlF`+UC`c{| zEIju!9lP_oMGl@uJWxw*0BDoTg>26ZJ_ZO7)0|rib3;65L#dvb=P!1#mFTWc?5@xzsT|);KE&*HtMIu5@NLF!9kQ48=Q<^S zw<6NpHbbvny{1ZgVOY4sA8!YNp9!F}2d`&N7dA#llmPGM`j%(&(I~qy+x&}mf_lPKV`7lRqMt1cXEVVUqiJik zjWbCRU~*0tdV(Mm)ahK`j|7~%a0%escH15TKCa8|pCiTd29P3nW55$geZS>k0O>UH zl)kqec%SZiMsz1{E^U5}>T9XI^El#hwtzR5DWp80w*sVB+0*U_aj{)d%yi94H&=d>9K_`N`xpjO0t}R`!VKLXd zAcXYGIFCesmg%#dUVsGp7I}UpSkwVtJXj?0)4_JSXkWs0uT4lm1`l1dF-Ri^=ai9< zoMl`6yGfOXPF#iBC4g@OyueN+XBi}Y){eXQ93s8>fvrfq!cr^K$8~3OQkmF(2P^v? z;+Hko=Ia~-{x2O7*Ou`yUPSPGkKTfpaa+N;SXbI3^Um%D5V;}ApKT_?RA};tlgt^I zco?IW^nwP{nnSEzto`Q2zszGTU8L<2Fbk=Gi#2c$%pzDZLd-0w4q#x7Mz4n$A>tG* z;)*U+;_SvKxdiZSK?13F@&qEyClQ%`2yxS9ti%a)H5TkRHSyL=l`` z+9{e5^}mmWRlp!{FK{0ca`>YaZQDdW{JQ>p+$Dc5)*C+}R~hg58<NV9Ot>EraviLPtR$R|6$FK}E>*d^!B!c`;=I`pU75HT&Z1Hu( z!_c#W%Y0GSCle7oUAS0RbUSt+H%jQ0*Y zsAZHem1^MM&;?>QZWeHX5d<2hp{4_RN0&eV`++OuN}nXiHUFCrGir7c6kc zTr5cb?gIW0_|Ho2t|HkFcOhz@U2AIyNx%J`va0R6n_dUhDfxR?FR5;k&76URm?T)4 zcewaKh5-0#~>a&I=RDGn1m_MtE?pPQ_Tc5 zjN!><0Ebm&aWlFKj#8jC0d-U0T`XL|B-aA23h75*iG)0u^Bixxho@L5)i_9oattgg znWUCcs^;w!lY>{kKoC(6z4NGbE8Ebd2(XU}Y~$LTYfaL^RVBk|B!047Uln>yc6Wm` zxz<)L#aRfh0=TRtcRwPRnMknP%SgsU3*rjC9dQANf&Z?@|6|}2dfflf0lZS!zTH9n zoIE=my9A7hBje(<6@T3onTOVq3Bi!1qyQo?Bt zGDIUOLJV;SCI=92L_JZeIL58q$4ittNI`-CU*Q+{G4G~}b}hHzN9vJ!)X=*qC=6iy z78rO8GEGZ3c#H0`?XDdLk`#CzDe+)Bj;%(rx3?f20X*}v&=?PJf(j;>=KB;gPBk6u zT;zJnfFnR787?xw2-n!kom^ejEt#T(m$;X&@$>Z1Mh#<(Qp2sh#X(MUj9Yo0cdNn4 z*QYwAw$&~V<562#=8iX)02TsCq*ca~I>}5Oh~EZ21ndQV7C3?ULHl%9vI~*p4B&^W zAb{n-$8_?&iX@v%Tz}Xo@I@pqWkA=7g+!C*kodoN0Twk^{d3`BT_Cx=xha>z&uI?P zPpkgr5nX#ipn6Cr+eytyI*3xr-|-Qi;gLC;-JR}DUMpAz^xgwK?`LuMuc35*Zh4jw z#M9&q@-~(597f8eEQBKB9O4RxxU#H^59!!w!XnKmUG%X9GuxVNWk_h)!vE%9XMcB; z+xR%2V-FWN&tBf*cKyqidCs!eL54HBd2u%|E&(hE(nws)XOSSg_aQR$|52uO7;*I$ zkqcHGLsUW&_{jon%!f$pi3Jvca^O+jnO6Y+D?b55Ip%ykvlwyJT8M`yzDn}fkHi_i zfrLf&x+BGfi#6vE%+9q65hFwlgA5iiBpW(7XBDyX=s8o=F@l#&!CZMRmhkjaK1h33 zIRaAp@;gXRYMsowl%XkNno1_|t}@;(NP^HDLo`h(Tj<3{I&W*Z>tTmr&8Ps=oZSGq za0y_6NZ=*J-TM%t0Dcj8MR(jah|A@QPLNUHe*oW7W|S`_w-qV)973`gugx>rBl`C# z;01D-V0kuZ_W=8pke$)X^yr*8j5HgG17AYQ7p?}=h_Lw-Zp^I<7wZUnt7!rOQf#G* zZnl!bgCxdmQzK|NHj;AWoqfpzmE`8)H1s2NV@SIs1gJ3m?*-FBE_PI@bhy4Qf&z zJj&`q9$mO_F^|ENX?6{}L#)X`&N59Y5y}`ROoS=o+=j(AJe1SURs4BsB)*rY7^Q~4 z<6m%vU8)35E5QglsHTg8Ve3)Y_B@R3U|~0qU;^oU^%g#cXk>&6!mF-M8z9O3yvQf{G(X_I z?B_fm;mfqpj|Y?6IL`h2m>0QS@4wyCMDkF6P7l3`h?W1K zAA53zx42N!RPkk`GsvVnYFxM|Bs@qYvd2LaYM9_5o+Uw$aVj{@ zEu3eRH1DCDtsLRlb#69*ZCvBG`ELx-Ko4!q;7975#t;>82X0~L0%K=C?0E%_KL+O3 z1rD4+Tsh}-v9(LU*nOR^&jlB6(-aa9c?YpgQNF~BWK>lcnM086>}QYTUR+c` zrrsTIE&(hQ?AShMkKd<-d}RmSQl~>giKE>P=n0lzMfpzPSC!Fy7J6QRFPb7ko}SzYly>H(coNK8|z+DbY*%mP-I#xLB)| zXztG$yi8L}oDgxUu(WK5g%=A$vwW||Wp>fYcGB5y6ZT|xrkc1Y7|MK59F&BR1>sd9 zSD|FkPHeE_JoBMQ7w=N$?-pW_B1Dv0X4DX*@H>iu&o|=iGBBmUq>icFUspR)B9wEH zJv1;hivUhh$_TCcz2bTgCe(%&%^`v|0W86d0dxuAI^n_!-eXVHD3Xz}k}AyiAU@q7 z@C+nQz?0Kb+T|Xd1AmUFe+d#NkhXzePL_P2_4^rx9=XkO~AxfcNU0 zyDXQp^0d$hU6kaZ{hc32qjW!}2K-&E5A`F~oFZc5TzBwL>I03%S&+-h9 zkjyH5_uytwt|20IFmfAM&5Ilu0uCUh#yXYcl<4mLRG#gCrKEi~l9V_fC2JzdT_kyd 
zAV&e{rj>pcx*oeE&0%1gX~r0!5ijScoOf%nbfoZelp}neU*|pin1^{b`?oMvT;w4{ z(cFiWTI(T~!8nSP#mP9RO%-k#7?%KUU~ES^iF^?8%XTl3omK0C(1ExIUe<--9N9KK z??*xlE#Tj?aEB~=lN59Ou0b#B7A1qA;^SJjKMzy}ZIPZsHm4BZUVqLu5$l zWNvXyMWGRnL2?j05isX#5)emRxsytGCJ+^HS{GsuK4N&VNM;vtOGiY%62?{)8l1=g z6O?nAJzS+GJDwucGDI(}^IT6x7xV!jLXZR{L@C26kdxJ-g+BfX-{b&iXZ@v;ac<#B zo}+>UWqL1$5Jl21mYs1BUQ>@5XWg1GxCGFJ48RH``))`FVFi*QJc+oDoopzkgEa_C zPIdncq}|7p!2fpa1p9=E`P4eh==`Wb6uNvx=Nk~0El85FT?~2<@kkqp0%#|5w318k zc#hd^=3Z4!YU=0StN*^p{O#ymxNxx|G$=i@JH!S^vzIFvz&kJqS+@eI&`!AaWq2exxO zO9F1^bw0>nagJXl$sRQ#PD0g%9sd_#oc&y2Kj-mMm^2GNX>R2>hdFVbE4rF79_5R? z!3BX&2l=-#$m1BOdXYhYS5{=PK=4XK~>xM6?qvb)w z15lzip;*@xd&AnT$NjwHyuXj+&A4#kqG0f=i5}4D?QxjC5HX60Q9*=iF7W`1J9O{j zB>$KzoFhnx8R|H|8ysPb3M8AfcH`cRKmxY(K~a2jJUjzvyaMgtgy9-Uw8NuS{r;4D>KU^k~I zC7w;6s3giAe49O(oaQkljxC5AwFfCFaTS9ETj}K%PEbRXG!!HW#D)j!x|VcaU@KuB z<{7H#V3Y=WX=Rcs5)_faV1^QWq$p+v3l9kbBsr@F=?<3!x&&}N(2V#_59@Aj6bX#G zPbc+{9R$#B#qr!S9@rp$zC#ko@GSFIUhYI{MbE7mn|JYatTj$|?F6%HO{bCMlH>Z- zPLjWNwNJ^VPsyR1Kkp1sa)8G?f+ z2t*8uq9MwNP)-;RCRI#Qt66jgCgntEVu(`WScvMDaV+&7V5`=gt!A82Y6w%wG$q7| z;Ui5gBkZDs+d0l&;@3s{wPErwJtTRBGN$OHp9pn8nj)ec zt|2Z8iV5J)kTx5Gx~uC#T)$(?+reWgn_S?Nz@tQXk_aEA0G-!{5x4JB6E*Bmkj2oA zc(;8ly)2>&7cQ2Ka(y+C%MmSI;kk;HJlZQ+lo)%tOfNf_%o~|1e26(Kh5s0F}7VOqLQ$-R!b8gNs(iRZ8`ST^DWIDEZXU!nSQ1zVT@|Vs9}s6 zib$}N%hWMM3n}*M=D>+c;ASjF>86Yc8W`mmw~{2t4m!A5g#-lLsg!Cx;MWa` zSEZDG)ger5)jKxp{*)5uW=_*aH)k2(4ax~q47B25h!VbwhY`Z;M?7|0xzUP7y997e z5S>HxBFta2X(=#(20R#e8N|ezRq`_LOmS`xm$M@+G+grM!o`|nMyGp%TwbKTa+EBz zR^&l!&dL)aj*l7LEsi@V;zmLY7Hk)`aY>-Zmn2ZzPjw}fN5Eqmn0T@9VCGrGoyEWN zSqfSDz4*vb$}~xW7=Vv7UZqQR`h=0^+_`iLiy#S#NMhnAM4SN0StXb4j>P`;yIpvd zxOnE=4;$%Z!Na(Y>oFyOHx^t1xZ&Xi9svGt`Y`xY{)oqU zp3_(iZN4dC*s1zMj(6%HCu7S6A(o*f{PJ1uz~Fs%r)=v zEV6AKMk2;cBwgSd<(y$3VOo$Fs{L%t+G_P+Y_ghf23VFsWOO^nC-%_m@lt7sQMQ0e zCa7f$0}Bg-012v?%o4F85?QiileRd1dT6DC?L?>`N*NPWVPfDVLn%}2<|0Qp$!Vel z5f!jVNn983GJf8qg_G>3lWj~=g@un(9KyojCQeb$5Tls9j080uq%clYOR2n<>!VG_ zk!rig^#AsEk5Z!T3*}CD$PgkB!X_nNKWNN~j~v4idae zr?C@hC6N=rIeeU_jDCES5T~Cy&as~h>>Q04`^{Mv3mFhjhq(5cmWA`!v!QZ6PvW}aiUTP!o3;Nb)kwoM_)D{0Pfd7}}xo7_= z@Cl^&*na~)!)o$bT)1%IKtzdH36kxYFU=x>6pOOspX}U5+wC<&h)F7$pi=9o8FbT1 zfH-xG5@v)CFqoxqQ%FCFlZ3cT1*d4Gixw<=R1l?yHU_B|b&W%8=&lOk3g~Z^T7ce7 z;HK~ZF)v)W8M+=u^w}|eDsdo11}{^TW}omhWsFk~82E^Bg zHYTMss}$*;Q3hNhhKJ+a$^C{j4)pc1#aYG6_c-xQ)$C)9+@0OJ_+6Cy$}{ghKgiXDX6LV^ks zbQ9tQw$Z_M#;9k4YE6Xk63YfT*ji|YATbgY=?rO60$A$OANLNd6qUelB5l{ci}Y8s zZ=cEHnQ;N6Amx4fzSn?P$X`b#fP~lHiKN^<4g87@@IfSC)$f?#4*Uf1)ci9Y^LHT0 z$Z@^QEx<>V7(Tg-uW#z+KcHmtCFP_kcVxJ5v07E{O{5BFv6|{FN(sCU<9S-A@`#!{ zKa%+;Z_BLDeQ{KHk>`XWtLgA5`i;f@ra}CL2*oM%60i4kT3lTkH2+mwza5Fb@HxstMzKmvmiN*Je{5gHhzfhd(s zP=}usH4IQmm>Px|rJmCqCQJ?ayI`khi8TpAI$qn<=C-;EjY|MmgBIXjIz zw&eExMc7bMS<^oK`K5WtABey9w}D60G@M3!wjrI|kLw=FKfbLb|NIS|$uHr?1+LlDbTeq)7A~P?b3Kqk%D23wGuqzb zHR?*yuA|_xDu1JD)<>ygfF`zVBwN+Znq>@4>Pd1dr#ZwpA&MEsz$8p16Vx!p z7X8MpNLb4Z87}H4HzIL=o4`$$^n>Zy9D^+3h3-Qz@_;VLILKC-oF+V^TZ2AbtPKb0 z$&kUX%Wq~5A@b>YZ%2x!*l8L&5k>b+Ox|FS76xh6^H!^UJHYj>{$eBpJf+4Yg@u;^ z+L+Kx<6Ag^k8V!uSdJjs>HTV!hLz5GfEkMEWE)+yGfX{W)GALHKcI#-|Ga(Q~CZmM6ox?)rsj)Vpyb`qT7LALfXA8B_8l9@Hv$w zeo49M_m!W{xWl7xXh$SgLm7LgdrQ_C&#T#JW9_pWJ5|+-I@G?1DkOtyx@S~9nbu<> z#cWn4t@a+bhzsJ*DrG($rqM*~&A#n@MWOrE@rQlVxn0_?V_b$($EE zmT?I;SikEy>zk+S62NyRswUyzeF+|W9Db01KbzJjoUvt`B&mD(#V~?oaQr~`PyZb0 ztyZPxqLb^|#(9DFspWcAd7|^uOy!@0ssjEx;uSD}zg~ih*9v?<3E&%umVaWIU*E+d z2mp^E@trYUK0k*mv92%9=nTA7wS%=%0~7jLqdE;El;YD2MjsXtq>OyVVbTZc;n>Yy zI*P}b?COR73`EKy9sK=>ztO;?RY@5$Wb_+^^AL>~MRc*1NopM% z7pLtTn?q{tRHab|xmya^p9W5m)6-pSegas5DBI&YuGb6oCOi;^+jL0(FURQ4qESg& 
zzfR_r7S~Itw01_B*Qb!0|F#(jU;mai;A2R#@Hdp`ou6||T6yCsU0cezzE#{l{oRE* z?|l?$aohxa9`S%*X7e;(-xRQ2KOm}a%q4$s18m_uqSWaT)+YVA|3NPR03ZNKL_t(x zK$Sh)WXCBYpe`@w=raHr4;(!L$BtyMg59uV0*2~gs1b%5V6qa1>mU)Dql6f04CgbP zhC>B%9%t8mI-@lB5KC(+_jYX1$6@znD4pIUf&$P!2Dg0+tOQ&>1j!`19>6%BNj}BP zlA?j_olr9dGsQ621cQxpp8o{WyR#2*JZ+Y2))c(N<%Irgb7dp^U9bAK4M$ z8D_;CdK??Yg~oVN@7?d}aX)s9gm(cS*K_;@5+i8p{J%vvg~PfLyo8$ovL@8yd8Cey zkLC1;c2U^%%cq*h8Kpg?6y6CM)){W2glU2#wM=jBTI_#QezEVC%?xXz@@naW%RAxR z0r1s8GM^H-)Y!Ouq`+RqcW+z*SThFg*F+8546r#~V?WfLgpQCBG@I}(VYG!}u>C6R zy#UcNICh(C1ZKgDxRg5`BgwnKc(6x*0#Oi$5EU@0_AE~(NO%d`=6ExzBiULI> zsAY^|X7Ug~3OLgY-BfpwivZa`Cck}h3E-L&z-pyJj{q+q1y90-IB_kK(DQ(f->p60%OtV)YmC5eVLHCta5`e>jd`7L-Lhai6 zkn({BITx-E*n@Z=LBh{|f+UUoGUEOn)Q9;zZhpd=p;sS{(@MB_JK!PLMPn_Z%L3$5 zeIzQljRpp(*41)uO+SAcnucMx5t8X*NCaS_3i_Ji{C*f{0?W(w7T8u)=Ij4pIo@-9 z9G1}EIyPMUHzy`4p{HeYl!3|US~?2!w8N#_A-fq$fFjXdD z02lW_yhNXw^O+W_@hnm@KS54gHu8)ELys*}1JJBS!x@|CcwfeY8L4BC{q)gCB^T$N zPm<(R&q;1(kS4D0L2Au2PMzOpscN9Da@HvMD{~jM^&o(ih$r%X<=cN-C;~WyBpzgt zIJCZX7H5VRBAla$@;9O^aiV}9J_Nk1JDGisg~E?0j(0%fRe0eAcs-RX`LYn^=8mM0 z@Qx?+=Qj{h^Vfi1MBMJLD=8dsC(hcza*5vtfq|%h)oXPAS|kc13G~p&Yuv?t&at0! zlrlZbG#Wr411-mDIiIN`IngSr&q%$vev_d&q`~>l5;cBla4DDe z(9;HKA32x39lKeqi+H;}zhYgyFOds!NIOTEj>t+R8K@NrYbsL$Rip16Cg=V!vey%) zf(z`Wi2{7B1qK0SRp6QSCn6kvAXuU1xEzQrl?b9I$q%O4Zy^ENPeCRF zGq13so|qXVspCKBI&w)je-G;%>Ic3>0sZ+dTx@Iv5vS)`r9M=sw$_KAV$+m;{an;M@1XD<6T$hm~jbl%k_KM=_SK+ z2w+M@7iW2x8pLJjmVsGU0yvK3NIb1WHo9&q+}n*_a&=b<4HI#wA0=0J#a05#kUnk2 zh>!L~R+K!DQsQSi9^^Cn@AHVuecYWiE^Z7|s@Yj9*bdffv~!tVnD`i`g<%@#WE;0} zls0^8n0BqHy?)Q{w2&j!u8!Lyoh3puInw1wLa!h%_2yq-dVM-_ywa+I2Zn z+FU+-5MW`I*Qf7kCYOcTt*XEjkYt8Zj&cNmJ9vW1&e>$nd2Mt52Dx(6c1)8;&+Qg~UUvd$CzQLL!qx&d`D@XM_$8efk0DW) zPa-PeGK=&1$u%l@5%Dh-^GoCvOMOwkdRItAx?5LQ7Y7ECxJ-@XyrM| zXkvggKKf}QL5YL-*+#BLNng8OalIPR`3g5LwWy6}^F6SxtuS7*aj!2@3g7v8c;eS! zW*^IQse9*)#xh-)YY=Zx4U(l-xeT*j%*x_A?f11H8hJmttl?o|lyaOS_(;=6H?MLx zoov@`B_N)wwmhV7?)WQRU~JC)TM26ytl<(s7X?E=7xC4k(3SxoMT-7Cg=D9M_1x!K zQ4CoV(%s_&NOO_Px~}xFv_jD?T)abP>RKX6kP+&z@L+d9u;cWmsNize_39*kPDAKN zTJ!8>Qxt1nQ&?>1x>Qr;aN!;pyQx4Y4C%8^v*gU-f}=uTZF=uV0E-wQ25F+08G30^ z@|RZPxDCnVbm@mn0N;@)LhAO3gC{3TZ$(r(6Y;HfI~MXfH2|Y5?nbWF^W3Fm@YfL4 zFQ~h)Wx9Wzl>uUEQ{0i^;s%BXaUQL;4pz!EB}`+G#v;Y+{@#maa;4PZ+HOJbjs4J8 zS}Jb6P&*8J&qHZcGI!rmW;{)<=SKi98LF6|g+3~XFq4g>&A8OVC4ldA+=bLmDFQwV zyrK*AqrmU$F~5)GN2BELpCPS0-UR+D3um}YkZa`o3?jj=q5-<4ob&TYk?0rolx~q{ z7dJ2gtX&66Wny4uYp=B;N`4&+=*~iJH=-XAm8i1cdiszIAUv>~cJ8r_+b8Qm% zE8uI0e>R1bhe<1wTS0%elyb>eaofAOxZ&YlCrVX>g@K90LAC4T8$`SV3Feoz>_m_Cs zpCISrd5T;OreUN}ZxOi~H|S33-D-+{sD>fCnvg4m_zH!5)(6z#$Hb?bz)Hj$@NUEt&`qvZTfX?VZTfQ6zfFN;01`#-;4fH? zsMx(goFsmRX<(cha^;5hvSGW)xCGEeVemAPp}SWJpS^?c1irxag<5Sz?{Or(eniRO zyMaH^ol}L9yL|C|_W&Q#_q@pLQZY+FXc^?st{xFXlDO{Euktb?Mmhq2(1Nm0hy z>uMgLj37QtEG)$R8^kVPYmgztFeV8S6k*|=vv$;wE0`PPhED?7;5i@!l29_e246Z& zuCC+Tg%nboFaQ+cVVWWmloF+q3BpuiW@{17=y-KEE-nGwlrR=qX|^EoeE&#y=z~ZX z4?p78vF03mFS$Oj&LW4VF7iPn>Ep}fuf<|3j#EgX*W}W_M!oV(LwV~$CA6A>2g&8I zydU^4`txrq_qXYl5+X8DP^Oy#EK#+X!b5LTa^2Sdv_Qt_mFo zWjcD-?k$@fX|? 
z<6?8p^7Y67mMNIzAZNM47CH#yBTXy4?BNny>7t2IGC-6v25ICq?x2h9WbmrN?bdaq zh+IdHD&2Utu%X+x6&lTbP_co#fK*Pxzxg86UW8A7430g<%KIx%1Lu%lTcdiOIvu&Y zb!WepBxo9`aB)S)U>HbHM1n({;%;7}iZBLn6Q`+Tlv9lI7Skl?*6|wB>)o!`zS?9a z6UjujMU*yjE-4o-0bDgIkXXB)MzU}}qZ45+I+5((rx8`}Zs2!yfPNBD^6WM^#X1?P zfq$Vy?uUAwJ|%~DASz$?Ji3HkZf3<+Nao2twITEIS}!6p7VBkvNBO3KsDF>>aWOr{ zgBus% zKApsMHx>arcm}p?@LrWg3ApDO*xv%jZv$)F3QshGBo}t;?me!=r%#Dh!y12)ailqX zC%JZQ0m>L>FBfSbob`2iv1q3sA8EST$^eymy)$~APNeI0-705Y1}R^0Ud5LhmjJp1 zu+X}|Pa{PvtB~Gkm*+YDd-?(?3zN`^YBM9djoY)p_m%u@;s0mvy~E==&ol4enSws( zjQ~i3of3;!B&t$fR<&c4yd`WYc7Jo%$ZZ(_kHg7ews9{-hs&b z0VR<8bi%|tuSvQ+hLodz6ZkmdwZ5qTj}=1rE448$J;g?(bzF!@D2f&Pw=+%t(wM)C z;%xT82m?8IaSL;rS( zm;p~eWcpM8Jo{bXbtSoMTg?M}O6mt0-P74BCFIm+A4q`w`)zh5gGj$%68M~#Nbl*$ zY{|e!7Q>@_P?0Xhe#&X1oic)C+7trI*7d7hb=b!6u>PAFMdnQXoX__Eeng4P(J>xM zFJ@^1I9)JxcfDN)=@Yu!NCZ#lPRD`t^9|Sr{~6$iNE}2ylE4wYh{Yp+xkzAMs}BB2 z5WzO!*GMP1>wpgeKaay}n~)~Y*zAsdrk@8c=o%DAkC-&(CyHRC|Hf<)zzW(}!D*Io zo+6dtO(derGW+trR#tO@3)Ip>5uuTI!05HAQFm!oh&#AM*NEdv9!#C6)k*?$b^USK z6sBQ4g9_$_l4u(8N_F%E!F>`64nl7Uu{%y%0znXQyY#6I>C|V@rO(q5B?b&3-Xbs3 zx3???v9l76&Ztdu=!maIJW+*lwm>GLET2N$&z-i7hd~~FETN79dSi}A4+C7u0dg7O z7;EUKKr`U2Y8gIY72*mGs9kGTdsvFNbO&v-rbpjn6+-XUmth;zS2KAxdzt{IF?!}A zZr?|BU?!T;RS*13f4&Y;5#JzEDrT=vs!S&C0y47Q1wN0|4}1a%xa*u8uWdxU&K}@# zBGqMLp&SuFGnETB#;|5->>{@ZrxRxO1%b@j)j48I=Pjd zG*iVJY^0vWc*$k7C%~#K(4~g;)X0%CIF5lIKbhKo&g!XZ#SdmFF|;R`Y*jLm;^xA1 z%%92wOAf%QpTZk?5WG&wilwae>+@^3sajq7w_zoKmVjW`p1q;OsX=%4XOWInD~ZI{ zSz!XFbeG?$$8zb`r$$Lwjm`q6ZDDUjVh&F$(Tb>#dGxcMBWz(m9)`wfZzkX-NG*+& z(M|=eoMbgkRMU@#Fg<$A3uBS4hL%XLOPuRT3DTerq*)&mwj+-YO(3W34U@FY3ROSONQ#*Zy3;YP@nV ze*k<*fBus4+Trx5NaNDMY-IqWl*@yJ@t4yzrjki?K4%!nXOJRJQ%DadS;ZNa(@YhC z5f_==HD~H(wFv{0%Xk|P{d7}AF9mc_jF&upPa~0nyYd%K z+NG~LL^ZaOGm4aq?pE7p5>bQ=av5MfN4SR9D5gJd3Zmr-&c??Eja~RRUbICW(IB=mZ4qKGQcp8Y0@DO=PT;Xk?0(x)Zr`cLf*VO3`GOwf z$3%Ru7Mc;|&xaI`K0h_H<+aW2iC~cWHMi2ZbTPxpMYELwZ0T;fx& z!@vUCDW{!foaF$UIl~J2$ya61t_n%B?{b>h!s}FWksvO*DdGgHIL10UDJG0tKNF5p zRGL&VDokA?n$rmjPeb-#>W|+NUDhkwVa;pM?}oloUCTqZ(HD>4=-(Y7C~b$LE*Q*% zj#6niH;M>`5GC-UKD&Nh?436IV?A$&O%XVv&%7K_54-g^#;Egi5@Z=?CM18+L=d2e ze%5o8QaVRAGObh+CIbkP#{eGsbi-0eI}2hM9wX}CP}F@Ij|7HnL^DkQ)0p>9Cg)p220k^b^GZ|gi(6lYo z*6t7GlWN<|FEio zR$#X|if|c(sHTA{I80H3_t zfJftA5#b#Ym;(%}UU! 
zl^N|(>lLys=?j2Q>-#OTcnon5uhofoGZM#h7)c}vA@1li$w_ewm6HaMZgf7xts6p; zL0TvHe2c!%N=mb^6)6Vo1ilVzOOJpwrWwn0PjwU0Rq^kYm&|~`m@12Nwwk5!)RYk5 z!OLb2vW_?DrigaR=%j=W$_U{k6Camz3>U!>r7;X-5nwH+D59I=tmX|iGe|Buco`(m zR&J&RN$u}I;+XSOYi#77gRHLkF^)ZmhqWFEd*SNWg}kusul(>Bs<=i&&lz){cE&~BEQ=VJ=;*+>9WcLL+rNthL!rIdEsDJPpD7SS}KORV7- z4#FH{GmXw_DC&fgPH+XK_G3k>5W&h;$nkZ}!MJ01Xx^Onw_C3*i`2|W8H|zg@f@A}K3TgHpK3 zMCu1jq$D~q$-z$^t<=)W5C%OI62Pr$PuECPp#6ISKsD`TGsHzIX`zZ@IvFO*mSyZh z;wWR1G0luZ6fmSqemCW`bDW^9?O-ezIGw{( zWu0Zo!1*x|8P;g2QeEwJB5L(`<6UHvK1!p?AOYm!V>KseU=fY(GN?Tdt4@jHmp2F= zU&Md&2u7g6yHROK6mN> zi-$&)%yN`Dwdk_E4{;S20r%^oJEX5TlhciaY#6HM`J$}Euo_N}j(xwrvY32ql}}0E zZ}hZYNt3@cW&r1a?;vF}4>Mz}22(-XGIiElC4KX3do<#jWe8$0Og2GWv{A}Amf|Is z5<1wxVLUM^7dU`H+?-%FZB*dZO7Ejf%vd?et4K)~a8EgS^B9{bkiwSNN}@G^p$r(z z71GyR0IlWFSOeX~Lgt)dDC`z;7jQ#W3v4y?$ibC4#0lZF#%<93azayxX8-2-e;7G#xonFt-gBdQp-5lLSUDO+ns`n5fSxQ53e zleYn%Qv&u)BtOJX)OH|lG>4wvFp_IxFg3UB9yQ1Y=^nso%p5CvMW>sO8LRUeW_H8E zjX0}|Rf?v7iBm5ez%UuqQ^Q%7(MN$A_zJ*D2K^M!%jF!$WxH|mIX8#cz-wGfHzff0 z$%-O>IY`g)HKb&Bh3-`_RH3_sc`}wwAZ$Qq37lO9^@}8Ktv_E#UbG4~P@rYEoY1fs zGW@V*KV*f(Of*)(p$+2x9V}w3L~eKEhUv#c9g8WY69bcGs%fSwYDC6LxG*8)rX{qP zQ>>tz)-lR*7^}RI(5nj*kPWHiXV5?m2iZ(H?OelaWD>*#x+r8HTWF;k!eO}=1BK97 z3z=TX8iw*#QAUfpA-@ma*a&^ax=`fl*saxXtVTL)T?8&y)#RN=htdRa(o6Ot;ERac z_8=0A2NDRkS$7kyz=;G+j+Opzm-22ve5?DBmL2M&1x$Fq(&F$K$KV%>li-V9A+H{xQaHa zh?IZ{0b%@jXk;NPIgOjpNL1KZ@)xhwhC;~Y|4rf`)N=~tWwoxO;p%yGtk5Hcg`Kzafm$`{^ETfcmTm)IeF|rt@ zg(~V;5;Z0*+mj8x9O$*wz93`{%kK&5nJfpcBI4e4A@TK>@s4Vjm?nVJwS61ZSF!Eq z44ovNk>z#rCa4xk47mjn(H%%e)=A>BzfefL#!u*zjn0y+Mq0kzqRQaEBZA{l`iZ8I z##}6lZX_@3c$B%f5*MG<~7$iasJ_;GTWE279> zbcw345`fEy6pcC26UGowrCS zn4UtoSf%7I`ua1FIP(I;o79aJmDfoHoh*nVonfVJm54X15-9<*kI~&n3@);Ow^&O7 zd&t2zlGUDr1PtUN#UY||h+~nzUEIVuRsbd)RIr0vX`zzET%du4s!E6B*kjZ7H$%yv zMSK_Nb>zjEJ&yERm&Rn!g;Y=8g(PfPxlZ3gT4Ws3Sw2~UIiy6g1d;Tt_?GV?NhNl3 zaT(%z9)MS#ho%ei=jd%$DIzG|sN?=I=1s^#`O-)ug^&?r_Lu66w+gY8 z64CVRmCuH8bAgv8T&5n`PoyT@Wr)Wkh3loY zRlvc`aQk*B?oRCo-m7cXsfBR#P8fW*uJv8IAh#$P>__xpGX{M!h3vLgL2HeT0EKPO z934fg5U*4|;{G|JVnQg!XRPSG75dz(^?M8TeXon1Q9XK?Jl1wFyw=`A0=jiG%GRLMB=E%Yx$N<>MMiQ zz{im68;G>ot0vN?w@r8ORsx~b z3^2OoU%cL%4jn~nfsI6x8`di|%OcW$*E(jl5~U4D1xmYXGzW>93nNP45LVV;KOWAo zoW<0U$1ovAYsnQ#DeI#MM+k#Ps(Fc%y>)ND$Nlq#y9@`Y0P2H$bW#X=D77 zDrT{&CODBgYd0a{=F6mRQRhOk;j7!Va6+%2IP}H*Xb6(S~gr$S@h4 zWtqlp#WH}_(XIrxU~++zT);s)-Bb|K7b{dDTCR$bRWum=UHSaDk&f`_!pW%!i`Sf7 z02ekwIJ(EOrM49!D%`LVwY^FTtqvcSyEYaH3MpaViAPyR001BWNklgKFP>XtPSQAe$@9*(+)AP(qvc8p(|0HzdjiAd^=F~}o2RXz^frSJQm zPK0J87Q-?Jtvc}ix?nc}59oV+@%d`&I3&qHf{p$fQO(yQ9)k;U#04iamWi1VT7l>F z-1aiQqM1e-DT*?k^^K~3tWz`Vj(X2!x^J^0#H6UR0|#M3qbMZ$=|VO3mYP#RKDn6m z(nl2`vUIaqt!s(+~ry0&f|m8OoEjY{QEGI)j3 zO{=4gsM#it5#nb#jaRD?Do~Qws)lBP{ytytN0HClaxO+7|ldVxt3z}dKs zt8_4)WOR!RJGKJ|8 zQA{T#ghzF2aO)aUtCVpektjnC&3u>;{Yoiw7#(V2-`TC0e>d{zI%uwfiWClPHj#6@ za3h4TB2w&UJQN~s_%%wpGL#6uj3{vyrOHqOcbP8EJ!XY|u9CzuymJsm*`tep z0}y7AJch`|qu;(HYBVj?Gt4C7r|Kn>ZYq`7EzuEPp<_4)@oWtd=|~by?ybLBvZ_~) zY?ZgTf_GY7zj;gm7wSH^N%y%aF6C3HyOLHN-k;SW{&ge68+s8oqFAfFEq&}sToL*Ri|>xHxL(abBt501U`x6 zojjrQ?f4umrftmsM3yw((J+vxE4LEA(s795YF!TB zzI1Qo`K3}*FO@Nv2j|`e9ruj$q~jseM$$Zps}v=HRf`Gt!I8{2+h@ zh}(0Es$!2a`3&0GKq2s_ND=5mh)nig{r7iB*_K!Y&?*<>)PG}<+%DijBwM2!k-;~q#ht2&I9*G=Glqjb$j|gBD5`4OqJX{!T$IC@hcPba?uH{;tcn>IH3n5ZiysU`~ zB8^Pr4N{Bfb=rRCL)^aqs(;@}BuG1eWOwX9Qek(JwEE}Lwc|dWW4jUYF&eclczyccZu#KavW{?5~=%#}noK)i^5AhH< zbX9RsO(UP-yOhw$AbA{THE(hS!(?)rm3YY|gp(jHoCFxajgKtMaaEWULRGVLfJkLj zw8Q3Iu)i8QmqNIR@xyk)L`w45%fSS2(Le@=FloWhFd_1F*KRctbP`VmGYF8&Ah`_N z#+^wJHv{A`Jd(l@>wJ7T7$l2Ua;alEJGhsZcpqH}7h7=>;u?lZgrw9yfv9o@()lA4=T-+0f2CEC-Obo|!7QvC 
zsnMuM;{EK{u8aJDp2w-Nn;?Gz4=ALk{j9DEAi`gMn(x-qrSv8Wrz)*q1d=6-Va zz6-PMR(#ZAkg2=sJ|d-A48V&?D{i_dpr3qvIB;MvL)O7If8+QE^6tf z27@9D$}z|!NHHc^WYR++E!5J?W{$C*vp6xdWOA4;iaE$8j^SdsF(^6?ecDr-oSD zwU-j(AVJ5k84j<&AXk+us~5augqBE2tulVGzcFn-CegY?(tmh;fxX2;~V)~)_$>S6&InGKhP{bjY(vF*K-ed!>vw=F+ z($7MsFU6vOJ__igCPpcTT)Y(1#Z?>}F;_!4@MF@VQi%_VD;x$&aZrgt2!n1G(#A^q zSk7x)Llf1Urxp(bET&H5XNCNAP(lNX0h1we7$kROKA4v%IZT=W)&O@PDG6bvG@a9M zB0ievrHLa*$J>r+Qg@Q=8DFEsz=3!|;uW?uME@kmXCs=i&&%^3(9)N8uxn!1D8A^-PXnaAd;Io1>oZ=sv-h13^N# z^>>?Sr;Bz%`V1?93|=P4=;Gq>;9-ExyuoJP0H7CxPF(m1PyuAS2t{1Fe+M?ObcOA+0y;TV>yD~OxZEawE55yZtwR&tmPI56>%$thMcOg8;^ z@Xuj#Nj`mK#|+L-ewG*GN}d3ABjI7$NHdA)s=U#nTGoFdiKo3&Tm-K{@?wtw2NN{T z4%Oejss!>YO4ni!!7GtWsAEWcUZONJG|m*a+GqD-Rr-=RhWn8^P!`TATkDU_d~=Zy zh)m^=?rCBt^+>0?<48jCkg9-xqBm|k@Hq3L7_&Mw$e-%5y(~z?Fu%-e2`s?tqAA@_EH^&B}bABs` zUCMc~w50%(f_bpLo#^`V=_jB5m?|T{77nn51GZ7+!N8=Cd=9Xg3)HfcTX>lp>7|g+ zEN^~t@bYmU42qD{b(dauB)PR1NtyjU;Av$*Q`IbokwlKa(TG_**3Z`zfTRe!3lT8kZ_ABgmB;<1ot^q!;n8qyhmnpr{9OQa6ZW-BGyawdp5+ zY0Mn*)xLTdpjubWV;N`o4Burj4bwAcSU;8k<^rEnLFZ}U z5hTadj&4$;=B}0F6ts~5tB!rX>GywmO)nidG0DMDf%9oIOEnY-}jdNA!fgce`9Gh;0h-CRX5lJ`~sU0{A zAvkvZ^|;=dWR*7)ct)x9WL3LtJQMj31Z;HOqn`sK?s{E*$3F|hihMjKmAWwc{)9Y<~z!1XDx4WB?k!MMrpqMU6|BD5n00ZavCDgoG_>dg)!=@ku1+^#?(rofN!Pj+D*BE_;anI(a~2GhXe+v zafFN)Oxb`)!Ve)LXis9KC&Na%lf^0|X7Alf;yMvW{A9W8WMQd@dl83x-6W1t0{jmo zZ1XuCd&$kwbcpcFpzo#@QxSZ z3txllUzIY2Y0PgF(8nkFA%Dov=*PoBuB48|Y~wbb=Pm-%T!YX-n2j9db9|ed*hSg2 z`dniWzz#(DGm#j-R86X{0{%ZG0l!7c(Os~qJHJDcI40{97(g1ZJ*Dc*A?&WkAX0Dd z)k%JXowQXWZQnvj6j>7z_jeDXG6s~Ez7*rCoND?9RXdu273L-+>-UJhVllQPCE;S- zWSEGHK3O$6Y$I+4b63K}r%!C(6z=>2B$DcSL;-vzNdlOSDE9Bt$-G`U_+lhXqn>nJ zVj9zj#e}(=6YQpi(bQaP`r)RR;D$ZmN>9Q|1`fg_@r2bp1;91D z#&Is=3@fMgyPSl$hu`vHem*1DGKK(lsi5-+5;r%H1eKX7M7O%OsEh1$b!M%S$;{FVzB+N-9!pweEHzKi!!$|7N zzUejhD}gT}Y2VKRe;#!stk%Qf#93ITlJ9oFJH-Mhu_0CCKV=9Y|rB6mOI!TgW_hZ zZgj0ai5HNHcGDCikOO=gY0zSMqN7V8-GbC0c%8&kt%QQ^YsDUe1QiQ^4!rkypUPAho_ z1U%5?9$P2*Dg z1W?aHcJe9uxt7#vv6TZXf`XjM)pMF5v@C#=tKp9Akdf|>en}vp^#Y&fK6Y_4Z?K6r zDrPJjIE;gx+{S5E@@f8^&+;8|rqRy3K8VC)4FNyHUg~6GDznK(XNHkx5_L*NrUO=K zTE}&enOwZZxJWFD~?8UFVcM1rKZ1k^!FDa^?bTXOwNr) zgR+Wu>rH(+29ubiCcZ=vJ z3zHxz)nnCgLxp?#T2o8|dJCbc8iq5|1n^QpFZn#pef)wy;1sLKz)vCF3~HOXnShHR zgXFTE+qjMGqiPjS`Pi0|G1;lyg2c#GBaY1?#x^#qB2q(dG8jf0Ot)6&{8?(u z{?TUWewYL=S&eu)-;I>ddm8Dpxkaz%XHf*u?GAIzHD}nk@j;FsZ)DrH6PRYAdB&}3 zYX2XRblD|JS$&9W_@DLXe5ChpXx#mQ+@(E8qKKYDHR5T$9Z8RV0Fo{p4*d;N~GoakOK&!HUZrhQ8i}HU2Cm|JoaMvxbA#?O=Nj#hcLY3L zq+-Xe6s%5@#w1Zf7a!vxZe=GYScL&}Q^ZbgVGq|aNFH8tr%e&erW931TbEwkU^x--V!9$^g3lrUq&)7tU$bAgwf?A zIm}&5f33*h5vl3;65^RYkGPYQ;kP~{T;XNC?0Ct|iNI~U{7T^W5fyKHjB&U`zpq8# z|F&)iUPN5ACldV5MLN#UCrAim5kRXxco~vNGaJcVY0Q5V(#?8)#U4yfViMv;=2mtK zgK-pUw?Waxl)GP@CD2;{1%2tUb1A^Uq?``Q>EJ4x68AoyU_V#!3OBHgJE&(7UUEme zg@kb=MG6f}is|6b_)k8>&)JYbyTC~iz~3V21CxoJwWv&;lE|RVC@ur;Q&ncA8pO#c zECC|%NwEynFp^GT{r$NlkAFgq@b8oeSosW3DT#?LYZo4d0YBvA_!%7ZGdyW`dUsZnM8W*V6gFvT zymMdzCHPswDe8C?FC~bUo;gQn)C2|lVaeN&>rc5%u+9?b^Q6bkrH*Qvxt}L_4^Q(3 zKj&4hVGq~SLlGthZIshU4GtZkrY4dM$LP-NB*Yqy@?jq3&-r&e49?hjyL?knKZwl( z@Tq}*QWuQD9)waFL=C9EAM&0ZpB$o18rL>PtBLQ^ku{Y_nxLUcb<;rv` z8T%aK*8LVzQ*XSD#aTqX{!{(kQzWh=F40?emu?#Vj}pTBkmQR&MmKO@tAEea4VN`1 zl4)h<(tB{b{{KlP-{#qJgWs&1zpHfnk*-*g#(2m=RO4D&@X(HzdL$!kt_WZ&nEmk; zf;mKKJKVM%mYqwFn|BJFgt>|XT*U!C#*cAe62ieLR&s*XI5g-dfSVV&gXg&$2VsWD zW|(XYOqOttZ}V5&%u6#~+I&Xx_$~L=$91uI+xE9SHJtY$@pj*iB3riupV4D{3n{mp z$gXrM+5eUi&P1{@uR_$o7j4FM2@yC04mfKMHPQ$Yw#+G2JBKSrGI-9&n*4M&;j zTU1jDAti!;G1lZ)BMpylS1$R0^2yF91{Hm-Ugeb@Wi7sOZqO;c$;0|SBQ~Yoj=24| zPqX@0prq<$<+HCbI(97sxC}`Y@gi}Yzh!(uz@hK|YrO@7h+{pGu{5lcCr>%{IpBZm 
zn0`BIOk0_@&+B72GKq~!2I7H#ul~HB(W-m07)F$CD+_yXn*61a031lvUnxCg;@90_ zFOe?lvkf0OQnSHs?jV%6LiTWa+`JRvu~kvNl7n2y!BKy|iUZuubC@_d!WF#9dRzqA z%5Ju@dsb%Cv=G2dRZ@Z~K3$1q?6#{aRIP*{jO4lAt4hWIQeDI{mUjVPP!cwTcro`f zb%iofECD_W`~ec`b`o(jT3iVv#;q#|ox!PgTMO&m>X?AqS_M;_hz~Z3(v&Ls`H_{tmA5XYuPv}ua_DYE zpnL?YFLMM#O00gZe02{KdxOFw6IIA$APEtnMQ_~;NEktbzTVPfMU$z0Iv|WsX@xM1eGW@B5vP7z5HDyuFUzA)E$Ysb>4B_e>}l4cpdN; z`uH9|luVy~*Hij^&&K(#WL(0-h#NeOG?FENnG8`%JBIW6N68Y_X~3p(gq ziNyRpW_!l5keLHFZ-tE;;m8r#xf6zmle=|^B16aDeL4nTRRS1Np8hv_{%2!s#BM`U zie5n!P#(~4>;M%@G>oE|G_q%OuKfVTsGrkvrs zYFJ_!tTM!HbzBKl3zAdNs(*Wf(cx$p8CzZjHCoL?>X^-xs9&Mv;s!*~`!OPL1(hdN zD}lWU34IGjais!PXl_O-p?r-c{?>OAB1J+4Ax8LviSAsd^Kt$_Y~ z=eTL(n+7KB+agRRI9Ocsqs37#6%s^|7WS;N@OE6 z#OQ>P=|X0d8d8RI7`a)`>lq>;bAyOzW_9O{73WuobdtDM*P0(F51X^iXA|-Jh}9z! zmqB8ZPyyo4zg8Llw;5XtXBMyzQU0=YOr4psVj(WxFNJT^ZumRx|@c|p)4$3QycMjPzG4PSW5ZU-~^EQ_g!i8b8 z(IK21<_eCmVT|oIFe#yn+t|)c?83tUE`p;zKWhYVmlC)f#2p+*t)xS-zm3{e~L+zeWnFJgNUbtHkX`NCB26Mkk^) zAnr!TI3%GPX*c%_;w4SY{hOo24W-eO3HWsCV*7C<*LpYbAc=RMO+bF?RpvX4#1_s` z1^ZzVOPa8djbzD&lrZ`x$%PvcMfX*tcHcr^`yl*y8?@~LGexB}D~V+vkghtB#!SHv z)Z^eSR?*J8Fu758jdOu)U?U6+!toQZx*2@Ek+x2AjP^3s0ck^2==Qy?0 zkvr`X=MxaX0HOkZ8!5YSlt@<&3tps($PP8W4kB*fX!;X<`c^lEPa&>mFYv#Rl!tf* zyg=W33zA^+6D4F;k=%Rr9Cj(WGl*npI--bPLtG|5aZR+=q{;5}z#po%^kcniNo*%s zh!o0Mh(u;qB9V%ZO&rdVL5g=!w+PxuwT&uUKaQ6AwFJV?eg8zI-k7b*sE2jj=_g%G#w%}Pt2L*kd* zNI2Owh-=Df9g7;eFCZ>vYj^UuRiegbW6qfNl&NKt2yB#LBCB@-#VvR=o~t7_=R+ayec zDkX7^Iz|%}@XK|Za2Mjv{lCO-G<*P(gw+B^0`K*$>XOYari?)-(<4}Dw4Ymag@$}Nerv@iU)CIy>(O@-4`y5ySux)yK8WF z4^Amgae}+M7ARgQ+Lqw%TATvK-QD5K`@8r4^{thxOp;m2%$_sP+56eg+1qfV*xm~% z_7X+y(swLF+d`L}sY;`S^zp0tLhIF0s5D(^OZWgxKMz*x7XW3F=(|Zh7)IhzS}AV1 zo6_7LIgA(UMq3BAqctD&EiG_QCuBUNdF6Q=Zn_-j=-Pua&2dfjn%nn_ zZ%W@neugBdsPPBjZ2uq_!qpT@1M88RMsy()}-mtx$F^zTUf?NGx7Al244iEvlm zxl+HtscZN3OK>Q1Gx#M{oNaKa_X%C4C1#V|yNQdo=H9UFp$p6^9c5&8$+<`22Zq)+ z7v?`gZItC9JdfBzCBJtjygAzn_4=B^R+ZcMPyee{7@sX$`(9^rc+qtG5%q7=u55iZ z6Og+4If27Bt#s(gi4J)WL43bnw>p*|3i$SqFuK7Ql>dIIT>16PpN@S`pv)LpG`ksu z>u3%r+xY>=8(q@Pel{qK>Xx~d!xfqU=_oWN#`eRjW!83;RCbeIJq?4U)fo#U*yBvO zkcN{c(Vk!(T%*_9d$}y9DGp=4!{X}xs%1nKFo#DK?3q;D>kNg%zfm3NKZ$1E<0FCe zGFy)^X(Jl7Q_q00enXHVUARC9c%};?OFY#Iw)%_k_h!IV1|)uIRVJU08$0gr2V?C- zWU0>8PC#9ETp?fM@4Bn)JD~@z=D!drZ61`(4{dlvI%`bB4(-Z8yNS zjTg7$@w@_qpFOD^6l%4izUC3bm7=25^pCCt!D7I_z_h)vu~EEz)OY8g*<* zmpmgW$tON7HH>iG#Q9t<1nR3)Kx+68GHhN?rFPfo~^VT$YTWrJDCZ;!A^gygE@diB}qFrX;eIg1D~|{7MiYKF3LIEv-Rt}bZ+6p8tlhovP_r6hoA4$ zUaZoCuKU%u4mKG7x4Y|`ui`QT(APY@NTEJhVia1;JKCo>?(Bm6qlQmEn9GAIV8a2Y z|E6Oy7*y^X>G2;9W}%(&dkb%riN;g3k1nLc_#px9get8T{WSK0IJNBWG(@tQ^DqY3 zs9Mad_ys;PY*}ZRDS;~V9yRGIq-23vYS=ywA$!14Evg?^2BNN%8#R#%6sC=kY8?h; zng5_X2Bo>8tSrLW*?vOx`)JS6Y1x+}C4#Jb%~Kd3ax!r=*!*5IBz48@0lM0k3d{NC z5nwIr!u$rOz9$(4LNl?)YVmh+lrT+ASbS7!>%yN=`QK(@i9>p!a{Sr)BhUzSjoEk9 znB0@zOnWdpgSf$gvc|~iE&9pQ!NT4nwZsAa`T-I77yQP{mrpy9T1vd#EhR~^G#7a z4gRLefbC|N@PUqij=l}yK8s3Q7S07!kPUr%OoaIJFGEPZuzKxB6IXR)#YS) zJe#jDM0L>p^e$p#fPF22jO_67&#PuFJ#Ls@OR|oVVCSypIyMI*Btr2fHFlP;ZJb}> z>QzjL**XHfp-^mQvjcZaf?5`!uas|xX7dXR6Rjv8sD+@xKRRKYMzLCw3_?p7c@8U?mzXPr6`f)zOcf4 z+WpA-b4s}qdnpp#6NV;itHj+ExLYh#_<17vPV+CsW*O5?#H$%=E@5#-ah3&iUvM4m zz1_p0&9$H}f;de*(VhRot*l&5@TL`54u;y=^n}T49)yO=H?<3UpapWjIxj4^9TcYd zrsShazU2Od`s9-3ytHQOd(5m=PIH`X@_YOdJ9-J_9EI>d&+7AG8&@LJx9jNtu$8GN(wkp+*e%pb+)xt23Kzq@`eCja+tJ zrefKO=TujPLWiyEB6)wE-fIJ0!amhyNaNys;@&FbRNxtX@oFtZZoMSacD%uwoj51E zL2l=@)Y4p2EZ@TB;CmYh;gxs$LeDoJVuUPD;r2*B#Ucs?Pe@e&{iY}Lia;RMD+B&T zux6yCq0v6Ct2g>=zx<`M0(z(+O`bGP?b4=~MHaa}aVw`{f;~iYqz)_{@FM>&usL*p zpY`g6cmLLJ-9v>ObgO8lt1CA(S28hvL~}F|FS(!=VwKO2On*Z%MhL_jZ;cwJt&eSh 
zH&W@hGP;opK0m{UaU{Baxc>rPM%nV-cDZm0_otlamsQ~0^^sC)2LE6u{2YBIVN_?* z4&(3{Oy<-i!me`&-;bDt<7Pv{ZTi(c=NI%=O89;S%RS{v-^!LPS}J8KKyhWo*Nsa4 zpeY6qtb#AJjV56~JFFxObNFQ(1{W=iQ|R}(rD~fE3U|O3Joh<2|78mZHQNHC=uxiB zW$o8eU!^2^%~H=b33wS@<%0r4f+1okX;ELuSJO0-vkPfm`P=i$7U@Pv(Yr1~sE@9D zzX)A8kUj0gFY76RhjY!3!h^+94chsUMywMq_%?;9cApGP0t-$3&%8eHI!IB01f~2?Vm82 z(<+)9hDtW?S_1}mmHP1G7P@;XZhirsN7dndG(LW(sn5FkZ`bbdy1H%Q0R8fEa#QaG z>ap*fLH`5MWKc8$Ri4II+ zBS|e%?b;2R^X26aPCo8X#*5fsOU>au*jt#^pEd(fCSEU~Z(lB=L15%26>8VU34IGm z=MCm*yNNQ-v2ARWNj7c~5Jm7Ut)I*N1vi?O-&uURZO3U@L%l|t0fnF3$OfY%a?3h` zh6FOEq*&WPLgbcJ>%M&v{@p`Q=;M+@p)3`bAf2$pcS!3qQY^yZxruM3{{MR1X(M28 zpf={v?4LIc#o5H*moQ|eY=(c6A}7CETM7%+g+?xEu6uCWnV#_f&;i zt*&+)I8wFGn--|`o>IrCH>A`;6BQXC_}QWY{h>(|)iG0^e`<*};f9vPYxkfg#}g3| zc+8NJEk2N$Z(iBlnS;x?Nat?b3b0)s*EvlxL8o@Yng&9ry*7sI;x%}{;DNvVE-Nskf(rAEmwa&DLWS2Tk*6+9;ci5I zT0xSmECZh@kz#n3|FS@YnnPdcj1-ZAd<1JviQN$&?vs9SI~d?UoQCRD?iyR$5|Tza zbNqWTe|lQ#PO4{`LT|&$c9ux{Gp9ykoq7?PrfsHf5dShy7PW|Q_!n^{pofS`_@Wh+ z+3`{lp#M%EB6<-RiV1q;?)rH9;I5&Y;o2Rtx5xBY{k&TJI{S2EJxs*g4z@d2 zC?$SV)iSWzH@KNhe-ukQyMav&0?{u$HTBAyHRRKhn8V5A7cb94@8Tp23*U&I1GRU- z<@7SjwT3Ynnrvh;9v%OdV>#199s6)KM!GSg2gFIDRLwnPvdUPLk#Kfs|8Ir) z+;R?2&$Xk8JGAF<&jmt=Y`pBY|G0X+?2lcdDe&W?DgO%^$w-UmaPPPXN;|cdtAD|8 zTcWjv?NTa3{}X|$8@usA^Q4T(&^@$$;J%->kkFj+mF2beT&zt>CTQJ4->zB6>88r`YP=RO*TDK%> zB`?6mMY1FXwTLrab0pHySSv;rfFMJTgWs9~fZu72psdbuPJF-^2)CxW3rlZxoTaBJ zN3v8)OvxUmNT+&9N8XhpGHzzc;~JvzsbF5ok7aj}d9QVz13eMClM)d2Uwm{G9JmJ4 z!hXqfY%oqpIs5qXCMuxxoZp@5yz3e$oALcfb3;UAQ}Tnianzx6m?*6p89J7+5$C^luODyt*T&5rd$SOH^%2+wdP(RpMWbmVX{$Z9EPg7&Y&I?s~tQ zo||cKGe$4($EMJFm7!+L7a-ppw|HlCX|-jPmp^hBD@x5~%zw#bmA5%M#!22-Y2FsV zPzyJ(fI~~CS2<>8IkuYqdgA+b$F8gUv%&ksW$m2mU~0i*e*#5BR0TP@P#LaY8A>z- zoQN*+bbwO9A)X5vj$>UElcr~m@vy-eez0P1jc=QTnXEWs zYG4ppN~vZ;hT;3rPK%Pf$}Z>q+yKQ4B@Wk7mGFUi8~PDv$kBf4JR10~!)Ps;vy*O= zwKQF)LLd~U_i9H*!sjeZa6@&?(@E;3w&t5T=U$9kqGifI)ZPO zW<20HJ>%do&nP30mfcoHgYCK@j~%S3FIwJ{BtJaQg2O-gYG~MoC-EQ*ezIGs4Lnl8 z3C|l6lmBMMQ0sd=q4N602JF-5hvZ`VqO5AHH$1 zKgNowZMd9H0m^1VlZ~--tRC>XScj}%oJ(E*OvHeX{{!L<1S>0xD4u5YL-X?!GBY31 z(Dn~^1ub#$WsDyRM4Nr@(Zf0FLMMYi+>11o(A?2>b zOLy9)0xmXgdyCN#I#Ze8SO-{iXcj&>4|{Z++Q=_Wjj0ukH11@6Vhpao`6nb;@2%r< z8W>ZEKb({?khDj@=$`(gO0y6~Uis8g{^B#jT!8RJ^Z^=l$|00=#+%7WEi{5X%Du3C z(MK@L-QDGkEJ4zb#L)&9O($k%B5|qVUA_AqI znF)-FvRL3_zvTXMqpX*Ype2_BQ6e!9aVdoXKgh#_=JmP$BV^;L5VW5ps-7}R#GN~v zGx>dOT%p$`dfzS<`Si@p=4_mFs)k7Pi)rqCm!>AxW>09k4G)8e2#FG%b2=KYL8{1i zpS5rCLLoo<4H*ffPz9sdrxYL6?7VbgUYZv(j~Z57Uh}6J7l$qa6@$OBr4=blacFKd@S-)`0STK!BZda8z6OA*%pxt0WOSauir+o@XezcC5?^w+dxd$4tGvQ z4T=JUFTS^w&U`XFcrsb{QH*}jM5#>d~~c>@Mw(K6fdDNsp|^0>n@jviQ)1NvfvxR@_##9{oDHQ zqZ$mxZIkje)C9_K!53yPIi(l> z{!Q5|tmOB^<+Jg|P@+i@Dn*?;@#>nFnJK%w7rEfq(%WcE&*w-#IvNYH*8IZQu7bA` z9jj3PSWDEn8*iI(Js4^&LP+?5Mtw?};rWe&@z*bH659)N-yL*pkFzsWAH<|YrQsMt ziwtA=t$YDSHNNVuTtXesW^i+}ckc1=F3A6w`N^XD;|;lyC-*)0am+xD=&K0*V%Mrm zdUv5X>&1&`H|?9phOg{DSCRPQNrqUgq7R5~0Spa#FpGUDP$A3A8WZ56;`o>jRDjzv zab>>{vX!)V)9fme>3s-wJxX5$w#L|C2vTo}o#?SLvgb@XPJLY;&SYJM3~f{#5LBi5 zS!I2FRuj)cwH>Xsz__WYE*%2tj@qIbj^KH5eZ3vYP6GT*B%FexSN)t(=5AMfq~y+*5I z-Fzk#qI7a91KkEa#Hu_~erPqt%p{b_CtbOx-b+4ULl!2hp0q?vOG5F)bwx}Ltru?ttCMkoe`a6!6ZL`v( zSSdDzuapxvNg|HU!}Yk%dQa?tZsXovT!XViijY#WqQpmewq4Q+fNnP@@cjno)8m?F zS_Us6NA|5p*MoVZ;rnIu9DNL1;b@VL9zA!|#>>CH9a8+0BaX6qQ^tS@g3mz14O#s7 zJuW_7e&_kkV#p<%N!ZHCOp5<_lnW?Wg>OgI?$C9~QU4Bf$&!+hdYg0IkvUIcC= zzNkk4NGmYkUw6|=@mT%`g~;&>flSP);GY5Ke+WOY{gTU3drm#Ni5nknQ-W((NU-^P z>S=z_BvHM8%Vq4E%-=EymX{mr3c{H)(58uud3hEmUm2tU^o%=5<&H~s0 zf2wRuKd&+j#z_xs*VIpmyU5CF2mW=)nD#vv9vvxk{8iz&ZT#jVzGU0=>*@Jf&QOHx z+B5k57Fj~2yxRiKNfy?51*ZEU0O#c~(Y0!poiyu~M_qSCE-k<)n?S0DU)}GCNx-B4 
za;euT=u;cm!CXI=Oymp<#Q#)uo$K~iY zb8d!Vhka4z#)hAL@VAwn)Zmux$4DDn`}J~RVH}Gb_4t4X|B~M`-zIi;X6x9~Q}11h zUJ4BDEPSo@mtv&&s8TB_=ZoHmXGBYx?E+2zZj{fZv03xPI_MjY0j7bp=&Ae*RqX`*-&KKC0Nm+Dog4?Ms+)FXSRkU-e?XC%nFH{#vp% z>0=fk(60f98qmB?JFEYhpI7&Jke@1C^(l06oMQ|VZ?VJ+1Pl#`V1?A={?Wo#tMBGu zG!+v2r90NgO-;}}P30a8Np@|zo5k3`CP00m=0>9Lo4d+F9U|*~~0(?{4XWD)V{n z6kFk#dD2;gj5OUic|~=fR#9J(U3!qc7&ms_a7t+AAXTCd)c2!?H2|U9@nvUsvLq&gc;(U!2j5O$OzVngLj^Gsg6GlM zx~fot&*wiSjtW#7-GT?U59YlKCil%`ZCJW6mlbs|3JUv#*#eSHN2>%u;Lex@*QPcU z7)TR>zF?z|x5tT#oYY6Lb~7Baw)R0eip01XR*o#WD2Rt^`eRxmz`B*p5kxD~SPT_> z&&xd2K_(vfLw~u;3`LPK{rYQY$N&+AD8pb&GjVro>$aUZgpj-keLUdq`NhR=`G5=t zc2ziI%dfWO-97s!zJ0l|QzG&^<$kJVHAuL9u;*NSVy8t!*vz%xwbyI24aZqra++R? zcyN*K^IJ@XQms^zE5-O;gQ!$FX~UxcnP+TWW6?h0R@`d|JR#xX9I!4=dBkG(+Q@D!6C0BjYYh){P_Y7SCx& z1(upP}>143xeufM+P^ zVe|tMegShQhr%Wnd*xj?iXD;p&WJ8QjzHo=f$7c}m#drEXCN-C0t**)nCuh}u9Gr` z;?a!#KZ+LHT`%U0g#u=tD|xDpZS8_L=|zDtSy}B!a429eM?`a1>|eh+Pgi}SUM@h1 z-@iliO-jki5_)^D;}Iat`+FP^qqst>U*P)XR#?V0OiAAASb0T%lfr_F)?Y2e-sPGc< z(kq_9!SK^Hzu2*{5_pq&*H?>@3cHsfoRBXBgdm5;m|yIvhK94!pMT7ADp$61=YOPb zZSj?s=5c7E#SC%^D?!#Y#}lDxzf`sP_YI1Y8~HeWW*57Ti75|y*(`$S<7XSK)6jV4 zPSB%byMC@zERRz|@e;=}z7BE@pa(vz)7(zjjp{>!UFBL`T>x_Ku@kfC&2~JETysJ~ z%!^1ao|UKs$Hv`QW^7P`f;**wo{|{Qq6lfFvWNit@9Z~jp4spLT{}*WNFYYjNn>jM z?R&0Yz3d{Hz-r>h^5T!P*FTg&ksnu*?@7LEQ>~uu?(HfL4u`rx4Q|%fRv-Wz=m)R%)4o-5s!%NYJj!1ami3yDp_*sG~Nww zkKy2mmNw0U$PS9#v<88Tk(NVa*>aP7-qOXgd#>(SZG4|{ni6ViO!)cvWQ2?#5!$vk zfxAP8bVT_a2j7o~+Cj}ZK2-)mf^jaB7W0bg>ck%=^}xn5uG|6NpQeo|dA~z}P$1;T zZxSsaj7M(IrTO10Wn>c{Z{0MtQBiVu5_j`!MFtS>$JJ*;e8KqcxQ2_T3E3Cs2;)P? zEXll;L4=fa%bF6m#lgK1PyDMIRNFf63WnM0X3>O~uugzv@ETHLKkWXflD-7UF>axk z8S|r8`l+K8M=niCiSDWXBkkiTU?Uy%aVvUoFW{Qa>&w^%0wVp1Z(G=SXJ_-$CM#?D zEI+0s8$A$`!IguuldqF$w0wsY8HtB`8p<;3c$1-euR1i+BgG#cuIG?9-$h2wez4iQ!zb_s0-#)j zdBMDCmI4t_d?$=rMBHEwy4@k?tg5FF!KDuwO^0Z1K@%`2PH2U)OJ7!5QR>Nr5MLmfc0?eQrl9X@R0lDMUpu za}jLNRNhP&GATd}Zeny4Vdp0OIQSTONlq9!vGA2PJ+9P%np%UWAACMK@4YWo3Yk$UjbqEbsak2frX$?t&N zN+Q13O774P5AjfW8lQV1w}Z{b%hyxa#u#`l95VmV3mUCW`&8hJj8ZI8&o+?R^SYmM z%H=J8;%?8dZrCn;mI-u;h?8%q4YTnyHJ{UVR8^I7;dNQuF$~F@BK7<=ZcKB^1N#L8 zlp}10!p@eI{n8Uq!{rtgU|8&Tu4%0g_I4cp;-=Y_^?ImC@>dqao4^fbis6HoG{=kSv7*Dm65WG4r`OwG*C zE0~-64vr;#CL(mhy!2t~Ca-w(1OMu9{E4NqJhLqGReOF`?9?A!6o%)@oRj%(_cA*hxZqC;2_ z^1xkMNuXPUGWw91A)sHSpKnrdQc1`QE%>BpOI#8m!0nrv{qG-#Rmfrb4K7iW7vUAd zy&dRGx;x$afbz`&8pf=oREtof z{!HDt%E81FhzsB!3;EU-7PN0~7mT@qL`-S0wVJiO45mHqn>btuIThy_qdf~Xm$NFH zci*LTm|XY=2Py6^B+C(i@Je}GPQ)Eun18=hTZR!~{gU#cw4s3c%xU<&XJ2#Y7lUhz z0uw82_ITLnE=+;)E3-fd6x<>7KR6n}uQ%0rrd?=?$ucx02c$g-{Y|LbYmD zCIrQJkrJTtUDkv%!@4AhNWv!b7dDykTGHa;td5V{gaf|RlO$wPAtZ#^g#j?=%q-oY zTVVH0Q4xNw_Li`Pd?IKkr|`850qol2;iJ1h-osXD!{OaGT00&|x74auf_R0F=&h&c6P4&02A2 zq#1;H=$BS%iW%pqRBYcX`B?O|F0+UdnG!>z>znY?4Nx5AI~RbfstMPXg?oS(XMWJK z#O1BVPI&>YRBIm)jzVg3labjQut8eZc!hS+wK^4xTkm=2Bn}IkPel5O2Jf#jgN_=5 zPE18648dEJqcmH@@lcgvJPa3f2f8jaziJ~4j3FDB2up4+LA)UhKFZ)&v-L2(DMvC+ z2rD&I^j#}9mu^Te%&zK0avlOaJVg2ZbnI4(>fuq`N_>1QLngL7v^Q>M+L6E zjfQ!7f2-;*zExJLqYq8eBt{IOjn@Ab%E;@r$_2j4&e=#iH#66d`1d*BS#<}AI{(c8 zZgf%kto>0}RK#3g7l82S#H2OQeUHHdgT&}a8W01f24{|09iij*9&OIS4 z;D4~B50$Re)IpBH#2h-AKL-b+m6u8>=OTfNF|@eZX)6)(&;NU=){E=m)Q}TSAK47 zMV*nYQVN&MIJlfsbmcDDONEAMLQFn6yfbR=H6J@|AV-ywJbgez_9X?dn+Cv3SH}P5 zNZgf>=)oeD34n1^|;2>2o36k3QD74YGCFCF_=7)>h!w&z_ZWG;Zs%e24a38XPDM6W# zWOsOh$tyxak8`DIvZ}&jOM~OSdDnLNRB(H@z{8_0&1ERXLo5}i{rjXSrQTJ&7_X7Y zjy-aW<$u@Qa0vp!n!0@IOc|93)~!T3vc-nx)D8mUS?RRIgL`|L28g0QVH-ug2jg5k zX~00Tj(=DZMcIG><)9$Z(2#VgVyy1&kgF@2z%H7;Ez{TcT6j0CMi`MC0Ndyg4#813 zD_%TS2&8G(^OjrA;-Y@n^Fsag+XV<@UJpSeFE6jlc^!?bMAyJ@RR-RZcR_P0C;n1R 
zRY+c}($UGOw7Z)?O=oE|?N3OkpZYgDUdor{Vc*xQ;guFwN{;M!IT~pL146}Qiam>u zI}5*;vliE!1G~IwyXP0L=?(W`^1#;}%7d+F%8H7LsMJ(!^{-;17Htur5N1m|QFnKW zCQ~-#0ia8^neCN}ao>aALCh)?^!Q*LV`)-4&(g{S)|zk8H-TCBNv zDe!6BVm}$8fnE9mr5jm{&pLVW_D|6F`ACBa8a#0FCq!B#xC2Q{;H4`wGxK$qQF3du zH~i*ndzKE=6ra5v4{`9>Qg9+fO{nNJW!it>EZ{P85q;&p0s|eRAkQ|mvw5?(eS^RK zK)8J)3<=||eB+R#$p?S}X-d3DKMPfdBEgZUxlZ_O^O42qdw6R^IjU|Ko$(I(4qQZB zL}ZCp6-%~^bJj=^SFAx215B;*3z2Xle7bY#rVpe8mIX_Gg zh21uB9@c#l6e)!J`Y|cD>e`K$n=3PihA6&&M;Mtr8w5HI+lWN=UNT&wT(Y>~3Bh_n zyHJJIhUj9Qwn+{XI6)V%4JfOq5R;SBQzTortRCFub##b@^-}d(ri6us`TYo7Y9Ev? zF~pcEa^3GmwCFwK&dUZ6!<$Zhveb(F&OG`^{UOC3? zp$n3ppZ!HdMU~364cW51{SxM%*z|jBa1<0QEun^n27*p+V!buzsB-ymlUI=uST9y2lFjgNe8`8B(?HukArycofnnJ%TPyL!bZ0CHzQKQ>FgH2cMOXHtn$ z4)3^ovn??rgnQBa;Tstb|7(mLo*IMH*T{Y8+jFMAbyv|~Tq;z!YnuIJW+oZ*x5s zwOb(Or%gVNP6emNFv>DzM?i?LWarMt&=_7?3W1v5lSwQTdStjhAu{N(u_yrDsT_@; zuNX&EG%FD)D>YWiZki#RMK_UuN)jD>8&<8{Hlm8A5*?{3l)0oMx#*V!Nl27X@NjbW zMHEx~7K`$ECK|$vc2_5b$TTJdUWQL!mvg>#JLGw+uqi=3_h1noEUV%X5Ws9+`RY#^ zd>krDjvYvT_-R+;Sv>X;&Nr@wEIT*h6UZ3l;(fb+u|kU6}oKtj-g3&#dFx+|+h zS5+U;Vlh$SjSXLL+Bj5&NfkqNOBMm>2-U`=Rd>VzJWBA}98(Nw%UX5Y-W*VjxG0g2 z@!gN{!t0bqCxz=SmDSa;-R~#e`xfte77Z)TfjgudIqo2c-^O)%Po)1T%y+F0!MgD= zglHF$ifh~@xVvb38yFWKPfSc~bU8eM4+|!`dN^|hk%nwM|Jc|*{8M^t=;FrULs$Jh z?V!Val!1aYe9P)doJ>7U)jwufoDK&o(fHP|7t#uzm6)7;@W2xXWiJhokfyv6Vs7@B z3AOiZO}s5K^}AZ9){Y<(-nnJ<31XMHLEzS~e@oC|@j*ej`_%ky9g8lqH4{&!vfI)JMInboE(`AD815I5@6{bJ@# zhCipROP4$7^ADmGWXm0l#_X>%)6}5_`Lw3-@u4-^Ps^XTUwBgl-O8VLKA_*93M&Q$ zQAS2WJO73dN#`fv!WFmyVD(CHm$!}~SSH1-hG1klc{QTt}Cny*67!BH- zDUn4rZ(W?9uc9s1WTP!FhiDfiCr8sSw89$=hxST-Q;OKFoFPg+LSu?uLTN}m(M?N! zh*?{=RCtfrOTHMr-@ASgsLegzR!UAnu%JAt1ZcjyH~QNVmkuXQZu^U@1tl7KY`fu{ zHXdQJjyBI!5_vXK^bHY39mKs$dw(ZcJJ+U3HN`>3+=QN@My6d+-FABO(X)2@)kqip zX3%ww&tcX!K<=~F%Lsu_;fI#hv$Zd)pI_%B-*BM_In8wyrAiF#D10{|bCsTs^`8V8 zbvWgN8PAR9#TKEUXTH0;JIHO7rV+S}>-?{SIDSw@T^(b!#q}VT@$2S~mVa%I7I&zS z0nLB)P^}RZR}XqSIeug&h+JTC55KzuVh4V_Y{61up;~$J;t__g*KN5)RMnj9Zua8- zm-n~{#S|M4dgvUQDkOAkT_Zx%D*L)AzsbL8G$7-PeOVJ~F3(ntLDkcbDLJ9S*%`5M zVLzNij)soDewlX!DAyLdU$z$ec)jrq9x|3&_^G|dl0+#vvhG7kpQ*h^Lf|Q$_mxj` z0g>_<`Sp$LiF{#U;T2-&y`E6mW{sb$Yshpej*iSOEhdw>aq_9kN-e3HfMZm<9fWn4 z6(IMBjVc|^mv4W_AXz*#41I1P!_h+r;!vsI&Z)(3JTqW?zJ>yYa!LbBw$P{70a;GPF|^6=5|@%E(PMn_goiDoVT zgx!jVCw(cnLZK;kcxXWF^h$~=hXD@-H$Kw6RZLeO~686 z1YJ59Kp18VU~tDClMh5qx(M_sm}GgV>3*(34W9Hkt=D97u^G?8z#ws5nFgn~A&!?g4u zXSqM%kTJ_GFW+DrZwQZwSkjE!)=nQh160y7JuRtnC}21FOH9mM+2Vt1P%p6cB|Am^ z1)9?h-W}By`Zx8t75;ebe;i8AK}11Gbv1fCeKp)BO+#4^1#BkZhb9|Z4ZgApy|wR+ z9d;Nj?6r^((iIp&IblW9%6Q*Fsi_$d6IGRTcSpUyLn1R^8Pc-<*we%=*1*`Nb`SZEa-W zG$J%jL!{N;O80K=jo_$e7pr*q(|b6Gzy=TBkC~acw6inz>sK}Kx~&w+A@EzE>LP#b z|L%oB1e)~F-vXCU+ZkErwr-c7H#y4Mx_}&PQj6~IgDYS7phZI!Z(kB9_^KSY7u#N3 zmRofaD`mJXPQN&EAGpOJIe$u@QMDAXXJSieuvI}E95jb`$*sQbi2eO~{TD-8}U$l4KgG;^OJ8!DVu6;n^J0TudO9@*X=CEj@l^tv^EQmC57TEq@jk zMs^VGhW?)x;L#U5R~3awjwebS;B5ITIF%)T2@l!Zp#Qm;F}E{E#&m-*{z`;dq)3Vk){>fWSkY*Osb%9GLr+gOBQr7m&#I7cq1&`L zj!j-sJq8%~=6PNaBNa`nEU9aesLI?JzDGZhw)Nkng5V9Z!UKut9dJYj0 z%V7+6D`vd7C+lCzX)$xmS-t6u^grp_N5*V=oYWd^RTw#zgv!gU3naVqdwU@%i1a#n z4k+3f6<^U=`{gi}+cMN1>Vl5G7jT~d&(c7F?X2!HoS}U#%T4M8DFbrP)rs8holVN- zZBH5OmTZ_%b2p<`&7gCiCM=FzY3uDRACHK!g8U#?%lenELqiw^Q&2lQ5CU^HYX8#e z$N<9n7>I~&z52ygbLaEN#Y0l*$lA=PZpiMj&N{HaOyyE%N`v1hth0!f=gPW_KGnee z&dwU+)?&g#?LUeLN8CHWWwde6?9Ac|`lYYCyR+Hr+Vtv}g znn^H(L(M>fn_zszGgwwez|~f)JZuXcHQJQgGi5}w2abEwa3?*UqQ}@a5F%0I_b7V( zahQ4FrGgepkF?Bl;LEP7Lx4Dt!l2-k{eAu4pWEa95bd8DAv1}4A?4-G+teC@27s4i ztlKQy$Db;^{sI`%&E(`m?C*wyoO-h2i(MR;7ArMMCF1Ez+D)2BFe!BFEF*!9yL-K7 zE%6H%w~n#MeCCAhc3l4@d!L@5ZnMm_f71sJfm1G){5-CU>JQm(h2m@Zm 
zZgXp}8t;u5J`1Wa=hnXzkkJ7;4GB~7y+6s*0^8QZ`62{pGrPl|r5A25gs#MOm{Eg4 z*yQAiw+~Ax>iJA;WQ+tVbN@K(cAIz{wMsQ>3Dpb3ch4Qcm_O zq2JMY1uR~-Umt>YMi~kB5!6pUEdYa1nms0y3)II0t(#*Xq+7%A;|8som7J*{WX;xP zBF&7=?8syyL~PmPk_<_eBEvN`f|!HeQK^P1;I?~zx`G?|Na!V2bWL7BUCT>|HT`5x zIkyh8Eo42_u6jKj0}<_Jgt2x(zZ2IkDRL8#yEjHNSw)G6s^|!swag_(W>N+YgbK&# zYYi!H?|P>}5?V@<@e6sykQwaHpKGNhMH1xRs#(HdWhf3MC_C?8(u5&?rp+)UXf#l? z*dwEJ9y4u=r7tnPyR}ojA7~veeyg!%Jd_vuF535&7ukiczS^|p%B zPT45uJ=onEA8%x!Mn)4aZV-}Ky@LsoP-<%Xs`~%Ybd8O1wOcf{ZQE?vG|t40ZQHh; z#!jn&-qez-qi4)*M`wbpvJbRByocl40U!SB|UMKu{ghk2=@$h262 z2r&GmD2ki_u`SClSta+7US5bt3e2_7Ad(#dtm=F?uI_&?5_tm6nVVg+O=hFCZ-IP< zPO(B);W5`Y^_yZUeqUEO8K3K0_03$NswwtNP0^NdZy%k|gM1;E>A4 zO#sFf=4UuyIrl^qyB!TtN!^A*NN=iW4vUw4andG^gc$#KUs2+wau;TDSLU5?l_|v zI^~H{C@|5o5&h7pox`;YOGLvAP9D2Es#(h$IAbmA>(exPH)Qwdj{azp(_%SGjc5-x z3i>-f9P||-Mta;z&V!{CvqUAvAa=C;r;cM?jul`>4z$l5pu)mdHVI#zyy(h4a@KS`Nj406u9mqEeO8 zo`5)!UpA?DLnhk}2Gr+b;!`VL`rB|!art5G(Zm==9}IJl?p&WN566;a7+6V5+a7U9 zNhJW)Ex(Ybs80vdZ_OGJC`inpfPm)0!W_8lcud7y8(urfU5jrg%zrF-+vRCPz|fXf zzijJhBdl|@?bd&n>49jV*4yMmqk%&53tAL`EfRA1U3$f|2>R~}L|ePpnBf^)I!^Wi&)ox>Cus}aij9Vjg@(>4s5@~mxp{X-V)gNl^0J#7 z=yYw<>$E>x6Lr6`kqFUcly#VnPaWdGNmyolRsd1l$-kIa60NdjH}v+pQ(!Xsrs!x! z%1}{laTB_r^6|>XAmid!eVsy2m6a?4AZZ9IH&{EO{e3V{4q3Y2QPtMA?cCxP+oZCF zz|lmOk|GkuQDul$Vg;W$qlVy=ZIv;9cv2H|0 zrX}EB6|u9Ul#^@nN5leEy@mxE`Z~^NL;Ku^Y;gm!w>mzV~MaOGyvy%M_$k2=C*h!xd`Bq z+}xPX=$4JcDmzWBIxh8{+bZ*s*5Tv)@px$FhnCy zY_R$!ZBqQ-?E#bnpfSdf$0=eZNn$u;*l(Y-2e*KHVGvWlvcyo*bU2foSnHG$6V_n^ zm}4~9~J;VuL*yGx8|!TP<}PM{ToIUQw(cr;v^zoCnWlA?%f_qTpO&z^%CJ{ zh#&5!gT9sQp^flTfr70{Ukf=#OSF7^UQ^uMj8M;~#B?xfI^UpMKG7%Ms1FlH^-n2Z z_YG>ud}8|%f8keKjJq!-WDOA(_Oa;i;o41{Wn5;M&%c}_vyatvf?E=}IfrTOImV_L zlY_s4up~!DXrwtv3L2o+?QQ3)SKFW7Rn?j+t!{pW;G`*Gq8)K@d>x(|E^OuuRe7gc z!C>D0-M!KhMNwAUJf<=H+^nA3(xS@x#4REg{3y2D~@7 zK;^KNv3uH}-G4wdYA(lqoeoDgF;sTvX0*(x)A?PdQ<*T?w;5M~K)V$YGfVMrjHDz2 z8fuKx+(C-{j$-cq(S|-|*aTPADuX#!69o~0_RH)35qx#^$=+au0ZbUI+f)=`S8F`|0wzfx(5Ijqd zNDb8dfW8SKBRwXLpWUfMG2=U9m3Do6f-L9#&Q5v$Q4XA@U2F;#I^gC2tI7Je2i-IA zp#-Wd0LRN$eU8qQMC?Zm$CN!`rbrzh*t0n}$Rs~_dp3cC|5MV|mRqaG>v;)eU|!wW z=Twi^0+Ak+-C2li7##0VoHFQmm4PW2HtytFOm9`Ar>e3f3raNom8cbTdsuz9DZ!B# zqR_xTdjkV3UsX-IjDaRD2Bwthov-3w8)w*Z+iB!Hye8k;)0*sa=<3;lw%zCtFwg&7 zy`=Ay*wV(w^(iB)0+o{^X>bpdPcg8U zh;{f}vQ+ixYKhTBnMLC*8S;V(hYU3dI}vL*@*5VOCJ|lN_6-zgne)qUIV2ri&R}j# zaX~dWOea-Hwe_?a7wAPESrSCJVXB2iXj0N$t_N>mK$VK=6#f^)up)SIfrZD-)8fzT zlQ4n;2i?-~%GMVeXQx^z4WRuBU`iDpniK1VE@io`zH*!k=M)te( z!7i+in^#$FhWEVEWOf~%>V7$u{CU*`^YjQ&ALD6v2qn}pHYqHL{Mh6yPti|k@b($0 zs;Jq58bV*J`$8z$zghe}kZaN;$3L-5l?uS>eMB&lRJjBMnjky`X<3F((pCrF{Cz}T z$-=_Y!KM#(w>bkzlze=WI@x-p6ZG)ciod0^W(P+N;tAz(mY|(Qk1GQzI9wgkinIkm%a=bZfhIW;AMI zijG~2DybL}pK(}zuj-gL_^CK75FC&H%ZALGy{YhV#Ypu_mX z1@fc8{ngM-507OeQsLww%8Q*n`1@{9d66RjQ;J-6%xEpp%>lxWn014}r}nrT7yKAW z$_DZi>#riODoU%zmvx1IH=b{)UZxLWfPa>VNN}y=7HhrD$nsYmG7n$du4}@cvt)yA zDj=!z5XOz8vU?T^)NcyRWaSwtV*j}-`o;QaV5y*jaY0NM-QmOno2bHPCPo^F>Nu1M zeOi6pTqGhjHmt0tl3hNN&&(jbTC{h8|0v4{|DJhjYd$7xE(yZ9Fuk-Te znP%k*j5uzqm-57YvQk1;1S`vsbwq-&Jv@H4F-iEqW;`hIryj=p3s%cdUmeiU6TQ|t z(^PnSXm0XTKj9&K7$l%rarEK@C@gg`eZ~#J?!RmFxE1;#x0MbR1PqUV@stC_uo>Xb z05kT=`S>exm#0BdlWTZ)cT(puE;3biTKG4wtz?-+SDD;9iXA_#kmTy1PVlF#LnU8p z^FU3}yZ7_Xr!M(EYb~qj`2VO~KVY_zcEtQ$@$09L1q8%^G~V&J#mC6zS!zS1L=TWU znm{(y->KUTJ6vJ60*R%iX+-Siv2!8EhpwS!Pg2*Y1{?QTU40TC7iaLW_F0SLR1BDhdvAb&xu$`^y}RcMx3In}l?iqJrxVA`nx`L- zWa3jXlR<7=1Ux{GAs~R6r^N2z@p&WgwM+PEn|Z_ImkJVcm@U-TsqwLB8h`y>7TChY z6I$JWC@;Ke3r?f?AkrDmfSIL=%N($zPy)#jTBo&czke0w4N)6(ly>bsWIBYt%SJ!) 
zi|bId`C55(HKFmJUbXYB0J1n*DJI&w?NBJ$#jR!;!MLiaoz`!E*k#Fpyo#N-)*UXZ zXvugiahhys?jqZh8n57(=dL1y?6p(i)gzh^V_Nk{9Wo~evBhH_y;DbqvMx2+5daLP zh-~caOugGft(U}@csp7_(qE+0UlMGDm~l(Z8);kw1D2NVT&{YY8iGC;0m{qtO0C1{ z1(S~?XF5nyzZaifJg6^xq-;^X{e$p+oVD&xs9y-1F-CPycJ{M{%ar>#SL~CkY zWyhfebPB@X;K172-V_}j$f|RBW@*IhqChnV=qc-*nBHF9kr6rPt~dezCjwYl6DrIO z3?Ou(C@Sufu`#M#6s$14zdQdeb9ag!&GaT|zG2o1{&lZW^RwkD6Hfoh*%gl&(QsGg zt^Kg^?=;i=ZS>@xr+Q^lE{KExN&;0x*9Dpzi*e{8|zb|M;OOJ!@NAfRWn~#V1&u=Kgqmj`QQsSoBnC zDs~PY&ljTmQH~UqFGX>ElwdP1nNTOVG0I^&-!&;_6Ot$A6Q{iFy`lLpcO`5vT23|u z8VJ5pKs|w#VUtVm1as24X-L5-a9jv=dsp@T2vznwN6D@0@!uOYv916VD zL^42tL^3LA8dDcEe)?W~d7qK-*+3yj_Ly(5`RF=3m6Z3VJ)q-vpCa6t=Qs{0 zr#d)-@ofrmB5p{eXfwTjD86`{XwSyqO`**ZMgO_--z12F=w1>|Lm|JOEz{rY#4*F? z_G=3P(+=slp@1Hhh@3PX8ElN<)oAL$CCFs(IguQnvVFhvn{-*rBxYH!(e9zY6Esl- zOm%soORTfn_!~Tm+H`hP^D~Fxm0yFSFy>PhGn!$*lc;_e|Y)DZ;kDxw>7%3#Sq(8d;cw6KFJ0 zu!iKK{ZrgVn#`q)Q=>{>H&(bJvwW9j6)Y}BRdGmnfQaYii(&aaA9DO zyL+_X>P10SqGKZi{!Oj+yfhWn1R#;c`t09JOBQ9{(?(hLLk2l+;;+GPFus{k<~a2j zM>_a$%9fj|ZoA0QN0+q3>@#BjxeaGx-0g;R)&?QiQx`w5OZ#FIv1DFfQ1GYAF8GsD z;ZJTsK?NQns$`HE$CyVM%+7B*qV&)is~M26yI$^Hy;3Eq^0H83ySFmMqx75wa%62qL(FC5rwlV`TUf%7HuuT#eP)WGF%a#n$g3Ct;7`*A)1?I+*i?h_wnfG_q-^2EXa5tHv z`aLoo#Y)SuhZ)UZk^5;ZL-#Rh$Q>#s=5Q$Q z3F9>1Kgw+2c3rrkp}TIpS@b(nMt?fgH=D`8Pfx$AYtfI*W)f9PNnxjBrTy@}XOD-5 zy!NJ8XGg$Jw^SkAQ<8NmWPl@yRE`Uc)1`-&1o>-2GFY4*^)_oArBnPu zG(}7^lUW3I$Row`@zrVD`Z#@>G*8ax&7Rl7W=}StT+07X!|}<^oJ(!Fd!C_g z1mMlyYdG^4brL;X=3D=BY_7&MQUa>1EvwmBi1Rb-owj`xBWTqhrzF>CzkHv!nq)~D zk}Ua-7%k#6eUcn=fZ<6zVYTv}s2pY0-Ms~_f+!I;hFJ5G&A~lf;aSa&IR+S;BNo!chr=BbzvUv_LhKx%qI8q z?LWhnkXqTb;+j_LCf?R{cwyMpzNRkM_jPx`wH(*LI21zv4O#@j+r&#Ya!XE^**RAD zbr*Ch^XX600015&D4}Tmp8Iuzhh7yQ zBVR1gHiB?*Kp`imwv7O&jLjJq8@ zUi6D+!JbcG4BF3ABVRg7eyHvk^S)oz*j75#L$N1Do(u#@2fi47VW znnRf7=5%NXyY(%8#D|BEL%~Q|I1ESc7+4fU`4>^OKmMJt z{+*fxqKAy;D_Y$tWGWz$L8v0#80%^YEsGt;}WJ^%Fn57JuH~aFl zu#NASV1`RcHSzL_S(M{sYr62>wqGwceRpt*x;?Tmjgtf?G_vsFH}~cQV4ocC7zCIV zQIci2ZU~Re`s4yVoe~rdcYq0Nn226qSi6Y&#EYvjjOi-DXt^Fl{TDgBBBUw{UcBOb z@@LTWAZ7O!j))OvX({fLKU+>Ndr~1eO}-Kq(l`~L9_D~C#t-A4sj4oV`$v<1Snx2( zEpNFV(l*E9lH#X=V!{bXq@~SGfT?%6{@@Pc&1WY&BhS|CmC7`e_YD5B58=SGk1HHw z+Hp@dY^uWMb4p=5NdMA0^X6fMw%VLCRm6S`3D}fczKJQ!8p2%{AyQJk>u02nvpPJ_ z^Y-u|%)(u)0$G%iJ#2g%t<3fqdHLdEfCdJZJ9e(3cKQ7NPW1u@Z*ksK;-?}dSD8!9 zLt}S=GdG(e=vxzij~IJ>3sJ8kK`!GBLtf4uEsBb0J_Yfu32%2^6F9Q6ai`QeXKR`W zd!_G<_n*-U6(UO@59`#mGcTZArKUar1Vsx=OOcC&-Jc$%JmS96P~kD8%GQK;bWlO$ zD`g)|Ozd0bIt+?efwzV6Lvo^f)#U_mnL36R6%cW`x4W4v^Mk_KY7~byNa8j7njOOa&iK^Z>GR z5$I*UwdYCE48rG*d+c;%TwW`N5?sc)4 znTeaeD;+~|48($)Sbou1Q_m=|nb8g)cjKd5a4Q7c&{$V$0S$LL=yyvSU1Q5uo?##J z>?be@8T?zBDjp!Us><#3^+!Di^uFC@Fo-%N&{gw1Pk_OS;*)J^5QK zWvUPL3TZY7jz|A#b-)4&XJL|#E6XYi+)Ow?3gTBl`$72$csPwwN3g@nxIUqDV^%)j zR(ob|FA=~j;I*}bKf(!FtUYdOJ&TO8p!K?ASXv%8Z+FP8$&!~_sb8lNf(r@Oey-C` zUhNg#rW-*U&Z^Q`?(z)(b5AhdkngD=JUZ)x!d_?dFWWw#A1`3R&XgBcyk1H*D|&Oz zfP!20Exk#ksh$+H^A-sHu%bzC!a~utc5tA>7^BCTsO;^<&SaLP<5r;wYQo6#&zC7R z?cpK-cRDqKx{VFl_5~@}jShR?{B>}S1pCdyy9X0o94&?og+(au{6*H@0vj9XQkyKU z_;i#PYaZdD)w(>w?_sa|Y34JRU#eRX|pOFkwB) zeL8a-H(=A$J}+Nva=rI8@z<{j2n$Q8hC`0l(>bnPnvg`t`<#IjE|+Eb ztYuDFx(Y7}K_oHClUkWCSW3kZ&z-BiTWra*zZ2hc6Pakpob$#98e=5cHaAhhrUC$K z-u_Quc5JLl&B`6>3d7L7W_21H+#beqza7LhgIo3{otwTA+Wt|906fe5WAOjE09bU+ z=^<+f5Bg(n;)!Z}`viwS@v@WUoQK`cdsRQJpFe^3sE&7CFasL1z_%7>6EE+kQqz~I ziicydJpxB#<0b6naa}dnXI3TQ@=3WXYsVE^$`J6fIo8yoA}X#3Dq~ZY@(D4gz{LKvJ`N+4aqJyT6*2Pn(a3UW7M0cm{1vDL?IqiRoX^ z!)5#e5c@%v(P39PF2ni}5Cl_ym(+$S-CY5mp)SW*HV%#vCly zQw`w!d{S-hU@K3KRWlOjB#X(ES{4IYtxf^2_Ix+3sPCuW-5$RE@$3A4=2>-Fr+@Y6 zlUOIHvHYC>_}=r!C?iSM?uR 
zWuz{M-z+vX+U!xP{4O>=i=^Zm(IH1;`xqr}0ShJ>&B!fD3VPKm)&&fw0v;lVGOml$ zkJ@7i)Mx%UA1?;@2d1AKZIQ!LrG}0h$L7AB87C)#jy?B04i3~XOn5F`H5yp$B{cgu zToI@Z*msLi98O`R(0-Wbunnvn_B$-iY}dSDI%xkis~U?2>B$|Kk9yQH7DJzU6Ho(3 zMg80EV|(d)>y%Yq_eq;AxoThDGDtrv9K_6N3)bSp&%y(s~T5lLTh{=F3`)s7%=#msFIsaBn4me`K+GZai9O=;X>yeE9PMRQ=gBY@v z+W*O+gx%jAj<^w(_Qe2n!>J=#deHswdow=qr|ZW%``qT|mKCh6?fg%>Hn%Z$E9phm z!SWKue<03Sru;0hmwt&me15Vms>I(wJ^zjz;hj+yaHuJ_($L=waS-{od7j6>i9+eu z?1q6f)bL$NPe&!PT3+WkdlfP9psS{46Mv}}9rx&a0)*n!HNq^$$YksC-i(~@cP9q+ zF!J*0d7u4=xw$<^1pE^NqS#e8dB$T| z`vj`dPO}#5Qo(k$EM@iAEyYydr1O-C=_EHX7-f*w^wK4oG-LdTrR%B-W~8xm6{Rrz zmgMyi!3~i7HUC5~FF;lT^{BabEdFE6%qBmiLh5lKUuq}QySXTJdA;gp>3%-tBhm<&QzPY_6P~MgRoNqEycqA0sDG1i1LPOlaP1}A=wY+bSEAoK z?fVau_OMWxL;30u3Kx4VLPDqAle_=;ne*2{7zV#~TsbHUYQg*=qB&h=6;`T)^Zqm1eKx$7qSDJx`xb3pEL>&M4CBuyleE}(R6eP>d!C@Syo>( zDLR6GEmg$U{phg7gMz__CivMnGHF#wB`)7sqY2(w{c)Ek!x%|Y(K=Os+aqw_$N=w0 zzN)xQ1gYGI!QrRtZki5Brgp+<=AflixZ7cY_`OAV0Modp^vIrm%C73xSQ@j37#t^% z5v_Ied$sOTZ8K($k}p=dD&!%@Ejn+TDa_Bz&Dd%(~2 zd*W#cqB4CUwjpkpX_ORCR5zYfd>1p_7WbgjA(YX|2xfST#bb!sRFfTja3gq^bP^o3 zLByppmS41i;-ME3ekG8$J(o82o-&>5=LuOyYj3@DvAB1IqjjY-ATFl+XSYv zQDqa-8P3tvo}-|Ol}vVxC!S7q1?jmq`lGfVxWj-}d8Z1C4ggyLd% z{qhB;$d|xjDD7rrWu`&C;%?+gxuanQohO}LekV?4Tzml#Oi+p_3Rpw1noKo{E|`e8 zIK&%vCaS0E zJFVp1AunrMt#TA50p;t!|Bq>@2cc1_$F=;0bA)qdmvT_eKS|&ed&LEXzZ7Si71rg8 zCe(nNpbssx3Oy2@;wA{Wk3>v&hB&D1j%x zY9RphWkoz;iO{yZYk92^y&a#h=j|s|*uyk^OaA`6oQM*gcJ87Z^s!UqqlUzhsVwn; ztp3I>0lo|79v-Er`WiD7OlnFiuhO%`Bt@YGZ>HR%X#Y3l6>+$M$aW z&dB@kik>aNwy8M(Lf~V@I9UB5E6EKiIC~wdl%-5m+!lMYONgfSLBBbN{{{EhSrc8f zxc87Fs9qZ%u)wrpS&hft4FZnLPI3Lal^E=WTz;e~xEqvVqh8V z6bd73>Ra5LU%;zSvslmaOxtaxO;Wd-7w2077OHRYJ1aBtWN^XaHZ|F9!k-r8AjeSkHd?6lFYw5xx9y+LbG) zE5vW9uNjrjqi3wY6Ps4gE z7R-Lw&#UrXa$Ns0Gp@TUpt2r8?!v=Ip#z_ZQE&Y67s_E}R+`1q8>gpEV2Q1CQv`SVxkJ!_DpZG$aoe8oPS%y7Hmxm0j$9;qZLuS%=v9%k>TxZP2)612&=h zjS3Cp2IFp%T2O+W-_8_gbZ4v?m7QQ?=pB-OmX*L5tb%E5w*s|nc1LZI4eS+~hWC0)UVb-BgB3vLe!dt>@iRUOUlS>i-1+4Grd*^y{W>KCh%v})KO zC~pw!oG+NTeRvI%Ic#1t%X;ce{3I{6Uk-0=lJ~)dR_q#0e80&coGutP;u8%)>u{ZC zgvr(NG-*VlP=yR;-z!5tPKo-qYUSN)FgoTHB(}W6uTg*J8;bZAkYmw^E;v|o#5;@J zInB$tcBF92(xJ!W!3BPo!j`&c_)@7WK_V9BQY+2jk!G&+Q+*{^Qw4RGwN8&&YF+zv zDQ_!qb0Ts}+2!cK=&cD|!mKjeUbCD*~Kx8EVC;dzl&c=OapK zdT8K_vH;)PGoW-h#U8JDg%y6xSFfYYOROzvfCuiKGpwkK- z*}#X-9)szE7>h}6;I=AR(oa#dLQ#LpbO)E#>e}m_2=HlwcCqChNtN3<^9HE z(U50YT@Z3mJ6N5fLH|qr;G)C2+1xKgYcfNsK{EV1c^}1M_(1F4{o!lm9jtxEE1!J_ zR>zG&?A!C5;W{4ov1VeZ7bB)q@GugAQ+U=VSH8_^Iy?guXJRL{Nd+j#c#8D%90=R8 zaKkc@`Oaq-%^C5lRu3P_*N{SK;h@fcepVbw{;yF<9OIfX`<&GXy<4?Xqlfz0pGLgN z|7b-}GWWM7r=0Ib$hf3>Q3lrJlR^rRsph)z8;Wu}CJkd~U2@VGmn?F4vi-*H_ltoq zNDSOtw){c6kQ|eKtgzF#RGNTU*s|w;p?2EtA2(T8^t1G=JMmT|`*_P3V)Jf( z27^qz<{rwj&nUbC5*G1`DvB#(%nUp3Ra@lD%Wp8GyzxiIoPG4%5eyXLIP#zG7?WPuD1r_k)EosYmRrosHEBt_) z2NHYvmI>AgMvehrE8w2UR%8PTgUPjjKcM98wDe)InwETsPDWK||6NfUz8$~l?o6q) zzq3~9PW9$zz6JKe6ee?j87#)^vG2>jX5q5rxmlr)eoRkbw6Jv&7nW@p%gsJC2^2+1 zjmui)q3jWg#fJn}eu?CXS8{0i?k1>}-p=H8`ooU0fkLrJFG={PbZyDP2#)VkN*vTX zBJoDJMc+QuPjrXAf;rP#vw8UqwWBFYHW@6^jo5RI??KpS{!FWa%=DP@v93CZFwTYn ze>g`f(l3#I6biWY=A*o()2LdPwd;1Bn1#YsR9VFkNePt-_|y==#z>c?zkk!dP_+y% z3bef#zY>!!*bGH`vOqJ zmHGpiWFbtrdpslk$fAl~Q`v_8j>uN(DkMkBS`X`+Mw_OhjTLea*sNS47LpL-X24%V z7{)Y2JJ-NgSX7q9X6#mQycrF#U_z1X&~u$Ve{$hqHUpc z8bgU}p-Imy7W8+WlH5SJwIL1L)y|s|pj74i-1*)w+j^%!7~E@xp}_o)Stcp*=Yg&% zp_I^dL&~k5oLZKhfu7dGPz)cJ{EMkv3|!h@dsUix-S<9O%TSbYFz55R2blAlH#{p% z2xU#LA?RyKBJQ6Fm~Yo8j`nN|iNn-V4#QcXkmpdU@ltz|ZQce>tuXV0V-Y2bevzuX zK@oZNQnYtV%VaWjt+kK?6x5qtZy z;#fuG%ds4G&bq_i7*57ih%GzP@Cqvui|Q|@JtPZ98k%m%T<1lyjJ1D-*Y=ej6W}~T 
z!iCM`d}g4jL3C5?P3`!>bR*u;S33hkIyuoUdh_h1?_loG3ceywkwF-3<7`XEnJIQD zf~MWG)9qlbs6g%n30sq1s!`=7+R~j+$ZMrhdE%A5Z-(W$XiQxlkT-wukO*b-?l+4P ze>3c|l4#|5i=VVooK^OLgNg9?2Yu+QcX2Ths4atgtw?VBJynH>FR9gO+)l}Fv zRQ%VJC|6d-9nWu-4}(pd!DL*7t-B}+aiKv)QJvpLqdjkd6InP#8KLAe3JHehFHbwP z03#n-qAR7-!kLR{Tjk&M1yCs5tk{6Rt%wM)rx)fCTyV=baUAZcVK6%cCWxyP< zVGNRuyWj9I+F~<)5vHMN2T-uYp{jDzb8{%}HrMdDNy9K$Wr_Ms(uhc zAMAWYVIzF?Z8sR%UvN!;+4-;ne%Q8|Zm;*)I@IM?UHqaajitu$rIp>$5B`l)Ci+Xt zbJ_w4{+RHDO6R`_)Gb^f`d#1L(e8Xbi?dChO&)1}J-l;gU zucE?8;1vQ(dJT`$P2db=!#a*_xGmzP{3-$&@ud`~>X?QP(QNt6jSm=VmgGpO8b*dN z;t?Dn1+Z`xhRs%~%w3QD{az59D8i4EiczK++989#z|2v+WM82<*uP%PhDO+l{Y1FU zxhsWN{x>WCRd`_iO`vatwcFtCQ9s7p`_b`x)xES4HL?~8ihLYZZk5y^=H~Vzs1fX9 zzPhv-OTJRlqo{Og44+aEgwEw~<;Ws32GKUJtP_h`!~?nXwJM4Akhi~$s*aNMWtyBs zTDVE>b$Ek||8shR6*XhtXs|MsS+rlPq`;Jv$nYQ4Vg30(cF-wKS17)2;`VfSWSV2c zPgUI!LR1b76N9>VYto;+6Z-?dY&an_o)l-Tw^fezuYX%Y4Gh%@%)vh~0RfH4g~*_E zG19B)0!Tp!_Aj!MNCm4;FmlMWUZ7y{)+clCN$hC+v2xmuSfibfL**yloV=wdst9<* zQh_i65-9Bvt^|ecVM-y|-V;S+U777;ovMOmVwy<~{jZxaUOxBxJ^4jY-sH2+^z@8Y zI{)3Th^RBn`LX-*G4orMxG#+6c>i55n>K?{(5((AYn%Vt1?*Fme)96f6EtLW^QAc| zq@-85?=Q|0b)vb=b&q&3h4gq^kgVm@yf-sT!g-3Qgep{PaVPirfgt)V%WHcHV$sN_ zizqV@exgNX9;5k)JKLLR{5jk%n+icpnwt;I?Ep|cOTzSxO79H$txitdD)zB7+!r!& zH0^9fTQk!$aSZqe#gEctjR$y6gWzVnje%T}gLoipuZQ64+7^qi8~P-4g#=)@%JVif zKbJ&%9g%GhjlI2avDP{y_y0L+vo`IvzG_4{Nj;vDfF;IPNr$hJoh70xN%m+r{mKVj zfXf=UY0R$Gk;-Rnt^ioXA*;_(-v65DIgZK13z?N;i7poioNwf!NRhdB6&oSX&4a1_ z`QNTvOXwsNqp5X#MMe-BK*6x2a4Ta1{wDm+h~Ezf<|bW65gNuwy9Zph7GSEBjFIK?%+s~akJv{;B0)mZGufq~` zc(d7`Lx>Z-(x}RxZ(3zVb4h;n!6YvRQm>!4tbF(Obfi=^REY{i4pzL-!1t8yIvI@m z#LKZ>zl}k{f6;&TAqz%X79JQbOeMaeg59~T6NEz@#>yLoS~v#-y1|fa78_kofuZbb z33TX0L^>gmErSK0Y_3YIRx0sVGyGD-AUL1R zeQ`9uJNafG?Uu}NZ}F`)!&@xZP!}cwrXD@KvIt>|3e?@$T;r~{za;3x**nDL$FHy4#HBj^V7T-}UoYxE*%v>l_! zpO};(olyLWWkl9yS%9Ix#AVY4Lwvh+xIB_#eHIUMki(|i43?GHqw|lizvcjvZNPId z8_NTBSqf~~=axw62Nhm=R%0SMUPuM{2}HLq>~mQI>KPhPt1}#5#=l?~i^=yk5EL=> zEu_rNfkwz8pJv}wRkrj#8=x3jj!L4B}t`DyuSN6HeWttnW>4}#YfVkopnYbTMfm1TiV&;B&)kiW9Z zOc1bYQL328WTA7ko9+t{5s_3WC=QcuZi+;vJ!xK;5;Y+KLpwZ?c{yKPsKq|GMpJy7+yv794QHpYw*^Y^KnUr z1l{}qYNAV|gyL7PYm@u_m)j$}})(;lZ;E9w13 zLR99Mtkk>jc7)4KV?1cCF%o&oezseYFbxqZ|CuJ9`@dhF2sSvJY&zHgZ6DiASV$(W zKCJe`qkWAjAynE7ak6;? 
zS=#SO+xJkb3RnJ);dK%e<^BU!y$0(ZK1uzH^`*PVo|{{xoyau)gUbq^;O>VYU1d@M zE;o*gC_hTJ$b;y86d3<-JNdV#Bf-6Qeqif~NYQW5UEC84SfOF}Whd~ns~+|A@qr*~ zyp5Q(IZ3)$`>o=MWl{3y7S1V1w&WS6$~1(cDBQrXFS};`QeR+E6AX&FlqW^0->axV z9hb0G$tGzL+m~894{iPRU0@4b-l>Md$(H0^5-RRxBYVrx*=sA3{m?)xA|fGhsj`>` zuv(mYIubTuV`7~r&G&mE+J`01ed_gmy@WD(yZf)=nn9UxCd(Vl2@yyb{sCDa-F{6h zcr7VzG*F+G@aoG5lYZGTiO{w5xDA|?^a`CJ$r;(1N&Q-vKP@0etp<3>&%oeys$h}9 zH({gg(PwgB&4$R4PxEW&kG(a3vk>xdXC%9*L_kAF3K{>@WsgKD^3)ydd#yXxIqVQe zZFZmkmQP1Lx^YR)!#y+m_YW6WhNMJkR^#J$ny+@&=1>kOi!%uec!TKy9BW1R5^xPy zNdDK1Rk_Q_5Bik-R0el@l%F@-rt{zC{k#kQ>^?4TrF{i{fAiGVdQg)ru*4-nja|+f zexi%;dS?payFnNX9nC#j5#O;YN3qFONe zeJ!61L`G$vzXu8ma4xbK80J(~72?2tkE*aUhvuDfjlNytQ0t_bBV9IQT7WP@APXRJ z4AQHt-^aJ#kaR1A&WbM0JI>9~`9Bw6kNb_#;8+E|LmNZzM&Qd$vC}}ZTPKGKwGacf zB=(*<7K;ItHT2J!m@QO&&f^}s+8;!KsO$TJR(C%Un-YQ$iN9 zz1k$@ECzzyS$Hs6PF*tXQ}o=Bhi`{wC!@g71;n~3*A62)I%`wEJ*Ardg2)6sEX!@bp^uSuR;$+q(u1VHBrL(!G zP;nzZl-9F|2IO&Z7OvZGv^=U^g6V@?P)R9lQr zX_5Xun8G(1$`B%=HA#U@_Cm9o33>@Q zWqRkVc#Y^wxBW%?$oT-FHow?dY>+gVw4)yWXkqya@sIXt?E~q`*I-hd`qde`JVGUh zkae0tp#jPA$G!L}q;AyqfYtrfA5}3x7gD&3{6_f^se6Y~6R+1@zX46ns*C3n{Q3QQ z>=F2kTxB3~3CqKZTNIEP_)$Fjhb1tUdLY{3uHAmN=@BaJxI;NX$@Wzx7G|!6Qk|~YI3dkgJEhtGiur4u zMa)+!^hqQ#|A4fm9H3+#~&$6f|kjT~S_u$uiI?tG(j!+A+A}cCGeN z1jIpdn1-k`AlPL(BNBmGU2W_Um0?7#4cb4Xs1)aDCq1r4hK8bpVnky+7xsE*cJ>_;sV@5Ca0V*PaSrcz|!(Fr76~R8$bf9+N?*St4EVnj~xU z8(ZH{sXu%hT}T%gg@YBQp^tbZ;F=`O{8Tw(UKok|ooV&=$@k=_WAmgQ{o<78h0dD; zS42oKH&6RD0+L;-AOOBb@UHpe%$hK2tDP99 zL5Io~79>p1!64)m2tkwwip*yDoiHK#VCZdc6$uND1tz;a)O+fL`0d6k&}E^# z*Vg=)SqrCvOf^tgSoC>A+^3cCGU!EmeISs%QMR}4=sGuW=un8pZ)$ZM4bK3>8ld4Q zlA*72@M9Du=Rj)*Q$?o_9K2)v6@_`CJl4&~nostnD9atJxyui|)TNkC) zc6RtA>pgpWUF;;5>4}CzBmTrc%{rxKKmv3GB8G=NmKj6QiD8c0x(MXS?U^>f z1h^{tC{0le1-q2Nj;8*3^?$qG66r!1sS)C7KTGq%yLGGdIzVTJ;6rX6+57@f&RdVC z81|F8XQBwmFL6BFhQ99*5+anrdz~0CG|JEK%0tkz619#m`t7rr(K~Yg3%)2p*Gm~l zUL{BPS>j*tMg9Z-8@)Olyai&w%fRG11h4|G_sX{o#QXO;@T?|*yG8vZ{N^9Qt!K0> zGztSgB6Ko9H`hz+#v@)BX@QwKh?RC8pkYJrHTa1y!(M%~7WT5n%L0|yX`pX;w=2pB z@ss=`|DNBVQ&%u-f@$FD29MB&D|=z8N(i8V)QY?Y{6PJ`#XSthKFwrHJ zH)9d_E-My^Tt=!p zHdz3nGB|xq3WbdX61W1qibRjKCopB~y#oK?H{tfTwJbCW4^_<3%@usi0VPxvv0%Ut ztD)ajvOQ7v42t3U;B(;7M`>X@B9In(1>Xodm0LZ)75;+H@IG|8WgOE;EH{OneCZ(E zXj}V~O(ZAqC8R~nqL%1Z=p2CG{P%ENH%-+jPnuz!BB=C$Tftw)^gg^NVeClttEHnz+bKql|I^M=88P|Ul>}i;bz!~5)B{v>( z?^SsCS?KB0d!QJwNClVJ&pcIWdA))tjXK07KF_E50N*X)NAFbqo=uJX0jzl7>@gUv z=ccCN29gJO2B{7n*PClSeCS*7*muFyt;94696lCk<|JmxxWb}f2VuztLjgGJhAWgz z>PzFe=&$$uNU!~3!2*6&0Fs{O<^@w5f0&E>HNU_o_)>{J`a#9kw%O5nx(+VwXWgie zlM{Ff_$HE8sIiJCzXreiIjGPcQjH>jhY*dN#mDs$VL>ZE5W+r~ZG@pRxPXjKFM%Ss z2VQ{3z74ITdhHhroC@F)$N{_=dl=-;_zWN8D<$^WZvamt`G4Dt`$v4@1)Qqe_-V(G znZf^z?iHxcjppycr+)_yUPMEq;9&zUVpKCh1!plzVk*cYFd_gb9% zC$4!!x)$ucuGfAsVS)zw=uS%yz=9)y0IiJjDgKr{CDS8tT9LmS+b4exf|W2+yZO^L z)LT6Ve4Ww^3ERLZRQ@I0_rF5(xss}v)F@)afEYFb7O3JJF2*SC)g48uK1M~kO*X)C zg^<2sq%nSxl0bJ5KJ?$cRuim*b4Os#zcq(6kx28GfESbwEp`?ZLni7y3xUtWgP(`n-hprw-GE4= zD6oul4K-M%kt=w(MsZhvt*HA`;f(oen5%}UDVur-cm=pf@q~k8U|xkie-8)00R9oZ z?u!Yt)N+~q4AWM+1Q5VSn4jW{{5t<99o#5V>var?$zFinHhi?*aP}x%I3x;iOA`{9 z0KN}=8ROdFRt0HklOb-~WZ z@e$$Y`2tJ$`Ex$QFzrS1m?Oa3NTSLhq@~)&f^Y?#IR^W$K=u6AWX|D_BySpD+k$~kxY7%E zY@?&GOPwC?AmNf4;5MLJDa*0~5v}787)Dw_+yH_~*_Xkc&%@oXfiD^#o-(DLNR`$- z8btyFn;K@RW{5E^6Qi{V31Ap;iA}DxRHy5N1h!7Zjoh%1Cl=Lp7-$B{^PKnvl`gxA zFuiGL=!2tYpKT%6UO^|fZvu&=Pb*dzR>al~ zc%c0{+8c`ID}-HohI&*vdLTwA_mjVaOybJPea)@ zGu#ZdVpfo_!>Ce1P0DI+SER5mM;a$eJg;w()$lG$tZkp@ta=yhJr9pQ1$DD)!@Y*m zxBbAangD7PI4t4?O&(&jG0HWDSY%Lblzl~Xw?m30SqVGWhETZ_#D#s}PU@%XQUbcJ zxZ@ZS5^e!}Ili40Ww?$2*OXG4PcY2g&^iK#Ee4d) 
zt>_3KNKW8S@+Fq>^ErNp30m^_)5efEL8T(h1@jt7Sd6cy-y~NLN!hy5!_)}0byRkEL z4&4eyqp)Ek&hNNeV|rKql*@Cu-%;`V|tk(r2d> zkL*IyUD|+(EFTfGii{4bM-)`Ix3ZSF){Vh|%g{2m)`&F`FW{j1_%6K_3KkbRzLpg#*`i zS1M+xTW|=M(|5`#&aPCX(3PM_4Moh_Q&g%`#Cu>n;hKwxSIL*+8rYctg_Q z`Qc|p1RD`NQ}Nny2~{r!m%g8pv^?YJe1A4@s<)+@NkOT=%YLBuk9{2VR;F0C*S5AlRVFZMDY;Mg6LA)z(lIw%2*q6 zO^5PJWFeu1o!qBP%K0o4zV%?ia0i^e1rA=z*jtFgRaywdQ)egqJXNdOsm3riAa<#W z(tyf&c<34F^XH@iZN_b#At{;wY7{Drq%CO!ewL_Z7C-X@nNdb*JB6PMcta@*RsEZ+ zlol#stQjI+@NLon&{Ug0sIEz6Xx1P(&$UR7U^U=ZVl#>76>g*x6Ak7iMf#SNo}5tz z^o%+t77=fsnQ$dN)Q&+obP?(&*MII&Bn##|63wnC0*YU17^H(U9Alg&O#oAYhZy_0 z!Zi-kPmfXxHj+8}Lv`D&Y6nNv|925Dd3C8B?q%x$prsg2n0TecWwoQ{)%LRzG()~@ zKO0UTgN8}C?`3dVnTvI;#*$=9w7IrKpGl5K~(Vw zJ4x|ZGe;$h1X)oA>LDa|qfmo|^TcDN#`-!hR0dNGFjE69<8eZ^mELg*;6{*;Kmdts z)T*mG;dlM&YZ3_G0`U}Y=k=zEr_ju(f0q-8qRINX)ao?@K{$2>I&Of^T0fh|RBniV zbqc1pi=0L_aE>Efr#J24-=ZaeHqgohPw-8?%Llm1{v_c!3eVY%my^sZmr{fkZ0#co zBc44c9%&E{KdfiEpaE+%Nq$msg}!5keeIiOr>Kq=i=1 zs#_|C^h#<+s<;Zx-@Sk&1}~)`kaEcbTycYM2g87q4Ofe`T?T^6CyB&LO#gw~*g$4mF%TYA{V5r?{PI>N4dUDK0o^6wab=WR4H=6g^zqVz(Jd zmueEX<>gKwfZ1|sr5Ph_;t9Ogli4mb*@Sw8%|r0vZ@`I@P#)RYWDl3}Ei7xDq*1`z zfh5jSL6AD8@Df%OVon*pGX-{cVI;`ePdsUEBUsV~X1HC#z%e5;`p#zK!n9qU*nuD% zIS1W+;EtvC@=jgLdEyOnH1ZoOczA<5IL8r|@oNH@$|9dv2AH`jL}*}^$N4rrT&6f$ z9i&@uCiL~fvk$@5-op0)y1uYG4yw;^b90j@w#3q8U=_2btS_~j2h;s zUQ=OpRndg`0=mORwL4Q8SgxWTbJfxmI_Am@0W>n;6EMLYg3bXrd|`WSTpcgrAn~5~ z8u?BD-JIqaQ`BedJ&Mi)XmGPjJO(r|$rF5ogPf*FBj3BE&ISy2!4KaH1099y1$2Wy z2oF9BANgnK7><|V?AFI{inT4cG&BkUP9dw2wBubsB|+*}dn_61H?JT*??UhsMHH>+ zCtf{~!YL8-3jGllqg zucJf3`MYN;IK}Op}(W9u|j;OO-p_bjWyBMKEgR$ zRIAf}IiCYVjbrfmcVJ(iczx3y$(}>PCvAG6XcQ3nn`8a9$QIxuOg&Th2oa`?c-d1Q zQqJmha?eN15%*SZ)7Z5QnGHPA2sLw17Rm3_7h8zm;q%ZC+Zm6ls*QWMS5vaW#v3oRr9p8jIwXv6|tezFYWyOTuPU{gZOyw)C4eZFhm>A@-VM*cSgxyQ4+w>ZTx-@{oKKuyuv*! z5hxN8m`ulPe7WGK7P#LZjlUbL=f*rrZl{Bc1s)SGH&}yF|d1dQ- z?fgwS@kV4m!0>>p3zK0F>s_Iu zB8+Xv314&!dNS@kYExnomFE4j|tgLwE7hlluy4<2i=ie zhqt!=e2&xPCdH%iMT!gx8&Eu7(4R^G3u&adrm(wHKF_wh15!o19V1gRnal0l55 zZELQw90 zMt}j}ZCI^bPL%+fm}Zo+vPEiYrs?V$SdD}P{7aaI=*Gl-?sx#hJG@&%5xwaP7MCKYy?X29442ujia)wE0o*PI z)!N^VO&9~piSRB?;vve9cn?9U$^KB>1(FFl=gTr|Ik* z#N~=nTRVf-yMoUb#^Z@6zZE^<+l&a>N@03{JxJo-J|uk7hh#v{vF(yNQ@zAx#WA%P zP$M6gzcT}PD+1#pMjg}CG0h?YqBrIGxs+S!jQaX^weqt`0xf01ok3!sc8Z!_$gIn? zLQ~T?Jw4ax>A6OG`w)RZNFpKj+MMUaeI8(UvvZvxd+XiJuEmjva>R|;rYzH0br_B) zu}vV!*|D4d)|nT4jd4< zzfGpOfx;A)Vi?lI)V86nZkm>sQ7p@(ynLCSo@-Q91&PI6)YQyUQ6X79E>{$fN7$WV z#0j6Jn>*bsVh7bq%XJ}%iq4sGjg*T<3}sYb01j(qsgb8xP>!F?X2seGB(RboE@DJi zgOSNc#VitjjUr_OO<5#?L%=IYa<7^4`x-_JzkiAD?(5ug%W1m02JrhsDd|aj9rcm- z2Cws|8kAx>qKw=jW%S-iunMfTIxU10QI;u+U#GWFR&kXce!#~^0t*CqhZCePlg*^QevF!$SqvktPHfwtqGFMQ2QN}xJ%?q9MeFx3QCmBUZ5tSd zjcGdN4#t|lU-S8Gy@9s1NZd$xxEVO6NZ=)4n3Q_Tnu)ci}p_RxEA6m8>j=%w zqf}Nd;&$hMIKMXeR4$+!6C)=?3{*5Rn61TFiA_xC~a*+G&he^T|JN6ZErg*W<3vFqW-pRaJj(k zUi&+4HQBmG#x%}dd{p^CG&FMWQ4mTtn@j*r@c>bSv`K1JS0hjkZp&g% z{U|kcvvhRa;Kq$TT)*DU_;@pmivcXlo$wB}vWoiEyGL$XxurL^mdW zFA#vr%C))IWo1xZEyOMHbzLs`-RkPKud$JSbv%F@k;O!ubhs&%vRvkU-rmI{g@>h41nxV>D;|y^Dqb z4k$Zu%Nz15rFTI6+o7!C7F z01l){L_t*B+vVTQh?8wYLj%;;Z%r06fmjSCCq;p+_c;wil;iNQD9Sa*%ZmK%%1T@g zj&+>)_l5z6D&I3If%|~l#5(3IQvN$FXQ0y-=6Q9FR%?Bxkz+&@g>q~a(ZhaQ27$m5rm3bF zSxGHjjJ0~wFxKiWw_7e}?>pW)(~J|l#zrA_&TG`vKwVv&&^eyIRE+4|!t0gyP6dsP z8b ztk2rojg^yW%4_61JRa!ikZaRJFmkgT!eN-05Cvx#qOh-Ak*||~ClnHyEG~kj8mSo_ zGIGv1C})(>Cug54B*CzEfV!)aMAEKpYl*-B=Zg*_WXHF)4h7s4Lwk>&eRaJ34X_{itHa5n8 zud-4umrKZz%XPCpE_8UkYeUt6fGiK^TndNd@|zw5*8rFofY*Unl@T1$do3N_zi;yx z19W1MeUC~}5J0Dg(Yo&GI;0f-Aksm-l8u`)?U2ZAbhvg<-5vUcQXm z9i_2xjE06O8X6{OY8t1eW)9P|@OmS-T#~EfkTXaBmX`~m^ZVt#d_K{c&evYAke#x! 
zxPJ9`)*8R2nVaZFzQD;@bkNcXw1asE@FB(VUqFiGP9u@}u-XAG6$b;?bm+4gm;n&qSH`&VnV(jT!4WAh=gEq3gy-y?e!82R%FCB= zxhyIxgVfc{P*D-3vvZpA@-SYnTon~IwY6)b9932E$c7V!t*(w6!;WF>a>dE6OAox= z9wBswPk9uVZ&7FK2aw)^XMj^k&CV$;uT*h?Lp;la%u<`P&rlQuaC+5#R15PqBn_pU zc$IG~?(*Nri&s+vt?QiJv6zs!?(Uoa-nQe3zw5y?*H<`(&ZaF0BzqE_#Ba-jBZuSb zW8w+0BM=$@>np%_kyIuPjZC8!$!UHs@E#~T2uJE*#T2Dz0Fkhezon(P{t1U+Zce_R zsMjn@es5vn<{}FDTMNTj!Z4PI#b&WAgHWi9<>hiL%Vc4pf?%+USj?sRmfAPzOw$x1 zS63IMu5KQ$H;l_=DWbPTAYjwhX0QIfWr-19T`fkNqd&{a#Os%6V3)G@E<216sr6|> z!X58IBKVhqA0Xkl0rfrY8C(~AbnzV?<5lj->E|d40@z;;(|1762NBoH2A;Z}9@xKs z<8ZCtFJ8Hgph=sCvj&{1X$PI%Y8~wd<9$dP)L#Mrop?UGhDN&JLsD8=fcu5`aWBv= z>TkVbq-5jPw#EAw3a!;`v6zs*xj88m*m^(M7{)TD=_J!mZdrlva*5GgTN{sdB(_@d zdWG2SWN&I@5zc7nX5cO)3Hlo16?_%x-|b`sXfn8p=Xj9s@WGsV{|c7?Ivc$Mcnl8w z6x4P=?_Myq641y4g2-Xu&w)=P-oSqbUP5x^bw6*7-4MT8r2q1R>cG8=9gS>oDqh!X z>C~VbziI3Qz3O@d@dCc0h@jIwVG>FGTq#`NX`QB_u^rSZ()SqfZlwqH3_^{3J*&Hr(&7&zh2C!| zLihskHd63iVhG{mYdp#Gyqm==H+?Q_0$7WT>iRfRiPWX{m_`Ai8u(v;j{;8tUqr&h zljK$ZNsT;2BNF0$1b9dts%3!Ab<-#WBIpP9An6M40e%807CEa3;U!8UO(uqm*LWA- z;3Evto)@1fzX_lVX(M$Q_!N@kvS05hjRM9V;1_^Lkvy62Ac@oGv}iPv(5-addw~a# z{N`HpI0lWvgNdXs_>gpky~?2ZFw#WzWu(VoACfOvgczrhukvGD=0MKcz7!4t>;--t zNpRM4wlsWz{7l#Cz#Kpgth!5fCrRS@G;<4r03Eb7icx(zTi3N{2OTL#8I?mP=U4N|YvW1iE-X?5w1z zz=8ASykFb*f~&)M&f($xJfF||{h@`e{76OGE^F!qU>w!;A4A5D`mhNaWA2Xmz&k(_ zicHv#B4jS%EWvaqAE-b#ej8Bf*UnyJCPog)5p*k1iTGDEf4wt^!7(TC3}g&mrjTad zppECak;Wy?)Cs_Bk|SZ(q6T+IxfnBf=8^1;ISw2Ej)haONiVWd`RWB!@~uJHrpB15 zAs+t5mLl`WMszE90hNf`gE;I%6PQL)`|JG0A`Y{O7V7AsFs+v^H3INK*R5qYCh#vx+75PoLT``7EZcbQMqHGzl+99i_ln&dSZHzQ?416iLZNkW`t$HCe*OL z9=m&unF$sG)!~qQ4VgvSQHi)ch{67;5rU(@86M$%nmED+Mj|k)acTr$aS(lV$lYe) zXUz1IAD&VaEpiN%hmQh-UY=>A0@#T%{faF9jG1xb$VFU$dItN07(7cdXRr(SbK)=2 zMJWf^PAlsoA^s*W0PhbQ-IlQF<>CxKV`iE;s12h5_23=_K0pyN4wX(FOVRA!9mwc$ zub0G_v{O7|PU6Ku3_gnjrq7}=bU%{}oxa0hybQ)8mAKaR~LE4WN=PPPCNIhMY zMeZ%fZY%ue1>si*yoSoY#+c}`2HjHa0QLcG!0)I}aMa6lS3C~94KyNGwlOC9#L=x_ zN%;A1)H8SrO&;ll#JL#leU0jQvaDR-rhTTD?JIOa}3AcxuqC;9g`Lai3t4dxB8j9S+K#<7-Uj zkp;2&Hz{9vhFv?GxtWM_;zTtA1M?y#0P~Oyx)DtkC`OIV#+c0H(eSKA(Fm=;=corJ zlUah=VfFw|VY7!Z8P43?0cvXx)7Mu(Z|`C{I#xzZ0KN-6fXp2+f1t*gKq(iO z_yjlzT+YC?C`40^nu7Q<#u%WWppVU)57FISNN?|Ay1NTW_guW;VMrt(M%A!Svc{P4 z$wKWRPovqI+kh{C?@_??tQ5Zg8QPnJdj0lT{260zrKkuR8cvc(te~Z(j?vNdy-gb# efp{EBO5h(e{BqoslM#df0000fd#}x5TyhuS!zK-kPuj;yITYVmd+(4rKL+c z1=;-!??2%C)Av5lJTvFsbI*C5*ST|N=FW}K)6pO!W+DcGKx9ue9~*!`I8ngyk`NE5 z5ptE#0!}y{1{x}$ieZ**K)`o+r2Pm4s*WZ3XG;Lc_g`olXoEn$JRneDCp!9XA+2!y`%1V|7F2m%8^kRS*e1i{`m!XO|R30a-y#KJAs{Raghhg|XaM%D;ak&yEub@?jlK1ATVN0f z5&}U(AlO^CfCPgod7zBxgAkh#c_7)VNi-tkaNC+AYL1O`oKvxJB z2EigBSTqC+cm+@c#sJU&N&ptn1~>+61Ev96Kxgc2K({hbkT4h;2E*RQ2e<_!Boc;1 z!;sk9Z~+uRBN_=qqhV+)5Iq12(1l@W0Xl&A0T4ih0BXP(02)9E zr~)_u$AE3XG++yR%iL`0 z09=4NKr|37KqbHj02Am9&;eiq0tO-kPy@yQ&_GWB7JvhA4A=%tV{cP*s|_>*GElHU z#sDIL_yD(ng#K@k-SYZhzHb}MZ=F3^}#3*4{eJsH*;E+9n;xqpJ*#e#C zAI84?qE=hsB&6)hotv*T;T!!n7U%a+>b>AB692v6zFD1Wke9FfW|Tbro?cdZ#{A!v zm~L_6w5z{=G1HenuD5H>T~1DYFLV5)X$`RFI^TcXXv$uU&*^#0JU0#PZnwk1HnEJ( zj7WdI@x2ogT%WNYdjj9UiyG;?%&$rHLWZF*UEy~{iAX*6$RQbI>`N;kx$Vf_mCIXk((EuwxUBF+bN_v9uL|0=So z>S_!PhIQVBGiR4i7w*4V(dVo`@OKMRdp+@y$LC0u(=`kgu5-}xCGgj9!tO8_tlTcK zy)Saw;bDMlJL+pxXfOGy;p>pY1tFBom+Z$$cYd4e-;P&WPP{)qY>j+N8O_W%tEAY0 zv{){zcN2iNbqn;*4rV?^R(86L!d;|#Z`>d$y0H&qvE$MKCD=I%k9GcjM*hffQyd%PTgXbhAcz@r~-}?)$s-=CBgijVH)M=c`|ZFZheGoU?6o z*}+{c3uf6xN_k)RzOq1VE` zsE<11@r56vH!{sN*t}Lpu-@tdFCDYA^YztZww{v; zb4dI4Gf#&i&L`|G=f1qTG6^Nftm9#VNlyFv&LwyjMO=n^MjV{k9Q9dd({jY_GTuA& zl1yjix<;T(xy4LE5PYqKr?ZoqQFKR$XOFDDJg^r!DJV?Ae{t_utp4(Ao7itQ)ex$V 
z`Par#6Hlus$4~Xc*uf|K7nS%?su%3c`}Y$dHOWH$PTXdHx(Bu+C5pe;*yb2>PqUH` zO}dZtJ>ZAeoHIo3m4}L~(SCVoo@cekyl*y9P}JNaPwzfE8E^~@Bsc(-NCh$O-7r^O zCOnFLzSu-9=#?%}uhPF?_}m}`v;W1W+7xo zxt3qY9V@{fIYeM~mMOl3S)5_uktAs>%gHBwT0~|A35nmoA2#Ib5BD7+=zsHTGbTj0;H z#qd@dMo~9+fg^bZp?sRcXmDXXM)#aqgkYVGd5x3_x)uHRslZRQ9OY62xJjnPChtwg zJb@GbgCx2vzF(eS3Z{O2S$scY$3&Q=^&xQTG|R-fNItu&NRc6p7k3*c@{Nz1k$K)g z(D}D(6vGTPly7F?y;I@O2Yaei~0g2`&p{1`HSQF*z?^rY=68-vwve=$Y=JkgP!qZ-La%?PYGXMK2hjYu>|9! zaP29qGj=S|TJ%7a&&;x3_}s$@oCY4Jnoi*=o@e>iOM-M`Fm!AB5{$^R+JPVqw~~X! zM(y)FD_Of+Sx(E?4VT|kU!pIC+M`*C{X$WbjJ@7_=i<6?Ud_ZV5#8I|waU?(q^4Uo z0!@2#VW}1sHLd=uau`=K70DH2Z-`(#$8**9m{`o+bK=BR>k`+ei_>A8bDygCWMdc? zo;n)9MR+9ggLvBb?2Z=L>S~JIccuq&u%&f;DTwX&un=29%Sw!zqq`o+%2yYMWHon< zPe8@0dlx3h*w`}UiV*jHsjcg|3$;`gUH>^?D^|3yXX(~GWfNBt^WvfSrq_8=9REN@ z+%p;|3e!E*LOgC(Wd2?Lj!C_AOS{Eqjm)ZVsTa#F<&rGAg;5izhHu595d^<9zw>E5 z*xj!8mkU&SuJApiaD(PMjZ>ik8MFw~{pmv?wVAzO_I5nFkY;o~@Chz5$w4Df0 zmN!vzsl;M}iD`W!{@u)f5d5gzs-hbQrSP6<7jE9;FVKnNjCZDX7m`|%95W{j^oaxf zZOoU{Njp8`0{I~o8L_i2laCDIM)jD(0>YNxN8KOcTcunGu{lzX`joplA0DG0zIBM( zq|%;!$B~$_H!$vxSJRv5Zahz&H(Hy+f3_m4ytDi$4P`_Av#<$Nkd6_(!1 z`H;WCT+A=n`;%j;TQl*sn+iXLk{=5hKdPSjfr~4vT=vAL%O5shh^s(|xLS*Q#Mb^j zKAB!VdRboM0#hg(Z#o^|Mimt+|9Xe--}V0X<~{XF72b0SV$zErsDS+!Z2f_!v~c<4 zy1yp`C4DABS5egbLIajtxx*_*t{tDt4Nu1K<1D5=Jox}yt#8_A`}Pkvef6Nm|Hy1= zp>uQG-$)8q?hRGe?}Vh@wf4TEroHCv$@#43`k6B$pFUpW5>dgEJ6DwF%p0`-7*dA_ z*LZrg`nTnEPwqk~8)qB*I?pIZ=hClXT5>8YC!|eQX9Z_Eu8{>FhI6B;7pb!YE$F!w z+4p;DuD}vY#StJ4$`wMWnP!T=*ywp3<})hp&(|2=K;nHDrIEK%B)ibh&k%)f zaRve_;TcG*_p!WPUlR(<*>`Je*QP=NKB#WCi>?O@PE8>gIQsx}Y=2^I(2I_^- zTCFbxIF&wVdP#iLK?*5wBDtY_WOqg*~;e?6X`g!d5?!b|kTR7*O?tqCkC0Rrz;@?_*PZjI^beqGI zXtfdLQNK+-hZK3@arG{Opl{=?+XZJL^s+T#I=K#lYYkU+k!o67imlM)>0f5f3d< z?<#3H{0_;r;v;i4(oK4|zh}(Ri<>H(_?s(q#|G!Fy2wq`&35|Iq(eVyN$w{4T5)X^ ziK)-XecmmvRi$UJ%@|9>71uZaxPDH2mh^3T2tj`@=Z^iqiNZlBQ&jrTPOu%05b4N` zxO+`Gk+a^8%@KE;WHN!ro zgrYR5=e^tH!IPctJu2=>HccezpA+Or`Fo6IayvK7V2V%^EJwkPp-4#Xzr^FfgL@Y( z#nWP^a$g2GiDI5!licS-GltnxezipgS=+#I<{!O}P8l`2(GhW%vs>pJJq*X-DGgnN zzRD`Lg^z`v zt1TAUFi-T*+|M)j(kx-?p)*$v`Q>UZo{?tVQ1W&x>O-Z4Fu|J^A6ZK}Z~9I1 zPLjypgvUl}Xu(eF4I!Ah=HTrV-pqPbhT9#pWd`1gAlQZ-(5;9;GYlL6{itY z$h)~$V0=*OR^3qzh4J)8X| zR#(I?37k3P7EQn4kgjx9fB!4yr%vx(U8HnGQ!i;0R+Q$Lp=4c*A#ez^nG_wuiCntZy@257rmGssG@c~Oe2eCmFvkXmsLzxPW^nulecDtbtqMeB zDNt=xE8KC=^jf$`N*md(obV$5Ye|IRqZ1gX?D+MZcC^2wNg)zxOblh~klc=1eSL3x zCYOF4wX2&j&DmHRv-7&}Sj$#v?}_->3+o5|;^v-k0uSRAYzBh{7aPN0(ke~Y+If6? 
zwu!rlRQo9PHHUCN{6U4}g>7SB2mG1xHcD$F+RmSzKrMiq~q%cJw>vdAqa`D2+ z`1`R%8qKJ(8P*TH>RQKqAC*()+De!$awARnn%}4F5&!)95yGF%8*3Aqr#rw7R}?Vw zd89|Z3w1VsT(DGTyyufHSGBf#$)rr{!+FlgJwj?f&`#Ed5+rPQUJ8n(miv7a06B;( zWSh7GCkJ;tHblSJe2Xhc)@QcG@%Aq=zPdY>>tf@_aCOiCj)q0rlS1dS9q1_1w)MmV z{wpN8_lHpA$#+T;qu$O7ukB8i1%bZNIxQaZ)OufXHZ#l51VuLD?S00T7z92kmeCPo z`OYwX)}eGJ`In=zSAny2jAOxw)SM$Ij_e(7bP{wx!l)e6Z6_M)oIO#Q(&YFxT&Ig> zD4=Qq3>l5KiS}3=VrEzqWy{u#bhaYrYjv7LnGuIEkf2K+I>3WZ)!N&$mB#s-9u2`f zUSuAV#Q4}tSLGjTJphAO9cZCncA$dzs?h{qCW+q(T6?*9B8fI^-ko&hW_{H8zIo#M z8r5-=XtdatMyISxdJsQ>GQ$r$q_q~QJtEJwPd;O%hL1%X7xsTYCkn#-&Bz@g$EnNO z%f#;hH7mq)<4T0?Gaah`>8GZzi5ZUEp}BYR@|>#eA)|<8uzmbl#iHT#I*B>TVa_m9 z)4p`f$R9JzQ>u4maqMh1RXM9$9wfpOag-)SltDh{e{--o)LAce#Q(ilLEmjrI1W5E z%zRY7rXpV0qr|qZaa@_~RXOItF~WHBHm;d+3N=dIDQMhK{jU)|v$n#hiDL!isIQIc zCH+|C%D%`1=gJj00WL({SuXj2wLJM$Eyq=D0eJpPsI$&GMVih*1xw&=N<)q9ow5~b z^*_H+C(VePPE-Q-Y-bmX_RM0pS=FVT9+gA!6*%!tm;l*7^9s7zCwu85oep-1$5Ld- zD{z-0WyJB3c9Zyavo`0;t!`^XP|7ScFGO16JEg(*A9HoTw0pnlt5r|u%h8tHy~IF0 z22C_KS6-kpGQu53!k@X%zIl8Rd-+C&o}l*BMTI+%S0sJaK7pxRe~}|^q_f{rUv#T< zhlN&EJMOrhY?F6SfQ~@`5kF*q*c`OIa9X-zwDhQOX(jLz1N-b22TB6Irj4S5x^Y50{pAHzcIh z9y~n$lt=elp&C{9r(*hiEX>UJgcLt-zF^{`Cx=)R)Xe^&+kxW03XInDttxu17Dek7 zzt+7wqhnMA7mJ@gOZea}WY{oJLZQylD;X;_T?Ail9v%2Nf6OBmf)*379;AZaYECPp zE=W&0XTtoQE4auab`yyokL8?)+`bPqRAbF+_8~wOLIOCZ1;JC0A8GFS%vm;vIJ=ZNh z$v$+1p(zV@_32gYT{FP>vWW7?Vxtv9BW<*DXUaKCv(61kaP+UsBVV*Jf1@cym6<7*~aemb_{vkHf!1|&TfBh}Tjw=_qzGL?C&lKz58)2PFj2b<%RX}fz; zn$8(}r+*#>KXp}dMA9Vja89%;rL;efkF8|nR*tLL@F^Frgi|r&D8kLR2DNH@iv~q3 z3NOyCw5Cyfe>i)dRP5CSQS`p=rxx|ltjJ3WN38kJL_IuAV%u$I6Sf$~$k{BnEj453 zuY=O&>7GjJ4o9ew)J!Y(H1nW-^d_<;KOAuOgn*;0-6sw#_^kxS*PLknR70u%x^g_o zDI1U3Z4aVSRs59cwci86wYYP}6Qhc-V&ukUu*`>W@2s-Sp4eiYXH-^m^**$XaZ}IUP3XPQ`diY0P!VI&I z8KfCABe3Fy$W>?N$43u;tE5fd{6*ePFt12Ve$iUuvwXj!R^y4Ffbyi6(CD*a*I*fs z#Wu>BG}jCK^wJT2Ru$bm%Rk>7*ae1Uu0IHyYO+71WqJHeP5kh?tbcVCrdwt%&ihB( zjO+gURD4{>??|ZntQx)j7{|J=MHyWYh9#r@om!6Ny$&t>Kw@g!Utg{nyQ2 zf;d!t+`_G1EI4>=pNZ%+kbu%Vv>K22dbW|ZZz3>e6!LD?iKBaAt~plI%q3)E zPXsdUR-+p9P6v0P{}Lx=7{Q+t5}Ikbr+hwkOAxxG(&yJm$+lIEnne@P5R-Ks12bz2 zyZT*8%k}oST>UL5$;Hm~lOH4sd69Se4Ft{f>h!THLx<6|FaAK~bCg%32PaVXv-*QM z(hDDVN6{Ag63k^uX0(y&t`o6b&@V>Rb9ae6R8%@$zwXQu?*9_B@)%OG*s;~HPplV(6O&9 zZJ8#5e!Yl$ht7f^J59G}eqg#7j-$w=y^7^rg>N1AV~ zsvqA4hm7yo7!L>f#12NyXNor>zXnEB_?RDrH-_s|h zU)h4A(M5nW8#BB&ODTR)ZjZO_;YC%On_xSdb>v+eD%b*8dnX(hmtMgYo(Y>_IWnX0 z6O6{Lz^^O`mCk`#!#P>CaA;BMeykQLcg>(zR@5Zww+DeyF1O#{m7cr%yigI`7TR>; zEa&voot0fI{&g|65rOh#0dSLKr8g%(iS&~FcW7}YqCV_uWGe7*Bl=GDjpMr*p6v^8 z5uE*ZBmpM6Od(BK&+&#N3b9Hjv&XV=a%e*@r=U81_<9=kdyrW2S@zZ){Yb&LKIRX~ z;MwgmH>n>ZyFd69GGStZWmHQ6CR-t9PN9W|qmf*hjYSy5zF73zd5ud28&B_E8hfQ( z(HHp@^OHMJ8(H_Gmsq-;kL_?1)wOiX2BFAfWC9nJd%`Lm!?5fRicGKNfJKU^+|!o$ z&8L4|)1}^gP!u4p90bQ_%X&QTZeXvS@1KwEh3Lzw&MRKD8zj9t;Jz|?DY0G|BBE}6 zC^_e7bfv%&Fz?~^zRyGWlz(DaDt;t<_n~h9Zn~}fhaTXKayGt?g#7``iZ7o9^pF&p z7Ql5^%69W#FN5XLsqb;@h>1SdZus5jiksuH&GWML8|lw=DmNrGWim4|R?}6ggPkGT zjQbcBADNV``^F(go8{YHTr@|`nKC?4rwSLUQ@PbeFK0g|WR@tE#Ox2!@@TtLei6tx zvZ!9UA=sFVpMh>&d0~`vvJp0(!taK0o*D?v zRmhq?uxyklw}V^7ywOu5;fhfSS8>lkUPgE*6!PqP6u=Ccyn{sYCoMx)8=egMFv$VRh5^-T5%@{#)RrA;J5FaJWp%D$ z%O>0B-`#e;h9+m2uyD*60gJ#9=+7p(&7qRk*beHsubzf~w?2Zd-uf??L>bAv-i_S*(Jo!s-puE5|q2yOe>c zK5t~=#6X2i`x^Cwba(^@UVa4*t2&B6c&TU=U7u_c(8bf~#>Gr)-dowYpsv5rPditg ziS1*)1cvn%0k495z5K_-y~(r6A?}A2Mp%w^y7iXOf}(*r78z|wgR$;J;>-JOi8MY1 zvt&0RY2(d~!e}2tLz@PQ{b5m*f`~-Hkc-xeJnvg^FCK+Xe2#CHdT=@8skp z9DR52^RA(gbo+Kab9myYwLf+iSdZ{xCe=zX=Z~AjTaBzqWT$iWPrO#F&3^kW*C=5bFYs|VsmCNNB*Cfi7*EG7mmYgs& z*U9B634ZNcOQ$r3mdh_h`Qpv`ekVE{`dbleJ+d;EkLI;RvjU$Fo{12*;*!$?E4Ls@ 
zpTRK+@pr)pzu4NNb=EI2>y6DD-^KGox8EPt;h{NQ3sN{PjDghx>q64t?zg!;l=#{2 z4;95MQe}Dk!*` zR^T;r5gbQvYkc%^YrAV%kr>yeszBn)L+e^0u{Va~+WKaFdVGI2f&EI7O)7a#6sJL=Ph)n?--YTr)|A(Vo&p zW;cS*6E9q#OGMu){hqble9m>`lD2cjOsZ|4H?UI5d|cq+P(O!w{YalAN+~W!0HXbw z{p$RlG+Q6FV7;atRnKV)6YZC(Uj;4wm^dtJ>Rw?4Uv+I2?^f}@69xU`I9964rWrky=q=-%rL@Adzd zI%*bC+?qizJ$;|MH1dSLpNFzA{Ue2|TjM-zn04-3V@GMeuw}v$D=&?8>kI94?>;_; z4pKbe6>~bgBW@v(q}WoYZope-kmYNlBhiVb#zxh%)QHvXw;)6~fdJjX;|k?7Z1*Co zTBmxq^kA~or0lVrCkdZ|;A8!NZh|;-l;E83jY>KqTgRKIugc4|?6V7Y)%f!W{zS^6 zuFuXbh%=G5c!*ep&Hjg6;fPcJXRt{^8N4B=fSyom%DvxSL_er#mFRz)sGN~}ThQr| zc354=c`5&H{L#x{yy>IVIP&?upXFJERhqqbxbm!Li=}}d9 zx4cl%_4s3*G)4wXn8C}?Ntt^@b&QaEkTu=)wClw%-sYdq5QgYtop`OD!D*DuLCh8( z_#iroE38dx3gx<(8igW}+q@f|9T6vNja><|T%O4lbaa|PxfWTH(`LLq-{2Bf5wXOs zBv}U8;xy4dc^qH*F79Gk>92m1Wj&@_yAiirMYa;0PSjQ?2L;WHM-KdjMayk7^6Fdd z!~={C(#gs$&w_`bGh!?f>lECx%V=rFpCI5Ck}dxjFt$h+=&w!>0!Sr86#_twI3v#%XJ|-&_Q`F8puoE zNpjP9Wv|$G57NSSeJlQ!s^3I~HLQf0C8~TTp?c)G1Fh?#lkUb&Dw)fpjolv6&?v{<)&G0Tks{O$ryM-hgkk3 zroZ3$6KzUrUj9MvEB2r_hT))Y*&&T@{z?6qtsac!K*!*0?ihNVCD%(1Tx zy;QNoH{rOzpCVw@vV1Vo>#WGF;*`=b8xD3pb!M)|2}VsKyv9#qU)M?Cgo8WKth26d ziKZ!F3yVt4c0I2CC(!{%Z=vIw#godmj#y@gpPQ-DSmr9TyQX!`s6*IVyrk3Rfh6xm z@cSE*%bY*et$_iOY&T#V{RB(!o7dcCEI*{#8P`g`9wLr=fz2U$AGgTqZ~t z!}a|r^UqB>!2f+$8{}Bt9GC_ZG`!J)i4PvJyx`eqyNUl+y^`Pn#4MhS=K3O@WW`8;-_=zxtETrJPhVDdA?p7;i|6J z^j^{Ibm4`g=<NDRCe2kjQK$TzO0D`GVP|_oa%azJsfk}uDw<`7GEqb;*5Yf&>G>3*yL%TpO7uQR zej3poW@#08k0<~qhxn@{BYko?oITIByw@ZQajrz+9@>=`s1Q15f!|~&|GKlo=zbp! z;SOaPRiyfhu9`aGNtE}NSB*!ZKn>`sGl!hW)oAJfqU(K$Ld zQBqlEl28ei4yOe(S*TI2Dhb&_^(!2L@$mn1dm8rcwuijVNrUkWu;&r1~%;Lhfem;m@gY5H7Q5psO;^AU}V2Y1dnKGAcAKp0Mc$s#%GsK8Asm#3o z(37z&(lp0HAU7dT6(2!8rhd=*Ju%ZcI^K$>%bnTawOOI4O5z<>(M#_&Zg<1BZ?Ij? zNV0xzOIclvHAacq2hElY1q?ET3o1OMpgm6twPJScmRqbo`}@q;^=dU{-5UmPqJFfo zaYxefw143*|6+ssf?JKLl*W*RbCjaG&|TYG;{>nEi*=%94bB^NtNPW|)n_0dp$BxT z5J;;8#9x;RLXU$$Oj#gdt`JB$E#&6d3*N@R3l}2VOv`oOocGsNzW2A!0ikdUJpQb8 z0I^%i*ZB9X8+iOZ7MvK{xw3rlO~<4ra|yZPoGT4GL(lopJ41m~bkQu5yWgnTe%zFM zp_}sUlBZ+}_f_`R2=Mww{ie)z??Y=Enhr*}Dq1bCmDJZVe}4wb9`6qfNNv-UkZF|U z2t~LTy82tQK;()kP9}GAjO>5R_&+!Qo$={Ez%u+bn`xpUU_^60J-se-!LJl@#Vs@P z*&4pShvjA+YUyHI6J{|P60Y4iLVth%kf2=ok8^<(`i6$KHa64uzIl6jkq7waSqWVM z@10=?SA@Z4H_*Il$cL;k!!P9m2G0fh_LvJAmAMaO?o3Wj9vvalF5Db5aiO4~h=_=& zv-Mo=PE_Q06hYQ1L1X9RBejP@z~OOMDS(`GBr=S^qgPh z-7Cmp&(G5oK>8Xli$*Ogmm7UmRr?wFLoaGty>? 
zjEd`Fm$)SkH2WehJ>OV){1^56_mzL9=>(v`Wo%iLAyU!f#gu-;tY} z8_GKU{P}ZuV1^~L78BzSzoTuoXkKQTQ3=|%)ooy4q8J~oquMWD=))E9g8Ainc}lD~ zh$drdC+twJr2B@HOHXz-J9`k3`vp579F7@uWhYM{H9HB>hc1-zfNF2Nsoqc2Y+>BNjnaof@3*n?C@d~63JwEt zNU^FThbO0ngoGj^Ba6$*Fb$qsnUQE0?9gbDj+J!6P3MgHjLgg=K7%L|4R@=Q%*+)U zkMc9*0X{5hYn{+~-Hdr=W@evXPg2$cJ@;C?eSK#?cgSkIy=8La#*L*~YP}3QTiecP zXcK0j;0uHEVtZ6n)WvM3ANUC4gN1B^!tD3(Vy|78jD)1*@}78I0P0=$>h@s&M}5A= z(NXRF?w0v468~^gm1ZXt5@tF|!1SDvACsv!kHZ2V>CV1_b&CO9@e zJ`P?mj8^2Jn^nR{a^=jdZmX)R7i|z6#fl}2zR!PoM{*QvK%u&$6`l&>o}~2mtBDtW zTiuR|jQmqlRErm5ploJOh0hS%+e2;??DmObzFZ?~o+(HCo)WHNj#A1Qyer87>Aisi z#s_EY{rU4FbJvfVnTBCopT54n-V!o@T~VP_tbI*A8V%cBJ^PvV!RePPz~1>gipQH*7-8F2m0TJ`$$74M~U_S`V7^eKwbx znu@!qn|CymLCM(I*h|{CxA1F4W!=GiO$VB6n87zt>Pt~d1gEm<7ct~=#m74Q zjJ;0JlY3l$sxdwrlm=k^jJM(Vh%~u{w$roAVLj`wd(0QpahduId=d_a58((KHgdOf z-)!8!f1i?y%EIQe=Dj*izjGA9Hit<{hco z+dDdNYiqlbh!9O7We}D=8U6(`3!>lN-o`6u4Iq#I#5esTFLivd-M|&lGIbwz;lhPU z=RXq6(dFgkB_$<&eX0b54}Bs4Pcf^>x%u>_zP_;1!DIHCX&#-@va%lGdBa9?pQK*(5CPn2t@#gFd2-lamv`5Rg&zVKow|yb>{L zosp6({C(VQFa|Kb6}I7L2(2-8H@q7G*Jt91ZLNEe{Us zZ|Rl1Ozv6U^3XRhs8oMX8ThTdTtIbr2)i>``8qT-)Y)$(E$v1f_+Suymfum`!4`hG z2mZ+D`+Yy0Tm9&+#qthz7(U+~#YD`!I*R!;*7WE51s`w9UqM|l9#ZyI`r0Mvy1LXw zihGauipRi`X6)VFFeBA64}i#w3y5XV=OHa7qmySp78el=#WMQq>cqu=N?$oIEG%5m zkIA|Fc==U_o%$VRr6lw1#;tZHI@rrto*<7y!gwXy#N6E6;2?Xlq=dxO^tAX(vO>Zk z;d{Ly?MP=^44IMp*>~g$gGj)%YBCcPD>MA<3E1yJ>O7 zV|$OK)psw$TS0WC8W|lG77#G0(tnt$VqjnZoUrlPpe>@w&(*a`imD4Dc0S1t-K@b7 z9)yaj>R1d{CKtsaPe z>3v6ZG;PiF?2jLEJ&;x}1TeZ*QvvY~Id^w=1B2;roYu-4_A|BFSyDfzs%*ZVlPg}0 zPdAfO8$lcr2{WsVavM7eBMXA$ z46J?t4?EMixVgExyAN{$tai4swT$9&J=fdaF8ph4jR7+t z;D{UJNjnQ(wFj2eD*$qDzI!rR>Z&9C;NdWW=1+Ltjx!Da3rV2goh4=p-sI=hPPnlW zBGC&d{bbSU`RJ-DZR*%3x2muD_0&d>kl>sKKWkvyv#JZ9Ei<^V^BXg$Fa{|Q%o zLuES4%uL%cGh?WwrNt4;$i`-@rx&}Y|H-2FL422^+4Mo&Vh5N|0YiAlBFKi}aLs-J zegOe?uK0jS*~ig)-AtX`Jq=H&%8bbBL{+H|&6NR{dO_3Jlo zoGRjw=$aOs^H%9Z7cdyiIywN9MRRjTQ)f=@05y7M&o1{QqmB45tz`7Ll@&*^g{|6# z9Ty*;hSE~rRF$CVS{E3wyZb8R7(jw6=gSvC9VTgQXBX9GN))F&Z%AvC zw!1T|z6#G6`TN(y*B8Am2|}cqvKrFa-o7-Lu0Ft1_l&9eCJ4Qfr?vgJBADD2dVU)peA9Uc(S=p^C) z`pD>o^b{V{Z#Kr`E{6ws-LYl@^%gug%j{!8LBT6$nqLpLzyUm>$AW+k&}6we<^-Gw z2E6GM81^=%S8>*Pp;s5z4tR1wF}V2i1{0H6c}lw3D-Vyq>ywrBG%?rN*ap16Ws=Gu z3LtH`8gYkIDhyPM*J+h_r_?X-(?uefK0ZFazE^->R@M&~f6jx6d!r^C^NzYFD%UJX z&`*7qyF|E(ugRsy6+oXMRHCAz*T!R_qq_;jW487ez=9*%MwlspASC+){(w@G74}W7 zfcCQI(f;c4_t#mzc`pks2nNF)UKSwK?R=&Gl#^oy5|%^{78VxyFrYbLr_q#V09?+H zld&s;G|DPEpsTA3M3GzfPiF%nnSG4`^%he0@M#IO2VCA^z{!V&gitnN_=3dzjvDAq z&CGJj%G9an_eh}N5W7NC+SDY=5S}kiIl}+XQ@47y7blY%_FRk-4Ju`8Uzg@LO>QKB zisBsmr@>#~sInRVG?-7n;NC=Hp$fPM3=O;s&}VvTYUC+&a+URbFkTqv-zbHIhGL~i z>y_|FyXX`&m1T{tEaKpA-v`lPda$4hOC+hy5I7C|(=Kkj%;Cd{=`GnX@M>pk6}Ciz zz(+Xbx7-Z4R38)MR6mR7P|^$j2TjTabI+{QmOj#ScCI`v4#&z2jf|3*dNy+K=9QBm z_J9S_o1Vh>OH3}}FCs;egMFa8n*&{&i(2*;FQvLNO7~D!)?t@5McUNY%uL}Q7mUxg z>Yy>MsI2k+wZ;Sdb6=kx+D~tU3S+#hqP(DPH159dx6h1Y&E^ty(t50QinS(?P zZR7xo_yR4YxdzCPz@g8b)WPblLcO)Wn&pk21C~jyE=|cCSE#pKW1zLyOwHap)(;Xw zg25c9W{UEmy0x?RRf0eTI*j3pBVPL@&!}V7kTqYbgarlT(0kIX0~;|QWdbm<qWJ-u!1>_Gku zYFzQfMGC-k;vD>V@yF|vzd)V!LXrAm>Bq86p6xC?y<)wd|L;M+=#S5SDH8F}K zv$MBvH!Xj~KQT1)sS&#Ixv1y?X+A(sF8Wg}$kA`oPhcrteFzQ?e#M`ET%VSAmOOxp zj+GwAZ(EG>roEu``KrX&D3y*BYqAXhL>YC9j8d6l^T@&6FiSR7Q|=3Ug?36K;J$d>&*IWYk$e#f7Y!+Uce-mq9*QY^lVQ+AK3CtFB0a>(Lhguwyvy+mNVD0%d7cX{2fx-2$ zj$-&YRqMPP1xRG5ZYY@<%$I;k^0uTQ9yAzEa%rVl!!X#gO{*8)k4to3lkZ68-9?_E zF*PwUF*pBQRpn|xSKDVfJu?FpWvS9HF*7sUKODY%`SMy(V8^2PJK9v=HsU3;PPprK zJL=$F)+~iqV>z#Z;_ko|{~OB{CCJBY+Qb3IDzkU4&zk)=X6r>oL{_%V!3)D_06$xf^-b?XuZMP41Q8YV@K2RbT`Q`4=Pd#KtE-H9GPv63>2%}M-(Stndp>JU84({k;9Spy_GL;p;Q3$b 
z=x9fid}KV$8E~x<<&8}ayw+zvXCHU5xTyrFpJR+#>*@CP>&ygok+!V&och4kEwuh$ z-6qfNytOw|B0&nY9E{A&Qq9BNn^kSnzZHwGNsJND)8cB6U*)rCC4iMz)YaV!mC`R6 zYsf+qj3lSBCP5!#jQ7ZBSSt!^dg#9==zDy)*C%;V z?S7)Qg9BZ#Jji|t+{qs_?%w7yVq;?JXGP*ck{k>=1(e@2U(vb?%F&nm59CBL>z zQB5nyN*oxPe>{; zp_w;h1h~L>nX#jBQ#is3?x>SAB_KI_u612Gvu8ODeZ5X`;P-Tm1(aol z$%}RJx10FHR*c_4jtQoSWIg}A32_($>TLKt2(DQ6<9A+^g!H@olONES>8Rz0W5ZY% zP+0LAlol6*)W2c?pI=n;JZf$7j+bC+B=@_E!e@R}jT^-a2gGhBC7WHimVuJZUALB` zA<+08;*32b{&~z9f1Qz0%xR(Z=(}_bi-aV>aPxC>a&tn9#?JZ&OIzHfm!!LV8BO~V z@1O#@3D4v46W^5>&sFxQO=5MZJf7_Dlw3RPO`SUXvPigNdT^;y|9HJ}V7CgL_UxKx zwpAM9XgYK((S397A9;Xo(mRwMkJm$EFF!>bY;{;VnEVX;Jd8Dbu-6dtD8!_yx;j(F zSAX{k8@3kP5*aE`WqgEBhT`^1#1wNuL~W2?KB6>Bh*aTRBzRTI8rz8&+@ybFXXlIW z;)+VmR+rieqF)$PW=o`SRhjw@ij3L!pLNx;WLTn5Mg+ig7%ize! zyr1~2V3m7e^$?*xl^AC(h$VQ{l(^Ix;$KZ;U+hN`($dlnch4m#MOiXWhUbvOSnkME z6Ou$|_s=dE9}|-SIlbIJ)oyEY%cV?5N2lnmy+JV@Ep4W(pAoeHNS%}k+re^a5Cm6oj#GP}z7s zPclSQVbmCJxi9~o{I2APUwT^ihcIruIg-3U@LIN)@We0ij;5R%($(Z1kRF2Ug_jM! zAL;s`FR#h@__8YmSY{t^Cr`q%CPt(d5ITb;&5h z1`j7BGHw9W$q~9GX2tzrmm<=T*B}U84Ju4IF%(}Q62R-$Y4Qreaiw-=_h`n`X zCC}4f;-_(-Y@OFTWmWajZ>Zs zE@`^HZW1SvY`px$jR?527Goy8nX4ji^YS?B98>BHN6bbmzj5Q)aA|oLFJ1(-ah)OW zp}~k{W!2ZO+}zxZ$1oFK(3fy%+_#K&x>f(>;)EZMLx)-KFb~NZEGs|%{aKPSo3G1H zu6=+`<^}f6m)GyQaH;IN&X?;(dXh(Pz;`JO35zZgQEuXzAlvK2xN5dpn<&hFlHe0X zKUlyaFSpBO@J~f(R?Ty z>(6rh3+qPL1aB^yNR|G?lrD4oS(HV(PVS0*0w0~{3m6O(xP&nS2bQ1Z4rl^MzOGWd z3Uf{IAI2uUR=p@(kd$sUYh48&6#!)r#(WQjQXVp4_L%7r!Jh>E~g++CMnfcYt7vme52J&(JIlaIF~MwsuCifz0ep3 zGUOgTSw6#BIXW6p!1x1_bxGs<1D(<<<*xusf<~h&psU}_KpB_aX(Jui^*ODv@DG4* zi!v{M#?MfyFjwYx9gDGNz3*NbQWr7cud$zv%SE-!zmhoz>Mhp%dLB@q{qxt}4y3mD zOg@!C8hTtO6QB@5W@MF?p8e&O`|i_Iq61$s^yIc_^EnDY#5&$#mE>+Rb($ZgLHS)@jaKK5Y0b#y7?Qo1wZo0C3V1$hg9K+#optIMdE_(=_ z+rOQm23UW#!B@K=(FTNs_zq~`4Cc}%I26AYOa;w`x`R!>V@Xnby_jyAyPI40l%ZRj zBF821?k|;<51Uv7PZ6Vk_1no|JN(-rE917V>+LOB z&%j~W?uu+Z;*USZ#ps3P}W500q6sgN$bbMc z@##-_R(QlUz*7wi?SG|1$7T*=K@*QWhcQ5&W9+$xwsvB=m%G8YIShu+Sr0=|`FsUs z$eY}gsKUv`#pQr$ELnLCjIll@5TLZrY11Ia=QyETkf}knO?a&%tEVtlU@wqjbT$66N6RS*wB!?898WNCSU&Zd%%l@9|PWwJ3MHH7>bLk7P^GPnc!aYGcKn|U`@<(j;Q6sy-Oz}c|={c)E%0uO?O-`=dd z?7=+UiO=Wd{F?LLM9*$6L`f(5guA(LgH*%vm~E*f9{3w*WS%p=s-E{@p?)#^GS{0> zsftP4y^Yx%mhK|`Ys*eJb3qol>)N-`^HCR=C>)0m^(@($L^@>07KX24R%gn)ovxh*4@ zqJUAp;@8#D8K0WcPtoN%c|JE#h01Y+@#mBOR>Xhn;6IA^zbfi-0Y>s)r&fvcaad4+ zTs;Y~AgJTUMMXsoxRX{^EDyVvmo4g?jE&iNXaGlNnsN01Z33Op+o`-kVyff5cY;!~vXW(qJ~G+3(AfXktA&@VjJj~FW^zZF{If}7Ld3?cyhPIgO{{ao?I(phrHk1rQ_>USP;+CCy zPv+VDu@^riU?)@%E2@}i5=39tbVS@x$Ae}!@bE9jkSDEPgMu(>P!@bl%JSWvTtB+b z-s(IcoqYKFSrg}tD5H9}C>RCcli_(6*zZ4hupFk8HJ}57m5hOWtLNx?6Q;?o7M1=) zu8EVGHcQqIF)eil(w$^sfDL_XK_qieY2&bQJFG}oT3PA)FgbnV_A@G&dt=GT$?BO> z*J(pR|5Ldb1WO0RusKn!oHvM(h2{6JU%D`uG~z(4oE9(y%6Xe79N5ar3Nq8n+T1)y zm#Ta6mQX;rps)KiJ|h<~=CTI-c}d!j`k_{WzRA`wYA3?l==SPBkHs#xMkeD=-Se)f zp}D!zCip*)QHjV*Y`qQ`4^$;6lmyraqRS@V{N#nA+Ri z-2CT{BWL`J2PH3y239YeAy4Kr<{Sn+*AMOM>$aeTSy)(5@G}6&s=^73wG}0Pn;{&G z>l=Hd0Wd`CL2@jAs!Co-iQNDICf$;kC*Ei8($dlbh}mSG*Or#@+4Rbkz-WHR9Q|FK z5pDD(iGb_)`+&4F>xj-B#xjeR78Zi~dhsXNfMT!l;K75s*Qk?dyhlrEc$nDnaqHe&1!s)OcuaWfaB9(p8$&R3+xtYVLf|t4am_ExX=x-Q(zqmS?-qwY;T(eW``x>DAc%NK@di-`%e!yPOjhSMmQez^ zwLHz$gd3WPF~4Nt2CX1w+6(Oc>EbzAQaAr~F;CCb!;2r6693Xf7t+BHlrw;Zoc4+a zyRK08%wU;@7eiZC`!jBhfp6u-xU#qY`t?RxIh2`8;lvFD>~ivQAlg?3857ma>G2 zB+8a;MApe)zNivaDn=ooy9`RdIjde4cuAVNE-sFjA!1tMi1^$*Y(YeAdnyl z2n0`rK(@dTo&tf~mWM!o*h3&{k0B7ATbXqiG{6MoEqzO02!!L$5Bfl)mH`4edc{CT z+dRPbXO@YJ*id?Nb=sAvis($gD`ED&NM@*@ZU@Okk0+eX_wj%poLeFaIa85g`55)! 
zdL&WdAzfuS-N0K0<_~4M94jS_i-^0tEdqM2QGV{+${WHCDhXm;6u zx19E7SyCsVzQ{IYd^vbGBkgi|(caSc?+y{i5EuvyX^O!Rdk@1=5GX10p{y;{LLWLeYb`>Fy9Wpam0}>xNF_hVQa4Ha}ERak+m&5=^ z*=Iwr%H!a$pa>UXJh|FDNdwHUgZWJ~1cJa|n4!e7{G{m%_-6OhG8H`a7}O;gLUXW- z-Jc$&uCA`q2vcS=h=5>C^3C2_rZs>3n5bdor6TazBYH|0DFY=6xj97_yTYujtVF24 ze%(GaZ}cp`@d^S(y&|nGP|9(u&6n1OYP|GGX-pJK92323JSqrP*6`P2>93#aO|KKj zlj~=ChPO~iwm!Z*+68FuFguWAcQGCiCEN<=MK!+1-um&15k-B)I1t(gFFSl7X3b$d z&JV=+uky!*H~*Sc4X3I|G#4RYLSTr- zGq|hzk$6PN$#gMJGy$6{1|`~Z@YWZehmZ=bH684XAtX8Hw?a1&D8EzrVu`hIq?%wG z5=!)Z{zc-VE=8i-^OP&mvwF>9O^;$BZIq3LkOo-&z`At{hIcN5r__j^FocbNNGBU< zfPsY_Ly5d^RP=y6Pglo?@*Djnu6|%S95{cHL$b7}-xmHK@i=z1kmkP^%IiX8oypCB zV25n%s;(1_kICymNJphl|8){uc$gxAC*K}QTjc%!QeS#}FqpQ~dCMyFoOnB;@d)~k zaU7<08Rs+4UU3fQ?Cjh-i?DGDF(^6-j*?H1VFQPHsft}LJd}CL0(jErF67WDm`To= zUy8Ee7*NR9;D`rgl7uJ3L+5%0iP`6e5ifDTGPp09MM6+XZ@xT3MMR^JVccf}2x+0D zxU0m`9?eNXpCXo3y%oEZ48fK&CbVq>XWpLdwVVRSF;#Z>;|vl*@nc0;OXA7%Tm~E% zib3kbTq`hXb?E&1PcZrOR{r2i;6^VOoKFW4Jzr79-rynF!eBA5ilpV~Fwb-d){9TR zrv?Wh)wHEp41i|`%5sJ&>~+Ka$toAoAl@`Yo2Pu|8R{58%mLHX9~fWwU*iLt*++w5 zGj}(00yZNXf!&wM4J!bf@rvNdtXvu9e86V^iXXT{B=8qpyTqjfmly>uv9y&Wc;FJk zuw7=GLq^5L#Z#+aE{>0m{-QxWJv=u1oQ@3-c&VK^vyrhuqwUURWMsS;{@{_MHLbN< z(AcPXFIr$ps}GKn{FQMioS@}+GQFgXce7=gX#_2q>L^7sn4vgfxIsAB*Vvgvp^JpBxk13}k8l`^>IDam4(Fi$6J z5;N{UPcM$xZdPvEq}|&Z7+zhKT=I8YSm5Xw-&-Hw-)m}GeK;3}-c22+?k;OBYwd5; z?5{S`-gzb~)xX(096Ipus1`M9XVJN7Eyo%5yVrlO(`#h7HjxMaazc4`fvHDG%ZP0wpo^R&@Sgr5iog8yCVAmk&XjzMxb-S3o2((D@@+JMVZxq)^+l;z$ z+a1}V7XvyrHv`9n`@_bkPtC>s0;^oPrO%7M9H}IJZ;t*}P|y$Mh$teogKh7qOitAN zbbF?%7>m8O+BJyJy94bU_oCI#OUliD5av3E+#s+JRMHYX!9x!hDz zva^1cOX{4Y;JHAibG4y+s z2bfGdBvHuOCc20Vrhu@Coc_K*o_cyaaeHzyO5~YEf58Ps;M?hD$u8q2RvKzs>z_Zn z+0HC1G(GC%UR6&aU@pRuK^MhbC84m8hvdG#eUk!HY|7!N_!}>p#PJW1$sl@?rFmLD zi;`o>XP-%imRak~Ja4dNRSg2E!gya^C-*wZr0_+C^7sawZ@N^P3CpC!U@wUE-bCNl zJ%rzJ>cBwLR(teGdFQf*rmCZAihrW?=Xr{xWX`55_n}$8hF*}ddQE+w?lJm_`dRi= z)one^a82Y}-y4vC{kwh1ZtfY5=ND^LY*JHkf-oC^GB*9Q)|#9y!NesG^t$Xf@lU>$ zM%hB7j1%>Pc~)F?)*F6`vJSqrA~#bM0%-tgXa+gH{^xTwNwF=XmrUKqyp2?C^Z|+- zV_|nOpd@#ib4%0(`ld?q|1eq2zfYbjW@N9{%Vi~X3+`lOl-Dtntm!>U{gkW$(8v*E zviy)PI7-!rZ^JvkhRPe%a107n`mrEa7u)Js#bTzgwC<}q3)Lc1_6Q|y2uWEx@id1p zxrF?Fs&BN4yS6>&tfWLg{iCQDathC5YFP=?Wr@o#^oMmOT6wFD52gTMQ^B0Tt0qF8 zK-i2Kwe;)?raT-t0+5mUnZtW3qmsBEUy^!8x0_6VA@)!ZKr_v1vE+O>KYNhMut>Sc z)gZGwc#JbJuk&-&3;SSvtEC0^`S^yNp~&O2;I-vg*q_V!)wYk9x(|#>`Zz!L(d4eX~47J%PNe0fH#3oQ*38v|*Uh@QNre^)-%GV~pOAIG_s(u_ zFMn21jl$;M23hO&+f0-YY>>DVIp-t$Xd63qTs-juAT{&6kqC2S-LY)o$1I5=gGylg zN(*|QQVvF~>7D*^5dR|Zi4JfWIzQuK9zqp1jqARKXid3;_ZSn{Aiaw~k~A0{9|?Lm z8CcYQnok+4YLj|&q5WZvm%UM#s?pg+Cn@5x~#F9&Ckc*oSR%Vr#1k`?9@jr*}n$x2XS zw%;b=jQ}AZ>u=KKRPCTzvMBhUHDV){F3WU@P&ckmvLIamhxN)nE~{r~UrC;c&?&Ka z@ahiS0(goXHYVf|@(RkY>1yH-M&|gdP9J~TRaAOD?99Jg^ zMWDx~eU1M1f{XcdyC*U_0fLRMkRpEKCTNi2-{;g%ZvOnbTyxuVBm%PyPGS3jDozQ$ zMqHAMGL{%{2@^*m9s%Ds6rIBRF@E5xYZ8$5da18`N+b$@z&_}3@L8r&t%jME6&&t_ z{w2V7%PNbeAK~oF$fKhqA64_n9|_@@exSL3M)bE2yo}jqiS2e8&WXR#+G>%N@Ec&z z@0(biQufzqzcHte{O=-w z-uGFn>C+#dPf4gCj)>yPvEB}HT=0|FroeR%T7@+_sz^%Cpu#=zjC~A7iaPHnA!335)_!IqD7>OL!cGmOF%=EOPls1L|?EYDt0?h#$3xw$tSnCLTn`W>9<48r zu|wtjYIZUUy>-oxVqL?4Ny9`u#IVhbSu=Ank7DHw0g28}^v2Y|Gs#X_BHeuhvLcCc zC=g0Qk%(yW{#x3;ja9HE@l6daj`YD`@DL#7&w3pL4KT#L1-t#UdV#HpmiMLv{7JyS zEwv4t*`e%P>wQgK-MRDPR{>6xrihoT!gVNpnzW1Ap~iFMa}bhS>50G|o~^E~_}bB& zUQ!C++*d8vy12GxXN$=M3=v*1;M(&X8DxYP$YvpQI!a$epTwrs91-VKB3u=@b@y&V zWu*fDLutu}WSM7E&2JF`0vdi;o)?nbYLg=<6t-#csZ2_B9NB|#&et9xy*=Z~nNYto`g3p1Bk|g~kxpczEo`XJ`hM}Z3)+;R zV21P%-HF+rn)cbWzG*DQFE)UYh?Oh$Aqsif#FS{m2Ik#w|I%gOnz`u99u)54T67I& 
zOc^wfBOgT{>S;HyR|*2Yh#q$%kf%yGE@+CP)(P};UF!h~GguQdV|)46a;MY$do?qz z=}@|gJv23mo{bEYUb`{qPXM7AQh-^zRZTsW3c;vC1Kndew*q!H%}DoF+j**JEzEOD0vd=O)(f?B{rSHLcEwKifH;R%nu=mJ+447IT-)9FYfT zdZF3z?#;ir44#=D1?FpN|M}o-;aTJ0{JO zj`qiDsj9B3caJaOhWVYtbm$2z01VT}2F>G3_X2)Tnvurq{rhUavi6O}f5@&s>{M*5 z=a?S{`2&!3ViR7poSwZ`=9JJ44fV0jn{=ka32*?GI_Zci0m90i{gsY}<2*#$IJw~W zz<=Lg<@;WwjY+M3{Y!SumYHZvCl)242Pzs1`Bqu1z1za6%GwjyrZW*IX{)sl$g!qQ zIln#=q#+Yd$quFE$Z@GHkms+cfvhihTP#+NQ9y8an63L z^nmzwAA3RpK*!;qpqQ?Xtq}LUTkYnt~tG)O2o3&J+FQrP+#47LT zad5g^M-q>`5u3;f&9)4+btIyGSI<&3WjbGk*S8dA)9lv_kR48*)PyTK$M zlrP0OJ2W(l8CzKXcd0yNdZ)&7S7Dv%3yS!iydc2B2H2I5D*$&>y2I}BlH8#Lj9@DmzK!%X*ioLP|_=-4r+J8G~@4j97ry)$=Z}oo(woQpn-k~ z(~Rz3O~P%ye0hD1>PqJilO*lj!2}iB4&Q0>Im{-6HsU17>Aqz%;I#jf`t}^Wmb3DU zxilq+#_=Ej6pQ6+i#s=UKL#b5vj?%CTTBL7G~4rM^iHc*gRX?_{Rs?3SE|ya4!CMv z^$Vfedui?Vx0+DE=*T1W;=masud1}ZSt;h4i4Rjj25Flvrb+<~E|ZAu)>4aaUjp(~ zfNUmLZ~egJg-C2o;opEaq9y*$XJe?thw zOp|_O^Fp~+YA5$*seEwnBg2zU5@dex_N)N{Bf`-A>C~Q{%V}>qMC8!u`**G=7!UqK zrz{Ehdle3=k~P*^%-C{FHjV>S5S-Gfqo%vQeNOa2f=q};HqE$)IUxNv>FES`ug`w< z+-R8a?MwF|4SW&9lRZ9b=9e7P`2A(LVZtqjd-@$hGryJ7EfBnphGHBg0dKwkh+(=; zJ*Hs5vPV&r`UIk?;RD`k4Q6?GdE+N-bY*8{rETj3GAJbSB~lSkC*Rv8?7d=(6v8YuFqa!JYCF(r|Eb^J`!(Yt+k!~l; z`h>e&1o{r@BxA5rZ{3ynE-j+r*!fSu1IAG2pWOeO&3wi^IbY!BCRfjc8L}JZ=+~iq z-cH@vVXxa@Doo8epH+WWx@)r@5F0k)QswE7fv@o>{$Qg~uF5QpsVs*nmf@t`d`p(oo)E(V@)iK5}I9(Cz6)!9M z9s^*^l)he}2HFbYX50XLexVC(bIu zRJb~ua2kLl!)AveCivTtlPt7&XZ93-EqU5CQ7RMHbOI6aR-8IDWm32KGt~XDY0mRw zMb?v#=El3|dUl;ml%>;XVPlQk+R8&(b2kX&1)el+2MJ^%d3T?0E?xt+cIa4ob@ZmP zKunZn2fr_0q5qv##buc&^7T~q%0Yn4O_}{Oc8Tv8JBp{Ke;sL?6b(G(vYS1k&x~kf zf1t!6ONb_p^*9=eIwuCw1#ak1H&rQBY*#io_y2zO;pirlFjY^!U;iasgrm;+WqrMj zod)eib@hzavH16(#S}WQx>`B@vZ-lVYy8`{%$^&b$veXP^U6Pe{@lb(-`cNLl6kmo z8_c-pYsxm$~IXR)(MQN?n zKz6@+c-iT(R8uD z#E&s2?(IBjCIxc`+)ob4b7DFam(^2_w}hc(fBmYQNkI)Tw3c7v#S|DuSuoO3Y^VxM zHcS#%MolP|1aazaLBi<&YzIX}r1kL#a2!I0UYlNSfEb&YfOboQJY=}#-u0RFgI10r z=nkFGmt?{8K0b?L!F)e_;qNX{9+t87OoA}-5XyV&ORL|S12?N?=-ru=fo#kw2QdUC zhCBq|T7+ewGgj4CKjL5rm)q)pv*qtbQUYey&}KzFg?6gMH0;BW{NfL;Hy06&{xG`!5N7JlhldkBC}n!cDA}y860*-DOS1_u9G4063YuGDSIHTtuOf zO!gqL+hPwi4)O@laGazSvh}I&r94*olcAY0*PlkboI&m*a^u_OZ?)lM!=xLuP*59% zzV`iWdr&{K^oyGl#|#@)TrGj(6ABw9kAar%W&k)guMnD(MOI9LB&(uVo7RSmnt2iA z9$J48T2=SqAsga_m{T3=5{ zHK{M0J*Y%nyiDfcV?ZksIt!Y)Umc4OqmIL+5)a9x7Gvke5TobvJ1>H>sU!T;&|mt& zGS$`Pc=9hugesMZtdbWDGzZUSRw~0%&Qcn6Kg0-|MuHL+7aQ^~`;r4q?Rk;EyTYf_ zrT;GYMg59XYLb+(HD<1BM_>yJ3v+XGxp+M2t=qUcb-J+;hTRYGAfy2Baq*;m0v~s* z0O=|XN|uGAazS-|+1lDH90GR-F#qa@(JLe#?*N|k_BLRJAuDttq_0KF!&nIHK1jjs v?c%2pm}YKJSQU>sGK23>|2w4<`_R2hk78s!;~l_ -#include -#include - -/** - @defgroup ml Machine Learning - - The Machine Learning Library (MLL) is a set of classes and functions for statistical - classification, regression, and clustering of data. - - Most of the classification and regression algorithms are implemented as C++ classes. As the - algorithms have different sets of features (like an ability to handle missing measurements or - categorical input variables), there is a little common ground between the classes. This common - ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. - - See detailed overview here: @ref ml_intro. - */ - -namespace cv -{ - -namespace ml -{ - -//! @addtogroup ml -//! 
-//! @{
-
-/** @brief Variable types */
-enum VariableTypes
-{
-    VAR_NUMERICAL    =0, //!< same as VAR_ORDERED
-    VAR_ORDERED      =0, //!< ordered variables
-    VAR_CATEGORICAL  =1  //!< categorical variables
-};
-
-/** @brief %Error types */
-enum ErrorTypes
-{
-    TEST_ERROR = 0,
-    TRAIN_ERROR = 1
-};
-
-/** @brief Sample types */
-enum SampleTypes
-{
-    ROW_SAMPLE = 0, //!< each training sample is a row of samples
-    COL_SAMPLE = 1  //!< each training sample occupies a column of samples
-};
-
-/** @brief The structure represents the logarithmic grid range of statmodel parameters.
-
-It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate
-being computed by cross-validation.
- */
-class CV_EXPORTS_W ParamGrid
-{
-public:
-    /** @brief Default constructor */
-    ParamGrid();
-    /** @brief Constructor with parameters */
-    ParamGrid(double _minVal, double _maxVal, double _logStep);
-
-    CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.
-    CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.
-    /** @brief Logarithmic step for iterating the statmodel parameter.
-
-    The grid determines the following iteration sequence of the statmodel parameter values:
-    \f[(minVal, minVal*logStep, minVal*{logStep}^2, \dots, minVal*{logStep}^n),\f]
-    where \f$n\f$ is the maximal index satisfying
-    \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
-    The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
-    */
-    CV_PROP_RW double logStep;
-
-    /** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method
-
-    @param minVal minimum value of the parameter grid
-    @param maxVal maximum value of the parameter grid
-    @param logstep Logarithmic step for iterating the statmodel parameter
-    */
-    CV_WRAP static Ptr<ParamGrid> create(double minVal=0., double maxVal=0., double logstep=1.);
-};
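// A minimal sketch of how such a grid is iterated; the bounds and step below are
// hypothetical values, and ParamGrid::create is the factory declared above.
#include <opencv2/ml.hpp>
#include <iostream>

static void paramGridSketch()
{
    // minVal=0.1, maxVal=100, logStep=10 yields 0.1, 1, 10 and stops below maxVal.
    cv::Ptr<cv::ml::ParamGrid> grid = cv::ml::ParamGrid::create(0.1, 100., 10.);
    for (double v = grid->minVal; v < grid->maxVal; v *= grid->logStep)
        std::cout << v << std::endl;
}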
-
-/** @brief Class encapsulating training data.
-
-Please note that the class only specifies the interface of training data, but not implementation.
-All the statistical model classes in _ml_ module accept Ptr<TrainData> as parameter. In other
-words, you can create your own class derived from TrainData and pass a smart pointer to an instance
-of this class into StatModel::train.
-
-@sa @ref ml_intro_data
- */
-class CV_EXPORTS_W TrainData
-{
-public:
-    static inline float missingValue() { return FLT_MAX; }
-    virtual ~TrainData();
-
-    CV_WRAP virtual int getLayout() const = 0;
-    CV_WRAP virtual int getNTrainSamples() const = 0;
-    CV_WRAP virtual int getNTestSamples() const = 0;
-    CV_WRAP virtual int getNSamples() const = 0;
-    CV_WRAP virtual int getNVars() const = 0;
-    CV_WRAP virtual int getNAllVars() const = 0;
-
-    CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;
-    CV_WRAP virtual Mat getSamples() const = 0;
-    CV_WRAP virtual Mat getMissing() const = 0;
-
-    /** @brief Returns matrix of train samples
-
-    @param layout The requested layout. If it's different from the initial one, the matrix is
-        transposed. See ml::SampleTypes.
-    @param compressSamples if true, the function returns only the training samples (specified by
-        sampleIdx)
-    @param compressVars if true, the function returns the shorter training samples, containing only
-        the active variables.
-
-    In the current implementation the function tries to avoid physical data copying and returns the
-    matrix stored inside TrainData (unless the transposition or compression is needed).
-     */
-    CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,
-                                bool compressSamples=true,
-                                bool compressVars=true) const = 0;
-
-    /** @brief Returns the vector of responses
-
-    The function returns the ordered or the original categorical responses. Usually it's used in
-    regression algorithms.
-    */
-    CV_WRAP virtual Mat getTrainResponses() const = 0;
-
-    /** @brief Returns the vector of normalized categorical responses
-
-    The function returns a vector of responses. Each response is an integer from `0` to `<number of
-    classes>-1`. The actual label value can then be retrieved from the class label vector, see
-    TrainData::getClassLabels.
-    */
-    CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;
-    CV_WRAP virtual Mat getTestResponses() const = 0;
-    CV_WRAP virtual Mat getTestNormCatResponses() const = 0;
-    CV_WRAP virtual Mat getResponses() const = 0;
-    CV_WRAP virtual Mat getNormCatResponses() const = 0;
-    CV_WRAP virtual Mat getSampleWeights() const = 0;
-    CV_WRAP virtual Mat getTrainSampleWeights() const = 0;
-    CV_WRAP virtual Mat getTestSampleWeights() const = 0;
-    CV_WRAP virtual Mat getVarIdx() const = 0;
-    CV_WRAP virtual Mat getVarType() const = 0;
-    CV_WRAP virtual Mat getVarSymbolFlags() const = 0;
-    CV_WRAP virtual int getResponseType() const = 0;
-    CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
-    CV_WRAP virtual Mat getTestSampleIdx() const = 0;
-    CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;
-    virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;
-    CV_WRAP virtual Mat getDefaultSubstValues() const = 0;
-
-    CV_WRAP virtual int getCatCount(int vi) const = 0;
-
-    /** @brief Returns the vector of class labels
-
-    The function returns a vector of the unique labels that occur in the responses.
-    */
-    CV_WRAP virtual Mat getClassLabels() const = 0;
-
-    CV_WRAP virtual Mat getCatOfs() const = 0;
-    CV_WRAP virtual Mat getCatMap() const = 0;
-
-    /** @brief Splits the training data into the training and test parts
-    @sa TrainData::setTrainTestSplitRatio
-    */
-    CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;
-
-    /** @brief Splits the training data into the training and test parts
-
-    The function selects a subset of the specified relative size and then returns it as the training
-    set. If the function is not called, all the data is used for training. Please note that for
-    each of TrainData::getTrain\* there is a corresponding TrainData::getTest\*, so that the test
-    subset can be retrieved and processed as well.
-    @sa TrainData::setTrainTestSplit
-    */
-    CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;
-    CV_WRAP virtual void shuffleTrainTest() = 0;
-
-    /** @brief Returns matrix of test samples */
-    CV_WRAP virtual Mat getTestSamples() const = 0;
-
-    /** @brief Returns vector of symbolic names captured in loadFromCSV() */
-    CV_WRAP virtual void getNames(std::vector<String>& names) const = 0;
-
-    /** @brief Extract from 1D vector elements specified by passed indexes.
-    @param vec input vector (supported types: CV_32S, CV_32F, CV_64F)
-    @param idx 1D index vector
-    */
-    static CV_WRAP Mat getSubVector(const Mat& vec, const Mat& idx);
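// A minimal sketch of holding out a test subset through the TrainData interface
// declared above; the Ptr<TrainData> is assumed to come from create or loadFromCSV.
#include <opencv2/ml.hpp>

static void trainTestSplitSketch(const cv::Ptr<cv::ml::TrainData>& data)
{
    data->setTrainTestSplitRatio(0.8, /*shuffle=*/true);  // 80% train, 20% test
    CV_Assert(data->getNTrainSamples() + data->getNTestSamples() == data->getNSamples());
    cv::Mat testSamples   = data->getTestSamples();       // held-out samples
    cv::Mat testResponses = data->getTestResponses();     // matching responses
}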
-
-    /** @brief Extract from matrix rows/cols specified by passed indexes.
-    @param matrix input matrix (supported types: CV_32S, CV_32F, CV_64F)
-    @param idx 1D index vector
-    @param layout specifies to extract rows (cv::ml::ROW_SAMPLES) or to extract columns (cv::ml::COL_SAMPLES)
-    */
-    static CV_WRAP Mat getSubMatrix(const Mat& matrix, const Mat& idx, int layout);
-
-    /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.
-
-    @param filename The input file name
-    @param headerLineCount The number of lines in the beginning to skip; besides the header, the
-        function also skips empty lines and lines starting with `#`
-    @param responseStartIdx Index of the first output variable. If -1, the function considers the
-        last variable as the response
-    @param responseEndIdx Index of the last output variable + 1. If -1, then there is a single
-        response variable at responseStartIdx.
-    @param varTypeSpec The optional text string that specifies the variables' types. It has the
-        format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2`
-        (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are
-        considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`
-        should cover all the variables. If varTypeSpec is not specified, then the algorithm uses the
-        following rules:
-        - all input variables are considered ordered by default. If some column contains
-          non-numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding
-          variable is considered categorical.
-        - if there are several output variables, they are all considered as ordered. An error is
-          reported when non-numerical values are used.
-        - if there is a single output variable, then if its values are non-numerical or are all
-          integers, then it's considered categorical. Otherwise, it's considered ordered.
-    @param delimiter The character used to separate values in each line.
-    @param missch The character used to specify missing measurements. It should not be a digit.
-        Although it's a non-numerical value, it surely does not affect the decision of whether the
-        variable is ordered or categorical.
-    @note If the dataset only contains input variables and no responses, use responseStartIdx = -2
-        and responseEndIdx = 0. The output variables vector will just contain zeros.
-    */
-    static Ptr<TrainData> loadFromCSV(const String& filename,
-                                      int headerLineCount,
-                                      int responseStartIdx=-1,
-                                      int responseEndIdx=-1,
-                                      const String& varTypeSpec=String(),
-                                      char delimiter=',',
-                                      char missch='?');
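// A minimal sketch of calling loadFromCSV as declared above; "data.csv" and its
// layout (one header line, the response in the last column) are assumptions.
#include <opencv2/ml.hpp>

static cv::Ptr<cv::ml::TrainData> loadCsvSketch()
{
    cv::Ptr<cv::ml::TrainData> data =
        cv::ml::TrainData::loadFromCSV("data.csv",
                                       1,    // headerLineCount: skip one header line
                                       -1,   // responseStartIdx: last column is the response
                                       -1);  // responseEndIdx: single response variable
    CV_Assert(!data.empty());
    return data;
}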
-
-    /** @brief Creates training data from in-memory arrays.
-
-    @param samples matrix of samples. It should have CV_32F type.
-    @param layout see ml::SampleTypes.
-    @param responses matrix of responses. If the responses are scalar, they should be stored as a
-        single row or as a single column. The matrix should have type CV_32F or CV_32S (in the
-        former case the responses are considered as ordered by default; in the latter case - as
-        categorical)
-    @param varIdx vector specifying which variables to use for training. It can be an integer vector
-        (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of
-        active variables.
-    @param sampleIdx vector specifying which samples to use for training. It can be an integer
-        vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask
-        of training samples.
-    @param sampleWeights optional vector with weights for each sample. It should have CV_32F type.
-    @param varType optional vector of type CV_8U and size `<number_of_variables_in_samples> +
-        <number_of_variables_in_responses>`, containing types of each input and output variable. See
-        ml::VariableTypes.
-    */
-    CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,
-                                 InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
-                                 InputArray sampleWeights=noArray(), InputArray varType=noArray());
-};
-
-/** @brief Base class for statistical models in OpenCV ML.
- */
-class CV_EXPORTS_W StatModel : public Algorithm
-{
-public:
-    /** Predict options */
-    enum Flags {
-        UPDATE_MODEL = 1,
-        RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label
-        COMPRESSED_INPUT=2,
-        PREPROCESSED_INPUT=4
-    };
-
-    /** @brief Returns the number of variables in training samples */
-    CV_WRAP virtual int getVarCount() const = 0;
-
-    CV_WRAP virtual bool empty() const CV_OVERRIDE;
-
-    /** @brief Returns true if the model is trained */
-    CV_WRAP virtual bool isTrained() const = 0;
-    /** @brief Returns true if the model is a classifier */
-    CV_WRAP virtual bool isClassifier() const = 0;
-
-    /** @brief Trains the statistical model
-
-    @param trainData training data that can be loaded from file using TrainData::loadFromCSV or
-        created with TrainData::create.
-    @param flags optional flags, depending on the model. Some of the models can be updated with the
-        new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
-    */
-    CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );
-
-    /** @brief Trains the statistical model
-
-    @param samples training samples
-    @param layout See ml::SampleTypes.
-    @param responses vector of responses associated with the training samples.
-    */
-    CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );
-
-    /** @brief Computes error on the training or test dataset
-
-    @param data the training data
-    @param test if true, the error is computed over the test subset of the data, otherwise it's
-        computed over the training subset of the data. Please note that if you loaded a completely
-        different dataset to evaluate an already trained classifier, you will probably want not to
-        set the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so
-        that the error is computed for the whole new set. Yes, this sounds a bit confusing.
-    @param resp the optional output responses.
-
-    The method uses StatModel::predict to compute the error. For regression models the error is
-    computed as RMS, for classifiers - as a percent of misclassified samples (0%-100%).
-    */
-    CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;
-
-    /** @brief Predicts response(s) for the provided sample(s)
-
-    @param samples The input samples, floating-point matrix
-    @param results The optional output matrix of results.
-    @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
-    */
-    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
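// A minimal sketch of the train / calcError / predict flow shared by the StatModel
// subclasses; the concrete model and the data are assumed to be created elsewhere.
#include <opencv2/ml.hpp>
#include <iostream>

static void statModelSketch(const cv::Ptr<cv::ml::StatModel>& model,
                            const cv::Ptr<cv::ml::TrainData>& data)
{
    model->train(data);                                  // fit on the train subset
    cv::Mat resp;
    float testErr = model->calcError(data, true, resp);  // error on the test subset
    std::cout << "test error: " << testErr << std::endl;
    float y = model->predict(data->getTestSamples().row(0)); // one-sample prediction
    std::cout << "first test prediction: " << y << std::endl;
}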
-
-    /** @brief Create and train model with default parameters
-
-    The class must implement static `create()` method with no parameters or with all default parameter values
-    */
-    template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)
-    {
-        Ptr<_Tp> model = _Tp::create();
-        return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();
-    }
-};
-
-/****************************************************************************************\
-*                                 Normal Bayes Classifier                                *
-\****************************************************************************************/
-
-/** @brief Bayes classifier for normally distributed data.
-
-@sa @ref ml_intro_bayes
- */
-class CV_EXPORTS_W NormalBayesClassifier : public StatModel
-{
-public:
-    /** @brief Predicts the response for sample(s).
-
-    The method estimates the most probable classes for input vectors. Input vectors (one or more)
-    are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one
-    output vector outputs. The predicted class for a single input vector is returned by the method.
-    The vector outputProbs contains the output probabilities corresponding to each element of
-    result.
-    */
-    CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,
-                                       OutputArray outputProbs, int flags=0 ) const = 0;
-
-    /** Creates empty model
-    Use StatModel::train to train the model after creation. */
-    CV_WRAP static Ptr<NormalBayesClassifier> create();
-
-    /** @brief Loads and creates a serialized NormalBayesClassifier from a file
-     *
-     * Use NormalBayesClassifier::save to serialize and store a NormalBayesClassifier to disk.
-     * Load the NormalBayesClassifier from this file again, by calling this function with the path to the file.
-     * Optionally specify the node for the file containing the classifier.
-     *
-     * @param filepath path to serialized NormalBayesClassifier
-     * @param nodeName name of node containing the classifier
-     */
-    CV_WRAP static Ptr<NormalBayesClassifier> load(const String& filepath, const String& nodeName = String());
-};
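// A minimal sketch of per-class probability output with the NormalBayesClassifier
// declared above; the training data and the query samples are assumed to be given.
#include <opencv2/ml.hpp>

static void bayesSketch(const cv::Ptr<cv::ml::TrainData>& data, const cv::Mat& queries)
{
    cv::Ptr<cv::ml::NormalBayesClassifier> nb = cv::ml::NormalBayesClassifier::create();
    nb->train(data);
    cv::Mat classes, probs;
    nb->predictProb(queries, classes, probs); // predicted class and probabilities per row
}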
-
-/****************************************************************************************\
-*                          K-Nearest Neighbour Classifier                                *
-\****************************************************************************************/
-
-/** @brief The class implements K-Nearest Neighbors model
-
-@sa @ref ml_intro_knn
- */
-class CV_EXPORTS_W KNearest : public StatModel
-{
-public:
-
-    /** Default number of neighbors to use in predict method. */
-    /** @see setDefaultK */
-    CV_WRAP virtual int getDefaultK() const = 0;
-    /** @copybrief getDefaultK @see getDefaultK */
-    CV_WRAP virtual void setDefaultK(int val) = 0;
-
-    /** Whether classification or regression model should be trained. */
-    /** @see setIsClassifier */
-    CV_WRAP virtual bool getIsClassifier() const = 0;
-    /** @copybrief getIsClassifier @see getIsClassifier */
-    CV_WRAP virtual void setIsClassifier(bool val) = 0;
-
-    /** Parameter for KDTree implementation. */
-    /** @see setEmax */
-    CV_WRAP virtual int getEmax() const = 0;
-    /** @copybrief getEmax @see getEmax */
-    CV_WRAP virtual void setEmax(int val) = 0;
-
-    /** %Algorithm type, one of KNearest::Types. */
-    /** @see setAlgorithmType */
-    CV_WRAP virtual int getAlgorithmType() const = 0;
-    /** @copybrief getAlgorithmType @see getAlgorithmType */
-    CV_WRAP virtual void setAlgorithmType(int val) = 0;
-
-    /** @brief Finds the neighbors and predicts responses for input vectors.
-
-    @param samples Input samples stored by rows. It is a single-precision floating-point matrix of
-        `<number_of_samples> * k` size.
-    @param k Number of used nearest neighbors. Should be greater than 1.
-    @param results Vector with results of prediction (regression or classification) for each input
-        sample. It is a single-precision floating-point vector with `<number_of_samples>` elements.
-    @param neighborResponses Optional output values for corresponding neighbors. It is a single-
-        precision floating-point matrix of `<number_of_samples> * k` size.
-    @param dist Optional output distances from the input vectors to the corresponding neighbors. It
-        is a single-precision floating-point matrix of `<number_of_samples> * k` size.
-
-    For each input vector (a row of the matrix samples), the method finds the k nearest neighbors.
-    In case of regression, the predicted result is a mean value of the particular vector's neighbor
-    responses. In case of classification, the class is determined by voting.
-
-    For each input vector, the neighbors are sorted by their distances to the vector.
-
-    In case of C++ interface you can use output pointers to empty matrices and the function will
-    allocate memory itself.
-
-    If only a single input vector is passed, all output matrices are optional and the predicted
-    value is returned by the method.
-
-    The function is parallelized with the TBB library.
-    */
-    CV_WRAP virtual float findNearest( InputArray samples, int k,
-                                       OutputArray results,
-                                       OutputArray neighborResponses=noArray(),
-                                       OutputArray dist=noArray() ) const = 0;
-
-    /** @brief Implementations of KNearest algorithm
-       */
-    enum Types
-    {
-        BRUTE_FORCE=1,
-        KDTREE=2
-    };
-
-    /** @brief Creates the empty model
-
-    The static method creates an empty %KNearest classifier. It should then be trained using the StatModel::train method.
-    */
-    CV_WRAP static Ptr<KNearest> create();
-    /** @brief Loads and creates a serialized knearest from a file
-     *
-     * Use KNearest::save to serialize and store a KNearest to disk.
-     * Load the KNearest from this file again, by calling this function with the path to the file.
-     *
-     * @param filepath path to serialized KNearest
-     */
-    CV_WRAP static Ptr<KNearest> load(const String& filepath);
-};
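// A minimal sketch of k-NN classification with the KNearest class declared above;
// the training matrices and the 2-D query point are assumptions.
#include <opencv2/ml.hpp>

static void knnSketch(const cv::Mat& trainSamples, const cv::Mat& trainLabels)
{
    cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
    knn->setIsClassifier(true);
    knn->setDefaultK(3);
    knn->train(trainSamples, cv::ml::ROW_SAMPLE, trainLabels);

    cv::Mat query = (cv::Mat_<float>(1, 2) << 0.5f, 0.5f);
    cv::Mat results, neighborResponses, dists;
    knn->findNearest(query, 3, results, neighborResponses, dists);
}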
*/ - /** @see setC */ - CV_WRAP virtual double getC() const = 0; - /** @copybrief getC @see getC */ - CV_WRAP virtual void setC(double val) = 0; - - /** Parameter \f$\nu\f$ of a %SVM optimization problem. - For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */ - /** @see setNu */ - CV_WRAP virtual double getNu() const = 0; - /** @copybrief getNu @see getNu */ - CV_WRAP virtual void setNu(double val) = 0; - - /** Parameter \f$\epsilon\f$ of a %SVM optimization problem. - For SVM::EPS_SVR. Default value is 0. */ - /** @see setP */ - CV_WRAP virtual double getP() const = 0; - /** @copybrief getP @see getP */ - CV_WRAP virtual void setP(double val) = 0; - - /** Optional weights in the SVM::C_SVC problem, assigned to particular classes. - They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus - these weights affect the misclassification penalty for different classes. The larger weight, - the larger penalty on misclassification of data from the corresponding class. Default value is - empty Mat. */ - /** @see setClassWeights */ - CV_WRAP virtual cv::Mat getClassWeights() const = 0; - /** @copybrief getClassWeights @see getClassWeights */ - CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0; - - /** Termination criteria of the iterative %SVM training procedure which solves a partial - case of constrained quadratic optimization problem. - You can specify tolerance and/or the maximum number of iterations. Default value is - `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */ - /** @see setTermCriteria */ - CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0; - /** @copybrief getTermCriteria @see getTermCriteria */ - CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0; - - /** Type of a %SVM kernel. - See SVM::KernelTypes. Default value is SVM::RBF. */ - CV_WRAP virtual int getKernelType() const = 0; - - /** Initialize with one of predefined kernels. - See SVM::KernelTypes. */ - CV_WRAP virtual void setKernel(int kernelType) = 0; - - /** Initialize with custom kernel. - See SVM::Kernel class for implementation details */ - virtual void setCustomKernel(const Ptr &_kernel) = 0; - - //! %SVM type - enum Types { - /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows - imperfect separation of classes with penalty multiplier C for outliers. */ - C_SVC=100, - /** \f$\nu\f$-Support Vector Classification. n-class classification with possible - imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother - the decision boundary) is used instead of C. */ - NU_SVC=101, - /** Distribution Estimation (One-class %SVM). All the training data are from - the same class, %SVM builds a boundary that separates the class from the rest of the feature - space. */ - ONE_CLASS=102, - /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors - from the training set and the fitting hyper-plane must be less than p. For outliers the - penalty multiplier C is used. */ - EPS_SVR=103, - /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. - See @cite LibSVM for details. */ - NU_SVR=104 - }; - - /** @brief %SVM kernel type - - A comparison of different kernels on the following 2D test case with four classes. Four - SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three - different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). 
The color depicts the class with max score. - Bright means max-score \> 0, dark means max-score \< 0. - ![image](pics/SVM_Comparison.png) - */ - enum KernelTypes { - /** Returned by SVM::getKernelType in case when custom kernel has been set */ - CUSTOM=-1, - /** Linear kernel. No mapping is done, linear discrimination (or regression) is - done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ - LINEAR=0, - /** Polynomial kernel: - \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */ - POLY=1, - /** Radial basis function (RBF), a good choice in most cases. - \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */ - RBF=2, - /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */ - SIGMOID=3, - /** Exponential Chi2 kernel, similar to the RBF kernel: - \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */ - CHI2=4, - /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */ - INTER=5 - }; - - //! %SVM params type - enum ParamTypes { - C=0, - GAMMA=1, - P=2, - NU=3, - COEF=4, - DEGREE=5 - }; - - /** @brief Trains an %SVM with optimal parameters. - - @param data the training data that can be constructed using TrainData::create or - TrainData::loadFromCSV. - @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One - subset is used to test the model, the others form the train set. So, the %SVM algorithm is - executed kFold times. - @param Cgrid grid for C - @param gammaGrid grid for gamma - @param pGrid grid for p - @param nuGrid grid for nu - @param coeffGrid grid for coeff - @param degreeGrid grid for degree - @param balanced If true and the problem is 2-class classification then the method creates more - balanced cross-validation subsets that is proportions between classes in subsets are close - to such proportion in the whole train dataset. - - The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, - nu, coef0, degree. Parameters are considered optimal when the cross-validation - estimate of the test set error is minimal. - - If there is no need to optimize a parameter, the corresponding grid step should be set to any - value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step - = 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value - `Gamma` is taken for gamma. - - And, finally, if the optimization in a parameter is required but the corresponding grid is - unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for - gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`. - - This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the - regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and - the usual %SVM with parameters specified in params is executed. - */ - virtual bool trainAuto( const Ptr& data, int kFold = 10, - ParamGrid Cgrid = getDefaultGrid(C), - ParamGrid gammaGrid = getDefaultGrid(GAMMA), - ParamGrid pGrid = getDefaultGrid(P), - ParamGrid nuGrid = getDefaultGrid(NU), - ParamGrid coeffGrid = getDefaultGrid(COEF), - ParamGrid degreeGrid = getDefaultGrid(DEGREE), - bool balanced=false) = 0; - - /** @brief Trains an %SVM with optimal parameters - - @param samples training samples - @param layout See ml::SampleTypes. 
-    @param responses vector of responses associated with the training samples.
-    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
-        subset is used to test the model, the others form the train set. So, the %SVM algorithm is
-        executed kFold times.
-    @param Cgrid grid for C
-    @param gammaGrid grid for gamma
-    @param pGrid grid for p
-    @param nuGrid grid for nu
-    @param coeffGrid grid for coeff
-    @param degreeGrid grid for degree
-    @param balanced If true and the problem is 2-class classification then the method creates more
-        balanced cross-validation subsets, that is, proportions between classes in subsets are close
-        to such proportion in the whole train dataset.
-
-    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
-    nu, coef0, degree. Parameters are considered optimal when the cross-validation
-    estimate of the test set error is minimal.
-
-    This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only
-    offers rudimentary parameter options.
-
-    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
-    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
-    the usual %SVM with parameters specified in params is executed.
-    */
-    CV_WRAP virtual bool trainAuto(InputArray samples,
-            int layout,
-            InputArray responses,
-            int kFold = 10,
-            Ptr<ParamGrid> Cgrid = SVM::getDefaultGridPtr(SVM::C),
-            Ptr<ParamGrid> gammaGrid = SVM::getDefaultGridPtr(SVM::GAMMA),
-            Ptr<ParamGrid> pGrid = SVM::getDefaultGridPtr(SVM::P),
-            Ptr<ParamGrid> nuGrid = SVM::getDefaultGridPtr(SVM::NU),
-            Ptr<ParamGrid> coeffGrid = SVM::getDefaultGridPtr(SVM::COEF),
-            Ptr<ParamGrid> degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE),
-            bool balanced=false) = 0;
-
-    /** @brief Retrieves all the support vectors
-
-    The method returns all the support vectors as a floating-point matrix, where support vectors are
-    stored as matrix rows.
-    */
-    CV_WRAP virtual Mat getSupportVectors() const = 0;
-
-    /** @brief Retrieves all the uncompressed support vectors of a linear %SVM
-
-    The method returns all the uncompressed support vectors of a linear %SVM that the compressed
-    support vector, used for prediction, was derived from. They are returned in a floating-point
-    matrix, where the support vectors are stored as matrix rows.
-    */
-    CV_WRAP virtual Mat getUncompressedSupportVectors() const = 0;
-
-    /** @brief Retrieves the decision function
-
-    @param i the index of the decision function. If the problem solved is regression, 1-class or
-        2-class classification, then there will be just one decision function and the index should
-        always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$
-        decision functions.
-    @param alpha the optional output vector for weights, corresponding to different support vectors.
-        In the case of linear %SVM all the alpha's will be 1's.
-    @param svidx the optional output vector of indices of support vectors within the matrix of
-        support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear
-        %SVM each decision function consists of a single "compressed" support vector.
-
-    The method returns the rho parameter of the decision function, a scalar subtracted from the weighted
-    sum of kernel responses.
-    */
-    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;
-
-    /** @brief Generates a grid for %SVM parameters.
-
-    @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes.
The grid is - generated for the parameter with this ID. - - The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be - passed to the function SVM::trainAuto. - */ - static ParamGrid getDefaultGrid( int param_id ); - - /** @brief Generates a grid for %SVM parameters. - - @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is - generated for the parameter with this ID. - - The function generates a grid pointer for the specified parameter of the %SVM algorithm. - The grid may be passed to the function SVM::trainAuto. - */ - CV_WRAP static Ptr getDefaultGridPtr( int param_id ); - - /** Creates empty model. - Use StatModel::train to train the model. Since %SVM has several parameters, you may want to - find the best parameters for your problem, it can be done with SVM::trainAuto. */ - CV_WRAP static Ptr create(); - - /** @brief Loads and creates a serialized svm from a file - * - * Use SVM::save to serialize and store an SVM to disk. - * Load the SVM from this file again, by calling this function with the path to the file. - * - * @param filepath path to serialized svm - */ - CV_WRAP static Ptr load(const String& filepath); -}; - -/****************************************************************************************\ -* Expectation - Maximization * -\****************************************************************************************/ - -/** @brief The class implements the Expectation Maximization algorithm. - -@sa @ref ml_intro_em - */ -class CV_EXPORTS_W EM : public StatModel -{ -public: - //! Type of covariation matrices - enum Types { - /** A scaled identity matrix \f$\mu_k * I\f$. There is the only - parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, - when the constraint is relevant, or as a first step in the optimization (for example in case - when the data is preprocessed with PCA). The results of such preliminary estimation may be - passed again to the optimization procedure, this time with - covMatType=EM::COV_MAT_DIAGONAL. */ - COV_MAT_SPHERICAL=0, - /** A diagonal matrix with positive diagonal elements. The number of - free parameters is d for each matrix. This is most commonly used option yielding good - estimation results. */ - COV_MAT_DIAGONAL=1, - /** A symmetric positively defined matrix. The number of free - parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless - there is pretty accurate initial estimation of the parameters and/or a huge number of - training samples. */ - COV_MAT_GENERIC=2, - COV_MAT_DEFAULT=COV_MAT_DIAGONAL - }; - - //! Default parameters - enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; - - //! The initial step - enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; - - /** The number of mixture components in the Gaussian mixture model. - Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could - determine the optimal number of mixtures within a specified value range, but that is not the - case in ML yet. */ - /** @see setClustersNumber */ - CV_WRAP virtual int getClustersNumber() const = 0; - /** @copybrief getClustersNumber @see getClustersNumber */ - CV_WRAP virtual void setClustersNumber(int val) = 0; - - /** Constraint on covariance matrices which defines type of matrices. - See EM::Types. 
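    For example, a minimal illustrative sketch (values chosen arbitrarily):
    @code
    Ptr<EM> em = EM::create();
    em->setClustersNumber(3);
    em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL); // the default, and usually a good trade-off
    @endcode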
*/ - /** @see setCovarianceMatrixType */ - CV_WRAP virtual int getCovarianceMatrixType() const = 0; - /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */ - CV_WRAP virtual void setCovarianceMatrixType(int val) = 0; - - /** The termination criteria of the %EM algorithm. - The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of - M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default - maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */ - /** @see setTermCriteria */ - CV_WRAP virtual TermCriteria getTermCriteria() const = 0; - /** @copybrief getTermCriteria @see getTermCriteria */ - CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; - - /** @brief Returns weights of the mixtures - - Returns vector with the number of elements equal to the number of mixtures. - */ - CV_WRAP virtual Mat getWeights() const = 0; - /** @brief Returns the cluster centers (means of the Gaussian mixture) - - Returns matrix with the number of rows equal to the number of mixtures and number of columns - equal to the space dimensionality. - */ - CV_WRAP virtual Mat getMeans() const = 0; - /** @brief Returns covariation matrices - - Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, - each matrix is a square floating-point matrix NxN, where N is the space dimensionality. - */ - CV_WRAP virtual void getCovs(CV_OUT std::vector& covs) const = 0; - - /** @brief Returns posterior probabilities for the provided samples - - @param samples The input samples, floating-point matrix - @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains - posterior probabilities for each sample from the input - @param flags This parameter will be ignored - */ - CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; - - /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component - for the given sample. - - @param sample A sample for classification. It should be a one-channel matrix of - \f$1 \times dims\f$ or \f$dims \times 1\f$ size. - @param probs Optional output matrix that contains posterior probabilities of each component - given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. - - The method returns a two-element double vector. Zero element is a likelihood logarithm value for - the sample. First element is an index of the most probable mixture component for the given - sample. - */ - CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; - - /** @brief Estimate the Gaussian mixture parameters from a samples set. - - This variation starts with Expectation step. Initial values of the model parameters will be - estimated by the k-means algorithm. - - Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take - responses (class labels or function values) as input. Instead, it computes the *Maximum - Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the - parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in - covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each - sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most - probable mixture component for each sample). 
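    A minimal sketch of this workflow (the toy data generation is an assumption, for illustration only):
    @code
    Mat samples(300, 2, CV_64F);
    randn(samples, Scalar(0.0), Scalar(1.0)); // toy data; replace with real samples
    Ptr<EM> em = EM::create();
    em->setClustersNumber(5);
    Mat logLikelihoods, labels;
    em->trainEM(samples, logLikelihoods, labels, noArray());
    // labels now holds, per sample, the index of the most probable mixture component
    @endcode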
-
-    The trained model can be used further for prediction, just like any other classifier. The
-    trained model is similar to the NormalBayesClassifier.
-
-    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
-        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
-        it will be converted to the inner matrix of such type for the further computing.
-    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
-        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
-    @param labels The optional output "class label" for each sample:
-        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
-        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
-    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
-        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
-        CV_64FC1 type.
-    */
-    CV_WRAP virtual bool trainEM(InputArray samples,
-                                 OutputArray logLikelihoods=noArray(),
-                                 OutputArray labels=noArray(),
-                                 OutputArray probs=noArray()) = 0;
-
-    /** @brief Estimate the Gaussian mixture parameters from a samples set.
-
-    This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of
-    mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices
-    \f$S_k\f$ of mixture components.
-
-    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
-        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
-        it will be converted to the inner matrix of such type for the further computing.
-    @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
-        \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be
-        converted to the inner matrix of such type for the further computing.
-    @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
-        the covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices
-        do not have CV_64F type they will be converted to the inner matrices of such type for the
-        further computing.
-    @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
-        floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.
-    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
-        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
-    @param labels The optional output "class label" for each sample:
-        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
-        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
-    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
-        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
-        CV_64FC1 type.
-    */
-    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
-                                InputArray covs0=noArray(),
-                                InputArray weights0=noArray(),
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray()) = 0;
-
-    /** @brief Estimate the Gaussian mixture parameters from a samples set.
-
-    This variation starts with Maximization step. You need to provide initial probabilities
-    \f$p_{i,k}\f$ to use this option.
-
-    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
-        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
-        it will be converted to the inner matrix of such type for the further computing.
-    @param probs0 Initial probabilities \f$p_{i,k}\f$ of each sample to belong to each mixture
-        component. It is a one-channel floating-point matrix of \f$nsamples \times nclusters\f$ size.
-    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
-        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
-    @param labels The optional output "class label" for each sample:
-        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
-        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
-    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
-        mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
-        CV_64FC1 type.
-    */
-    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray()) = 0;
-
-    /** Creates empty %EM model.
-    The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
-    can use one of the EM::train\* methods, or load it from file using Algorithm::load\<EM\>(filename).
-    */
-    CV_WRAP static Ptr<EM> create();
-
-    /** @brief Loads and creates a serialized EM from a file
-     *
-     * Use EM::save to serialize and store an EM to disk.
-     * Load the EM from this file again, by calling this function with the path to the file.
-     * Optionally specify the node for the file containing the classifier.
-     *
-     * @param filepath path to serialized EM
-     * @param nodeName name of node containing the classifier
-     */
-    CV_WRAP static Ptr<EM> load(const String& filepath, const String& nodeName = String());
-};
-
-/****************************************************************************************\
-*                                      Decision Tree                                    *
-\****************************************************************************************/
-
-/** @brief The class represents a single decision tree or a collection of decision trees.
-
-The current public interface of the class allows the user to train only a single decision tree; however,
-the class is capable of storing multiple decision trees and using them for prediction (by summing
-responses or using a voting scheme), and the classes derived from DTrees (such as RTrees and Boost)
-use this capability to implement decision tree ensembles.
-
-@sa @ref ml_intro_trees
-*/
-class CV_EXPORTS_W DTrees : public StatModel
-{
-public:
-    /** Predict options */
-    enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };
-
-    /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to
-    find a suboptimal split.
-    If a discrete variable, on which the training procedure tries to make a split, takes more than
-    maxCategories values, the precise best subset estimation may take a very long time because the
-    algorithm is exponential in the number of categories. Instead, many decision tree engines (including our implementation)
-    try to find a sub-optimal split in this case by clustering all the samples into maxCategories
-    clusters, that is, some categories are merged together. The clustering is applied only in n \>
-    2-class classification problems for categorical variables with N \> max_categories possible
-    values.
In case of regression and 2-class classification the optimal split can be found - efficiently without employing clustering, thus the parameter is not used in these cases. - Default value is 10.*/ - /** @see setMaxCategories */ - CV_WRAP virtual int getMaxCategories() const = 0; - /** @copybrief getMaxCategories @see getMaxCategories */ - CV_WRAP virtual void setMaxCategories(int val) = 0; - - /** The maximum possible depth of the tree. - That is the training algorithms attempts to split a node while its depth is less than maxDepth. - The root node has zero depth. The actual depth may be smaller if the other termination criteria - are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the - tree is pruned. Default value is INT_MAX.*/ - /** @see setMaxDepth */ - CV_WRAP virtual int getMaxDepth() const = 0; - /** @copybrief getMaxDepth @see getMaxDepth */ - CV_WRAP virtual void setMaxDepth(int val) = 0; - - /** If the number of samples in a node is less than this parameter then the node will not be split. - - Default value is 10.*/ - /** @see setMinSampleCount */ - CV_WRAP virtual int getMinSampleCount() const = 0; - /** @copybrief getMinSampleCount @see getMinSampleCount */ - CV_WRAP virtual void setMinSampleCount(int val) = 0; - - /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold - cross-validation procedure where K is equal to CVFolds. - Default value is 10.*/ - /** @see setCVFolds */ - CV_WRAP virtual int getCVFolds() const = 0; - /** @copybrief getCVFolds @see getCVFolds */ - CV_WRAP virtual void setCVFolds(int val) = 0; - - /** If true then surrogate splits will be built. - These splits allow to work with missing data and compute variable importance correctly. - Default value is false. - @note currently it's not implemented.*/ - /** @see setUseSurrogates */ - CV_WRAP virtual bool getUseSurrogates() const = 0; - /** @copybrief getUseSurrogates @see getUseSurrogates */ - CV_WRAP virtual void setUseSurrogates(bool val) = 0; - - /** If true then a pruning will be harsher. - This will make a tree more compact and more resistant to the training data noise but a bit less - accurate. Default value is true.*/ - /** @see setUse1SERule */ - CV_WRAP virtual bool getUse1SERule() const = 0; - /** @copybrief getUse1SERule @see getUse1SERule */ - CV_WRAP virtual void setUse1SERule(bool val) = 0; - - /** If true then pruned branches are physically removed from the tree. - Otherwise they are retained and it is possible to get results from the original unpruned (or - pruned less aggressively) tree. Default value is true.*/ - /** @see setTruncatePrunedTree */ - CV_WRAP virtual bool getTruncatePrunedTree() const = 0; - /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */ - CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0; - - /** Termination criteria for regression trees. - If all absolute differences between an estimated value in a node and values of train samples - in this node are less than this parameter then the node will not be split further. Default - value is 0.01f*/ - /** @see setRegressionAccuracy */ - CV_WRAP virtual float getRegressionAccuracy() const = 0; - /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */ - CV_WRAP virtual void setRegressionAccuracy(float val) = 0; - - /** @brief The array of a priori class probabilities, sorted by the class label value. - - The parameter can be used to tune the decision tree preferences toward a certain class. 
For
-    example, if you want to detect some rare anomaly occurrence, the training base will likely
-    contain much more normal cases than anomalies, so a very good classification performance
-    will be achieved just by considering every case as normal. To avoid this, the priors can be
-    specified, where the anomaly probability is artificially increased (up to 0.5 or even
-    greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is
-    adjusted properly.
-
-    You can also think about this parameter as weights of prediction categories which determine
-    relative weights that you give to misclassification. That is, if the weight of the first
-    category is 1 and the weight of the second category is 10, then each mistake in predicting
-    the second category is equivalent to making 10 mistakes in predicting the first category.
-    Default value is empty Mat.*/
-    /** @see setPriors */
-    CV_WRAP virtual cv::Mat getPriors() const = 0;
-    /** @copybrief getPriors @see getPriors */
-    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;
-
-    /** @brief The class represents a decision tree node.
-    */
-    class CV_EXPORTS Node
-    {
-    public:
-        Node();
-        double value; //!< Value at the node: a class label in case of classification or estimated
-                      //!< function value in case of regression.
-        int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the
-                      //!< node. It is used internally in classification trees and tree ensembles.
-        int parent; //!< Index of the parent node
-        int left; //!< Index of the left child node
-        int right; //!< Index of the right child node
-        int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the
-                        //!< case of missing values.
-        int split; //!< Index of the first split
-    };
-
-    /** @brief The class represents a split in a decision tree.
-    */
-    class CV_EXPORTS Split
-    {
-    public:
-        Split();
-        int varIdx; //!< Index of variable on which the split is created.
-        bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right
-                       //!< branches are exchanged in the rule expressions below).
-        float quality; //!< The split quality, a positive number. It is used to choose the best split.
-        int next; //!< Index of the next split in the list of splits for the node
-        float c; /**< The threshold value in case of split on an ordered variable.
-                      The rule is:
-                      @code{.none}
-                      if var_value < c
-                        then next_node <- left
-                        else next_node <- right
-                      @endcode */
-        int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable.
-                            The rule is:
-                            @code{.none}
-                            if bitset[var_value] == 1
-                                then next_node <- left
-                                else next_node <- right
-                            @endcode */
-    };
-
-    /** @brief Returns indices of root nodes
-    */
-    virtual const std::vector<int>& getRoots() const = 0;
-    /** @brief Returns all the nodes
-
-    all the node indices are indices in the returned vector
-    */
-    virtual const std::vector<Node>& getNodes() const = 0;
-    /** @brief Returns all the splits
-
-    all the split indices are indices in the returned vector
-    */
-    virtual const std::vector<Split>& getSplits() const = 0;
-    /** @brief Returns all the bitsets for categorical splits
-
-    Split::subsetOfs is an offset in the returned vector
-    */
-    virtual const std::vector<int>& getSubsets() const = 0;
-
-    /** @brief Creates the empty model
-
-    The static method creates an empty decision tree with the specified parameters. It should then be
-    trained using the train method (see StatModel::train).
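    For example, a minimal training sketch (the `samples`/`labels` matrices are assumed to exist,
    with CV_32F features and CV_32S class ids):
    @code
    Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(8);
    dtree->setMinSampleCount(2);
    dtree->setCVFolds(0); // disable the built-in cross-validation pruning for this sketch
    dtree->train(samples, ROW_SAMPLE, labels);
    @endcode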
Alternatively, you can load the model from - file using Algorithm::load\(filename). - */ - CV_WRAP static Ptr create(); - - /** @brief Loads and creates a serialized DTrees from a file - * - * Use DTree::save to serialize and store an DTree to disk. - * Load the DTree from this file again, by calling this function with the path to the file. - * Optionally specify the node for the file containing the classifier - * - * @param filepath path to serialized DTree - * @param nodeName name of node containing the classifier - */ - CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); -}; - -/****************************************************************************************\ -* Random Trees Classifier * -\****************************************************************************************/ - -/** @brief The class implements the random forest predictor. - -@sa @ref ml_intro_rtrees - */ -class CV_EXPORTS_W RTrees : public DTrees -{ -public: - - /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance. - Default value is false.*/ - /** @see setCalculateVarImportance */ - CV_WRAP virtual bool getCalculateVarImportance() const = 0; - /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */ - CV_WRAP virtual void setCalculateVarImportance(bool val) = 0; - - /** The size of the randomly selected subset of features at each tree node and that are used - to find the best split(s). - If you set it to 0 then the size will be set to the square root of the total number of - features. Default value is 0.*/ - /** @see setActiveVarCount */ - CV_WRAP virtual int getActiveVarCount() const = 0; - /** @copybrief getActiveVarCount @see getActiveVarCount */ - CV_WRAP virtual void setActiveVarCount(int val) = 0; - - /** The termination criteria that specifies when the training algorithm stops. - Either when the specified number of trees is trained and added to the ensemble or when - sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the - better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes - pass a certain number of trees. Also to keep in mind, the number of tree increases the - prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS + - TermCriteria::EPS, 50, 0.1)*/ - /** @see setTermCriteria */ - CV_WRAP virtual TermCriteria getTermCriteria() const = 0; - /** @copybrief getTermCriteria @see getTermCriteria */ - CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; - - /** Returns the variable importance array. - The method returns the variable importance vector, computed at the training stage when - CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is - returned. - */ - CV_WRAP virtual Mat getVarImportance() const = 0; - - /** Returns the result of each individual tree in the forest. - In case the model is a regression problem, the method will return each of the trees' - results for each of the sample cases. If the model is a classifier, it will return - a Mat with samples + 1 rows, where the first row gives the class number and the - following rows return the votes each class had for each sample. - @param samples Array containing the samples for which votes will be calculated. - @param results Array where the result of the calculation will be written. - @param flags Flags for defining the type of RTrees. 
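    For example (illustrative; `rtrees` is an assumed trained classifier and `testSamples`
    a CV_32F row-sample matrix):
    @code
    Mat votes;
    rtrees->getVotes(testSamples, votes, 0);
    // votes.row(0) lists the class labels; each subsequent row holds
    // the per-class vote counts for one test sample
    @endcode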
- */
-    CV_WRAP virtual void getVotes(InputArray samples, OutputArray results, int flags) const = 0;
-
-    /** Returns the OOB error value, computed at the training stage when calcOOBError is set to true.
-     * If this flag was set to false, 0 is returned. The OOB error is also scaled by sample weighting.
-     */
-#if CV_VERSION_MAJOR == 4
-    CV_WRAP virtual double getOOBError() const { return 0; }
-#else
-    /*CV_WRAP*/ virtual double getOOBError() const = 0;
-#endif
-
-    /** Creates the empty model.
-    Use StatModel::train to create and train the model, or
-    Algorithm::load\<RTrees\>(filename) to load a pre-trained model.
-    */
-    CV_WRAP static Ptr<RTrees> create();
-
-    /** @brief Loads and creates a serialized RTree from a file
-     *
-     * Use RTree::save to serialize and store an RTree to disk.
-     * Load the RTree from this file again, by calling this function with the path to the file.
-     * Optionally specify the node for the file containing the classifier.
-     *
-     * @param filepath path to serialized RTree
-     * @param nodeName name of node containing the classifier
-     */
-    CV_WRAP static Ptr<RTrees> load(const String& filepath, const String& nodeName = String());
-};
-
-/****************************************************************************************\
-*                                   Boosted tree classifier                             *
-\****************************************************************************************/
-
-/** @brief Boosted tree classifier derived from DTrees
-
-@sa @ref ml_intro_boost
- */
-class CV_EXPORTS_W Boost : public DTrees
-{
-public:
-    /** Type of the boosting algorithm.
-    See Boost::Types. Default value is Boost::REAL. */
-    /** @see setBoostType */
-    CV_WRAP virtual int getBoostType() const = 0;
-    /** @copybrief getBoostType @see getBoostType */
-    CV_WRAP virtual void setBoostType(int val) = 0;
-
-    /** The number of weak classifiers.
-    Default value is 100. */
-    /** @see setWeakCount */
-    CV_WRAP virtual int getWeakCount() const = 0;
-    /** @copybrief getWeakCount @see getWeakCount */
-    CV_WRAP virtual void setWeakCount(int val) = 0;
-
-    /** A threshold between 0 and 1 used to save computational time.
-    Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
-    iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
-    /** @see setWeightTrimRate */
-    CV_WRAP virtual double getWeightTrimRate() const = 0;
-    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
-    CV_WRAP virtual void setWeightTrimRate(double val) = 0;
-
-    /** Boosting type.
-    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
-    enum Types {
-        DISCRETE=0, //!< Discrete AdaBoost.
-        REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
-                //!< and works well with categorical data.
-        LOGIT=2, //!< LogitBoost. It can produce good regression fits.
-        GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that
-                 //!< reason is often good with regression data.
-    };
-
-    /** Creates the empty model.
-    Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. */
-    CV_WRAP static Ptr<Boost> create();
-
-    /** @brief Loads and creates a serialized Boost from a file
-     *
-     * Use Boost::save to serialize and store a Boost model to disk.
-     * Load the Boost from this file again, by calling this function with the path to the file.
- * Optionally specify the node for the file containing the classifier - * - * @param filepath path to serialized Boost - * @param nodeName name of node containing the classifier - */ - CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); -}; - -/****************************************************************************************\ -* Gradient Boosted Trees * -\****************************************************************************************/ - -/*class CV_EXPORTS_W GBTrees : public DTrees -{ -public: - struct CV_EXPORTS_W_MAP Params : public DTrees::Params - { - CV_PROP_RW int weakCount; - CV_PROP_RW int lossFunctionType; - CV_PROP_RW float subsamplePortion; - CV_PROP_RW float shrinkage; - - Params(); - Params( int lossFunctionType, int weakCount, float shrinkage, - float subsamplePortion, int maxDepth, bool useSurrogates ); - }; - - enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS}; - - virtual void setK(int k) = 0; - - virtual float predictSerial( InputArray samples, - OutputArray weakResponses, int flags) const = 0; - - static Ptr create(const Params& p); -};*/ - -/****************************************************************************************\ -* Artificial Neural Networks (ANN) * -\****************************************************************************************/ - -/////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// - -/** @brief Artificial Neural Networks - Multi-Layer Perceptrons. - -Unlike many other models in ML that are constructed and trained at once, in the MLP model these -steps are separated. First, a network with the specified topology is created using the non-default -constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is -trained using a set of input and output vectors. The training procedure can be repeated more than -once, that is, the weights can be adjusted based on the new training data. - -Additional flags for StatModel::train are available: ANN_MLP::TrainFlags. - -@sa @ref ml_intro_ann - */ -class CV_EXPORTS_W ANN_MLP : public StatModel -{ -public: - /** Available training methods */ - enum TrainingMethods { - BACKPROP=0, //!< The back-propagation algorithm. - RPROP = 1, //!< The RPROP algorithm. See @cite RPROP93 for details. - ANNEAL = 2 //!< The simulated annealing algorithm. See @cite Kirkpatrick83 for details. - }; - - /** Sets training method and common parameters. - @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods. - @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL. - @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL. - */ - CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0; - - /** Returns current training method */ - CV_WRAP virtual int getTrainMethod() const = 0; - - /** Initialize the activation function for each neuron. - Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM. - @param type The type of activation function. See ANN_MLP::ActivationFunctions. - @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0. - @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0. 
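    For example (sketch):
    @code
    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes((Mat_<int>(1, 3) << 2, 10, 1)); // 2 inputs, one hidden layer of 10, 1 output
    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); // with these defaults, y = 1.7159*tanh(2/3 * x)
    @endcode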
- */ - CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0; - - /** Integer vector specifying the number of neurons in each layer including the input and output layers. - The very first element specifies the number of elements in the input layer. - The last element - number of elements in the output layer. Default value is empty Mat. - @sa getLayerSizes */ - CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0; - - /** Integer vector specifying the number of neurons in each layer including the input and output layers. - The very first element specifies the number of elements in the input layer. - The last element - number of elements in the output layer. - @sa setLayerSizes */ - CV_WRAP virtual cv::Mat getLayerSizes() const = 0; - - /** Termination criteria of the training algorithm. - You can specify the maximum number of iterations (maxCount) and/or how much the error could - change between the iterations to make the algorithm continue (epsilon). Default value is - TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/ - /** @see setTermCriteria */ - CV_WRAP virtual TermCriteria getTermCriteria() const = 0; - /** @copybrief getTermCriteria @see getTermCriteria */ - CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; - - /** BPROP: Strength of the weight gradient term. - The recommended value is about 0.1. Default value is 0.1.*/ - /** @see setBackpropWeightScale */ - CV_WRAP virtual double getBackpropWeightScale() const = 0; - /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */ - CV_WRAP virtual void setBackpropWeightScale(double val) = 0; - - /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations). - This parameter provides some inertia to smooth the random fluctuations of the weights. It can - vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough. - Default value is 0.1.*/ - /** @see setBackpropMomentumScale */ - CV_WRAP virtual double getBackpropMomentumScale() const = 0; - /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */ - CV_WRAP virtual void setBackpropMomentumScale(double val) = 0; - - /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. - Default value is 0.1.*/ - /** @see setRpropDW0 */ - CV_WRAP virtual double getRpropDW0() const = 0; - /** @copybrief getRpropDW0 @see getRpropDW0 */ - CV_WRAP virtual void setRpropDW0(double val) = 0; - - /** RPROP: Increase factor \f$\eta^+\f$. - It must be \>1. Default value is 1.2.*/ - /** @see setRpropDWPlus */ - CV_WRAP virtual double getRpropDWPlus() const = 0; - /** @copybrief getRpropDWPlus @see getRpropDWPlus */ - CV_WRAP virtual void setRpropDWPlus(double val) = 0; - - /** RPROP: Decrease factor \f$\eta^-\f$. - It must be \<1. Default value is 0.5.*/ - /** @see setRpropDWMinus */ - CV_WRAP virtual double getRpropDWMinus() const = 0; - /** @copybrief getRpropDWMinus @see getRpropDWMinus */ - CV_WRAP virtual void setRpropDWMinus(double val) = 0; - - /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. - It must be positive. Default value is FLT_EPSILON.*/ - /** @see setRpropDWMin */ - CV_WRAP virtual double getRpropDWMin() const = 0; - /** @copybrief getRpropDWMin @see getRpropDWMin */ - CV_WRAP virtual void setRpropDWMin(double val) = 0; - - /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. - It must be \>1. 
Default value is 50.*/ - /** @see setRpropDWMax */ - CV_WRAP virtual double getRpropDWMax() const = 0; - /** @copybrief getRpropDWMax @see getRpropDWMax */ - CV_WRAP virtual void setRpropDWMax(double val) = 0; - - /** ANNEAL: Update initial temperature. - It must be \>=0. Default value is 10.*/ - /** @see setAnnealInitialT */ - CV_WRAP virtual double getAnnealInitialT() const = 0; - /** @copybrief getAnnealInitialT @see getAnnealInitialT */ - CV_WRAP virtual void setAnnealInitialT(double val) = 0; - - /** ANNEAL: Update final temperature. - It must be \>=0 and less than initialT. Default value is 0.1.*/ - /** @see setAnnealFinalT */ - CV_WRAP virtual double getAnnealFinalT() const = 0; - /** @copybrief getAnnealFinalT @see getAnnealFinalT */ - CV_WRAP virtual void setAnnealFinalT(double val) = 0; - - /** ANNEAL: Update cooling ratio. - It must be \>0 and less than 1. Default value is 0.95.*/ - /** @see setAnnealCoolingRatio */ - CV_WRAP virtual double getAnnealCoolingRatio() const = 0; - /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */ - CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0; - - /** ANNEAL: Update iteration per step. - It must be \>0 . Default value is 10.*/ - /** @see setAnnealItePerStep */ - CV_WRAP virtual int getAnnealItePerStep() const = 0; - /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */ - CV_WRAP virtual void setAnnealItePerStep(int val) = 0; - - /** @brief Set/initialize anneal RNG */ - virtual void setAnnealEnergyRNG(const RNG& rng) = 0; - - /** possible activation functions */ - enum ActivationFunctions { - /** Identity function: \f$f(x)=x\f$ */ - IDENTITY = 0, - /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$ - @note - If you are using the default sigmoid activation function with the default parameter values - fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output - will range from [-1.7159, 1.7159], instead of [0,1].*/ - SIGMOID_SYM = 1, - /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */ - GAUSSIAN = 2, - /** ReLU function: \f$f(x)=max(0,x)\f$ */ - RELU = 3, - /** Leaky ReLU function: for x>0 \f$f(x)=x \f$ and x<=0 \f$f(x)=\alpha x \f$*/ - LEAKYRELU= 4 - }; - - /** Train options */ - enum TrainFlags { - /** Update the network weights, rather than compute them from scratch. In the latter case - the weights are initialized using the Nguyen-Widrow algorithm. */ - UPDATE_WEIGHTS = 1, - /** Do not normalize the input vectors. If this flag is not set, the training algorithm - normalizes each input feature independently, shifting its mean value to 0 and making the - standard deviation equal to 1. If the network is assumed to be updated frequently, the new - training data could be much different from original one. In this case, you should take care - of proper normalization. */ - NO_INPUT_SCALE = 2, - /** Do not normalize the output vectors. If the flag is not set, the training algorithm - normalizes each output feature independently, by transforming it to the certain range - depending on the used activation function. */ - NO_OUTPUT_SCALE = 4 - }; - - CV_WRAP virtual Mat getWeights(int layerIdx) const = 0; - - /** @brief Creates empty model - - Use StatModel::train to train the model, Algorithm::load\(filename) to load the pre-trained model. - Note that the train method has optional flags: ANN_MLP::TrainFlags. 
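    A minimal end-to-end sketch (the training matrices are assumptions; inputs are CV_32F rows
    and targets lie in the output range of the chosen activation function):
    @code
    Ptr<ANN_MLP> mlp = ANN_MLP::create();
    mlp->setLayerSizes((Mat_<int>(1, 3) << 2, 8, 1));
    mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM);
    mlp->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1); // weight scale and momentum
    mlp->train(TrainData::create(inputs, ROW_SAMPLE, targets));
    // later: adjust the existing weights on new data instead of retraining from scratch
    mlp->train(TrainData::create(newInputs, ROW_SAMPLE, newTargets), ANN_MLP::UPDATE_WEIGHTS);
    @endcode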
- */ - CV_WRAP static Ptr create(); - - /** @brief Loads and creates a serialized ANN from a file - * - * Use ANN::save to serialize and store an ANN to disk. - * Load the ANN from this file again, by calling this function with the path to the file. - * - * @param filepath path to serialized ANN - */ - CV_WRAP static Ptr load(const String& filepath); - -}; - -#ifndef DISABLE_OPENCV_3_COMPATIBILITY -typedef ANN_MLP ANN_MLP_ANNEAL; -#endif - -/****************************************************************************************\ -* Logistic Regression * -\****************************************************************************************/ - -/** @brief Implements Logistic Regression classifier. - -@sa @ref ml_intro_lr - */ -class CV_EXPORTS_W LogisticRegression : public StatModel -{ -public: - - /** Learning rate. */ - /** @see setLearningRate */ - CV_WRAP virtual double getLearningRate() const = 0; - /** @copybrief getLearningRate @see getLearningRate */ - CV_WRAP virtual void setLearningRate(double val) = 0; - - /** Number of iterations. */ - /** @see setIterations */ - CV_WRAP virtual int getIterations() const = 0; - /** @copybrief getIterations @see getIterations */ - CV_WRAP virtual void setIterations(int val) = 0; - - /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */ - /** @see setRegularization */ - CV_WRAP virtual int getRegularization() const = 0; - /** @copybrief getRegularization @see getRegularization */ - CV_WRAP virtual void setRegularization(int val) = 0; - - /** Kind of training method used. See LogisticRegression::Methods. */ - /** @see setTrainMethod */ - CV_WRAP virtual int getTrainMethod() const = 0; - /** @copybrief getTrainMethod @see getTrainMethod */ - CV_WRAP virtual void setTrainMethod(int val) = 0; - - /** Specifies the number of training samples taken in each step of Mini-Batch Gradient - Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It - has to take values less than the total number of training samples. */ - /** @see setMiniBatchSize */ - CV_WRAP virtual int getMiniBatchSize() const = 0; - /** @copybrief getMiniBatchSize @see getMiniBatchSize */ - CV_WRAP virtual void setMiniBatchSize(int val) = 0; - - /** Termination criteria of the algorithm. */ - /** @see setTermCriteria */ - CV_WRAP virtual TermCriteria getTermCriteria() const = 0; - /** @copybrief getTermCriteria @see getTermCriteria */ - CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; - - //! Regularization kinds - enum RegKinds { - REG_DISABLE = -1, //!< Regularization disabled - REG_L1 = 0, //!< %L1 norm - REG_L2 = 1 //!< %L2 norm - }; - - //! Training methods - enum Methods { - BATCH = 0, - MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method. - }; - - /** @brief Predicts responses for input samples and returns a float type. - - @param samples The input data for the prediction algorithm. Matrix [m x n], where each row - contains variables (features) of one object being classified. Should have data type CV_32F. - @param results Predicted labels as a column matrix of type CV_32S. - @param flags Not used. - */ - CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; - - /** @brief This function returns the trained parameters arranged across rows. - - For a two class classification problem, it returns a row matrix. It returns learnt parameters of - the Logistic Regression as a matrix of type CV_32F. 
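    Usage sketch (the `samples`/`labels` training data, of type CV_32F, is assumed):
    @code
    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.001);
    lr->setIterations(100);
    lr->setRegularization(LogisticRegression::REG_L2);
    lr->train(samples, ROW_SAMPLE, labels);
    Mat thetas = lr->get_learnt_thetas(); // a single row for a two-class problem
    @endcode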
- */ - CV_WRAP virtual Mat get_learnt_thetas() const = 0; - - /** @brief Creates empty model. - - Creates Logistic Regression model with parameters given. - */ - CV_WRAP static Ptr create(); - - /** @brief Loads and creates a serialized LogisticRegression from a file - * - * Use LogisticRegression::save to serialize and store an LogisticRegression to disk. - * Load the LogisticRegression from this file again, by calling this function with the path to the file. - * Optionally specify the node for the file containing the classifier - * - * @param filepath path to serialized LogisticRegression - * @param nodeName name of node containing the classifier - */ - CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); -}; - - -/****************************************************************************************\ -* Stochastic Gradient Descent SVM Classifier * -\****************************************************************************************/ - -/*! -@brief Stochastic Gradient Descent SVM classifier - -SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach, -as presented in @cite bottou2010large. - -The classifier has following parameters: -- model type, -- margin type, -- margin regularization (\f$\lambda\f$), -- initial step size (\f$\gamma_0\f$), -- step decreasing power (\f$c\f$), -- and termination criteria. - -The model type may have one of the following values: \ref SGD and \ref ASGD. - -- \ref SGD is the classic version of SVMSGD classifier: every next step is calculated by the formula - \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f] - where - - \f$w_t\f$ is the weights vector for decision function at step \f$t\f$, - - \f$\gamma(t)\f$ is the step size of model parameters at the iteration \f$t\f$, it is decreased on each step by the formula - \f$\gamma(t) = \gamma_0 (1 + \lambda \gamma_0 t) ^ {-c}\f$ - - \f$Q_i\f$ is the target functional from SVM task for sample with number \f$i\f$, this sample is chosen stochastically on each step of the algorithm. - -- \ref ASGD is Average Stochastic Gradient Descent SVM Classifier. ASGD classifier averages weights vector on each step of algorithm by the formula -\f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$ - -The recommended model type is ASGD (following @cite bottou2010large). - -The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN. - -- You should use \ref HARD_MARGIN type, if you have linearly separable sets. -- You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers. -- In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN. - -The other parameters may be described as follows: -- Margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers - (the less the parameter, the less probability that an outlier will be ignored). - Recommended value for SGD model is 0.0001, for ASGD model is 0.00001. - -- Initial step size parameter is the initial value for the step size \f$\gamma(t)\f$. - You will have to find the best initial step for your problem. - -- Step decreasing power is the power parameter for \f$\gamma(t)\f$ decreasing by the formula, mentioned above. - Recommended value for SGD model is 1, for ASGD model is 0.75. 
- -- Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS. - You will have to find the best termination criteria for your problem. - -Note that the parameters margin regularization, initial step size, and step decreasing power should be positive. - -To use SVMSGD algorithm do as follows: - -- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(), - setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower(). - -- then the SVM model can be trained using the train features and the correspondent labels by the method train(). - -- after that, the label of a new feature vector can be predicted using the method predict(). - -@code -// Create empty object -cv::Ptr svmsgd = SVMSGD::create(); - -// Train the Stochastic Gradient Descent SVM -svmsgd->train(trainData); - -// Predict labels for the new samples -svmsgd->predict(samples, responses); -@endcode - -*/ - -class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel -{ -public: - - /** SVMSGD type. - ASGD is often the preferable choice. */ - enum SvmsgdType - { - SGD, //!< Stochastic Gradient Descent - ASGD //!< Average Stochastic Gradient Descent - }; - - /** Margin type.*/ - enum MarginType - { - SOFT_MARGIN, //!< General case, suits to the case of non-linearly separable sets, allows outliers. - HARD_MARGIN //!< More accurate for the case of linearly separable sets. - }; - - /** - * @return the weights of the trained model (decision function f(x) = weights * x + shift). - */ - CV_WRAP virtual Mat getWeights() = 0; - - /** - * @return the shift of the trained model (decision function f(x) = weights * x + shift). - */ - CV_WRAP virtual float getShift() = 0; - - /** @brief Creates empty model. - * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to - * find the best parameters for your problem or use setOptimalParameters() to set some default parameters. - */ - CV_WRAP static Ptr create(); - - /** @brief Loads and creates a serialized SVMSGD from a file - * - * Use SVMSGD::save to serialize and store an SVMSGD to disk. - * Load the SVMSGD from this file again, by calling this function with the path to the file. - * Optionally specify the node for the file containing the classifier - * - * @param filepath path to serialized SVMSGD - * @param nodeName name of node containing the classifier - */ - CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); - - /** @brief Function sets optimal parameters values for chosen SVM SGD model. - * @param svmsgdType is the type of SVMSGD classifier. - * @param marginType is the type of margin constraint. - */ - CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0; - - /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */ - /** @see setSvmsgdType */ - CV_WRAP virtual int getSvmsgdType() const = 0; - /** @copybrief getSvmsgdType @see getSvmsgdType */ - CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0; - - /** @brief %Margin type, one of SVMSGD::MarginType. */ - /** @see setMarginType */ - CV_WRAP virtual int getMarginType() const = 0; - /** @copybrief getMarginType @see getMarginType */ - CV_WRAP virtual void setMarginType(int marginType) = 0; - - /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. 
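    For instance, applying the recommendations listed above (illustrative sketch):
    @code
    Ptr<SVMSGD> svmsgd = SVMSGD::create();
    svmsgd->setSvmsgdType(SVMSGD::ASGD);
    svmsgd->setMarginType(SVMSGD::SOFT_MARGIN);
    svmsgd->setMarginRegularization(0.00001f); // recommended starting point for ASGD
    @endcode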
-
-
-/****************************************************************************************\
-*                               Auxiliary functions declarations                         *
-\****************************************************************************************/
-
-/** @brief Generates samples from a multivariate normal distribution
-
-@param mean an average row vector
-@param cov symmetric covariance matrix
-@param nsamples returned samples count
-@param samples returned samples array
-*/
-CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples);
-
-/** @brief Creates a test set */
-CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
-                                                OutputArray samples, OutputArray responses);
-
-
-/****************************************************************************************\
-*                                   Simulated annealing solver                           *
-\****************************************************************************************/
-
-#ifdef CV_DOXYGEN
-/** @brief This class declares an example interface for the system state used in the simulated annealing optimization algorithm.
-
-@note This class is not defined in C++ code and can't be used directly - you need your own implementation with the same methods.
-*/
-struct SimulatedAnnealingSolverSystem
-{
-    /** Gives the energy value for a state of the system.*/
-    double energy() const;
-    /** Function that changes the state of the system (random perturbation).*/
-    void changeState();
-    /** Function to revert to the previous state. Can be called only once after changeState(). */
-    void reverseState();
-};
-#endif // CV_DOXYGEN
-
-/** @brief The function implements simulated annealing for optimization.
-
-See @cite Kirkpatrick83 for details.
-
-@param solverSystem optimization system (see SimulatedAnnealingSolverSystem)
-@param initialTemperature initial temperature
-@param finalTemperature final temperature
-@param coolingRatio multiplier applied to the temperature at each cooling step
-@param iterationsPerStep number of iterations per temperature changing step
-@param lastTemperature optional output for the last used temperature
-@param rngEnergy specifies a custom random number generator (cv::theRNG() by default)
-*/
-template<class SimulatedAnnealingSolverSystem>
-int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem,
-     double initialTemperature, double finalTemperature, double coolingRatio,
-     size_t iterationsPerStep,
-     CV_OUT double* lastTemperature = NULL,
-     cv::RNG& rngEnergy = cv::theRNG()
-);
-
-//! @} ml
-
-}
-}
-
-#include <opencv2/ml/ml.inl.hpp>
-
-#endif // __cplusplus
-#endif // OPENCV_ML_HPP
-
-/* End of file. */
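The contract above is easiest to see in a toy re-implementation. The solver is a C++ template and is not exposed to Python, so the following is a standalone Python sketch of the same accept/reject loop (mirroring the inline implementation in ml.inl.hpp further down in this patch); the quadratic "solver system" is an assumption chosen only for illustration:

```python
import math, random

# toy "solver system" exposing the energy/changeState/reverseState contract
class Quadratic:
    def __init__(self, x0=5.0):
        self.x = self.prev = x0
    def energy(self):
        return self.x * self.x       # minimum at x = 0
    def changeState(self):
        self.prev = self.x           # remember state for reverseState()
        self.x += random.uniform(-1.0, 1.0)
    def reverseState(self):
        self.x = self.prev

def anneal(system, initial_t, final_t, cooling_ratio, iters_per_step):
    t, prev_e, exchanges = initial_t, system.energy(), 0
    while t > final_t:
        for _ in range(iters_per_step):
            system.changeState()
            e = system.energy()
            # accept downhill moves always, uphill moves with prob exp(-dE/T)
            if e < prev_e or random.random() < math.exp(-(e - prev_e) / t):
                prev_e = e
                exchanges += 1
            else:
                system.reverseState()
        t *= cooling_ratio           # cool down
    return exchanges

s = Quadratic()
anneal(s, 10.0, 0.1, 0.95, 10)
print(round(s.x, 2))  # typically ends near the minimum at 0
```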
diff --git a/modules/ml/include/opencv2/ml/ml.hpp b/modules/ml/include/opencv2/ml/ml.hpp
deleted file mode 100644
index f6f9cd8f89..0000000000
--- a/modules/ml/include/opencv2/ml/ml.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifdef __OPENCV_BUILD
-#error this is a compatibility header which should not be used inside the OpenCV library
-#endif
-
-#include "opencv2/ml.hpp"
diff --git a/modules/ml/include/opencv2/ml/ml.inl.hpp b/modules/ml/include/opencv2/ml/ml.inl.hpp
deleted file mode 100644
index dc9c78393a..0000000000
--- a/modules/ml/include/opencv2/ml/ml.inl.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-
-#ifndef OPENCV_ML_INL_HPP
-#define OPENCV_ML_INL_HPP
-
-namespace cv { namespace ml {
-
-// declared in ml.hpp
-template<class SimulatedAnnealingSolverSystem>
-int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem,
-     double initialTemperature, double finalTemperature, double coolingRatio,
-     size_t iterationsPerStep,
-     CV_OUT double* lastTemperature,
-     cv::RNG& rngEnergy
-)
-{
-    CV_Assert(finalTemperature > 0);
-    CV_Assert(initialTemperature > finalTemperature);
-    CV_Assert(iterationsPerStep > 0);
-    CV_Assert(coolingRatio < 1.0f);
-    double Ti = initialTemperature;
-    double previousEnergy = solverSystem.energy();
-    int exchange = 0;
-    while (Ti > finalTemperature)
-    {
-        for (size_t i = 0; i < iterationsPerStep; i++)
-        {
-            solverSystem.changeState();
-            double newEnergy = solverSystem.energy();
-            if (newEnergy < previousEnergy)
-            {
-                previousEnergy = newEnergy;
-                exchange++;
-            }
-            else
-            {
-                double r = rngEnergy.uniform(0.0, 1.0);
-                if (r < std::exp(-(newEnergy - previousEnergy) / Ti))
-                {
-                    previousEnergy = newEnergy;
-                    exchange++;
-                }
-                else
-                {
-                    solverSystem.reverseState();
-                }
-            }
-        }
-        Ti *= coolingRatio;
-    }
-    if (lastTemperature)
-        *lastTemperature = Ti;
-    return exchange;
-}
-
-}} //namespace
-
-#endif // OPENCV_ML_INL_HPP
diff --git a/modules/ml/misc/java/test/MLTest.java b/modules/ml/misc/java/test/MLTest.java
deleted file mode 100644
index 504805dffa..0000000000
--- a/modules/ml/misc/java/test/MLTest.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package org.opencv.test.ml;
-
-import org.opencv.ml.Ml;
-import org.opencv.ml.SVM;
-import org.opencv.core.Mat;
-import org.opencv.core.MatOfFloat;
-import org.opencv.core.MatOfInt;
-import org.opencv.core.CvType;
-import org.opencv.test.OpenCVTestCase;
-import org.opencv.test.OpenCVTestRunner;
-
-public class MLTest extends OpenCVTestCase {
-
-    public void testSaveLoad() {
-        Mat samples = new MatOfFloat(new float[] {
-            5.1f, 3.5f, 1.4f, 0.2f,
-            4.9f, 3.0f, 1.4f, 0.2f,
-            4.7f, 3.2f, 1.3f, 0.2f,
-            4.6f, 3.1f, 1.5f, 0.2f,
-            5.0f, 3.6f, 1.4f, 0.2f,
-            7.0f, 3.2f, 4.7f, 1.4f,
-            6.4f, 3.2f, 4.5f, 1.5f,
-            6.9f, 3.1f, 4.9f, 1.5f,
-            5.5f, 2.3f, 4.0f, 1.3f,
-            6.5f, 2.8f, 4.6f, 1.5f
-        }).reshape(1, 10);
-        Mat responses = new MatOfInt(new int[] {
-            0, 0, 0, 0, 0, 1, 1, 1, 1, 1
-        }).reshape(1, 10);
-        SVM saved = SVM.create();
-        assertFalse(saved.isTrained());
-
-        saved.train(samples, Ml.ROW_SAMPLE, responses);
-        assertTrue(saved.isTrained());
-
-        String filename = OpenCVTestRunner.getTempFileName("yml");
-        saved.save(filename);
-        SVM loaded = SVM.load(filename);
-        assertTrue(loaded.isTrained());
-    }
-
-}
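The same save/load round trip as the Java test, sketched via the Python bindings (the file name and the toy iris-like rows are assumptions for illustration):

```python
import numpy as np
import cv2 as cv

samples = np.float32([[5.1, 3.5, 1.4, 0.2],
                      [4.9, 3.0, 1.4, 0.2],
                      [7.0, 3.2, 4.7, 1.4],
                      [6.4, 3.2, 4.5, 1.5]])
responses = np.int32([0, 0, 1, 1])

saved = cv.ml.SVM_create()
saved.train(samples, cv.ml.ROW_SAMPLE, responses)
saved.save('svm_iris.yml')               # serialize to disk

loaded = cv.ml.SVM_load('svm_iris.yml')  # restore and reuse
assert loaded.isTrained()
```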
"Flags": "StatModelFlags" } - } -} diff --git a/modules/ml/misc/python/pyopencv_ml.hpp b/modules/ml/misc/python/pyopencv_ml.hpp deleted file mode 100644 index 564eba5fac..0000000000 --- a/modules/ml/misc/python/pyopencv_ml.hpp +++ /dev/null @@ -1,22 +0,0 @@ -template<> -bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const ArgInfo& info) -{ - CV_UNUSED(info); - if(!obj) - return true; - return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0; -} - -template<> -bool pyopencv_to(PyObject* obj, CvSlice& r, const ArgInfo& info) -{ - CV_UNUSED(info); - if(!obj || obj == Py_None) - return true; - if(PyObject_Size(obj) == 0) - { - r = CV_WHOLE_SEQ; - return true; - } - return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0; -} \ No newline at end of file diff --git a/modules/ml/misc/python/test/test_digits.py b/modules/ml/misc/python/test/test_digits.py deleted file mode 100644 index 2d5c99826f..0000000000 --- a/modules/ml/misc/python/test/test_digits.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python - -''' -SVM and KNearest digit recognition. - -Sample loads a dataset of handwritten digits from '../data/digits.png'. -Then it trains a SVM and KNearest classifiers on it and evaluates -their accuracy. - -Following preprocessing is applied to the dataset: - - Moment-based image deskew (see deskew()) - - Digit images are split into 4 10x10 cells and 16-bin - histogram of oriented gradients is computed for each - cell - - Transform histograms to space with Hellinger metric (see [1] (RootSIFT)) - - -[1] R. Arandjelovic, A. Zisserman - "Three things everyone should know to improve object retrieval" - http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf - -''' - - -# Python 2/3 compatibility -from __future__ import print_function - -# built-in modules -from multiprocessing.pool import ThreadPool - -import cv2 as cv - -import numpy as np -from numpy.linalg import norm - - -SZ = 20 # size of each digit is SZ x SZ -CLASS_N = 10 -DIGITS_FN = 'samples/data/digits.png' - -def split2d(img, cell_size, flatten=True): - h, w = img.shape[:2] - sx, sy = cell_size - cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)] - cells = np.array(cells) - if flatten: - cells = cells.reshape(-1, sy, sx) - return cells - -def deskew(img): - m = cv.moments(img) - if abs(m['mu02']) < 1e-2: - return img.copy() - skew = m['mu11']/m['mu02'] - M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) - img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) - return img - -class StatModel(object): - def load(self, fn): - self.model.load(fn) # Known bug: https://github.com/opencv/opencv/issues/4969 - def save(self, fn): - self.model.save(fn) - -class KNearest(StatModel): - def __init__(self, k = 3): - self.k = k - self.model = cv.ml.KNearest_create() - - def train(self, samples, responses): - self.model.train(samples, cv.ml.ROW_SAMPLE, responses) - - def predict(self, samples): - _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k) - return results.ravel() - -class SVM(StatModel): - def __init__(self, C = 1, gamma = 0.5): - self.model = cv.ml.SVM_create() - self.model.setGamma(gamma) - self.model.setC(C) - self.model.setKernel(cv.ml.SVM_RBF) - self.model.setType(cv.ml.SVM_C_SVC) - - def train(self, samples, responses): - self.model.train(samples, cv.ml.ROW_SAMPLE, responses) - - def predict(self, samples): - return self.model.predict(samples)[1].ravel() - - -def evaluate_model(model, digits, 
-
-
-def evaluate_model(model, digits, samples, labels):
-    resp = model.predict(samples)
-    err = (labels != resp).mean()
-
-    confusion = np.zeros((10, 10), np.int32)
-    for i, j in zip(labels, resp):
-        confusion[int(i), int(j)] += 1
-
-    return err, confusion
-
-def preprocess_simple(digits):
-    return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
-
-def preprocess_hog(digits):
-    samples = []
-    for img in digits:
-        gx = cv.Sobel(img, cv.CV_32F, 1, 0)
-        gy = cv.Sobel(img, cv.CV_32F, 0, 1)
-        mag, ang = cv.cartToPolar(gx, gy)
-        bin_n = 16
-        bin = np.int32(bin_n*ang/(2*np.pi))
-        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
-        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
-        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
-        hist = np.hstack(hists)
-
-        # transform to Hellinger kernel
-        eps = 1e-7
-        hist /= hist.sum() + eps
-        hist = np.sqrt(hist)
-        hist /= norm(hist) + eps
-
-        samples.append(hist)
-    return np.float32(samples)
-
-from tests_common import NewOpenCVTests
-
-class digits_test(NewOpenCVTests):
-
-    def load_digits(self, fn):
-        digits_img = self.get_sample(fn, 0)
-        digits = split2d(digits_img, (SZ, SZ))
-        labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)
-        return digits, labels
-
-    def test_digits(self):
-
-        digits, labels = self.load_digits(DIGITS_FN)
-
-        # shuffle digits
-        rand = np.random.RandomState(321)
-        shuffle = rand.permutation(len(digits))
-        digits, labels = digits[shuffle], labels[shuffle]
-
-        digits2 = list(map(deskew, digits))
-        samples = preprocess_hog(digits2)
-
-        train_n = int(0.9*len(samples))
-        _digits_train, digits_test = np.split(digits2, [train_n])
-        samples_train, samples_test = np.split(samples, [train_n])
-        labels_train, labels_test = np.split(labels, [train_n])
-        errors = list()
-        confusionMatrixes = list()
-
-        model = KNearest(k=4)
-        model.train(samples_train, labels_train)
-        error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
-        errors.append(error)
-        confusionMatrixes.append(confusion)
-
-        model = SVM(C=2.67, gamma=5.383)
-        model.train(samples_train, labels_train)
-        error, confusion = evaluate_model(model, digits_test, samples_test, labels_test)
-        errors.append(error)
-        confusionMatrixes.append(confusion)
-
-        eps = 0.001
-        normEps = len(samples_test) * 0.02
-
-        confusionKNN = [[45,  0,  0,  0,  0,  0,  0,  0,  0,  0],
-                        [ 0, 57,  0,  0,  0,  0,  0,  0,  0,  0],
-                        [ 0,  0, 59,  1,  0,  0,  0,  0,  1,  0],
-                        [ 0,  0,  0, 43,  0,  0,  0,  1,  0,  0],
-                        [ 0,  0,  0,  0, 38,  0,  2,  0,  0,  0],
-                        [ 0,  0,  0,  2,  0, 48,  0,  0,  1,  0],
-                        [ 0,  1,  0,  0,  0,  0, 51,  0,  0,  0],
-                        [ 0,  0,  1,  0,  0,  0,  0, 54,  0,  0],
-                        [ 0,  0,  0,  0,  0,  1,  0,  0, 46,  0],
-                        [ 1,  1,  0,  1,  1,  0,  0,  0,  2, 42]]
-
-        confusionSVM = [[45,  0,  0,  0,  0,  0,  0,  0,  0,  0],
-                        [ 0, 57,  0,  0,  0,  0,  0,  0,  0,  0],
-                        [ 0,  0, 59,  2,  0,  0,  0,  0,  0,  0],
-                        [ 0,  0,  0, 43,  0,  0,  0,  1,  0,  0],
-                        [ 0,  0,  0,  0, 40,  0,  0,  0,  0,  0],
-                        [ 0,  0,  0,  1,  0, 50,  0,  0,  0,  0],
-                        [ 0,  0,  0,  0,  1,  0, 51,  0,  0,  0],
-                        [ 0,  0,  1,  0,  0,  0,  0, 54,  0,  0],
-                        [ 0,  0,  0,  0,  0,  0,  0,  0, 47,  0],
-                        [ 0,  1,  0,  1,  0,  0,  0,  0,  1, 45]]
-
-        self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps)
-        self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps)
-
-        self.assertLess(errors[0] - 0.034, eps)
-        self.assertLess(errors[1] - 0.018, eps)
-
-
-if __name__ == '__main__':
-    NewOpenCVTests.bootstrap()
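For intuition about the moment-based deskew used in test_digits.py above: the skew is estimated as mu11/mu02 and removed with a shear. A quick standalone check (the synthetic slanted stroke is an assumption chosen only for illustration; with linear interpolation the correction is approximate):

```python
import numpy as np
import cv2 as cv

SZ = 20

def deskew(img):  # same moment-based deskew as in test_digits.py above
    m = cv.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    return cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)

# synthetic slanted stroke: a vertical bar sheared to the right
img = np.zeros((SZ, SZ), np.uint8)
for y in range(2, 18):
    img[y, 9 + (y - 10) // 4] = 255

m0, m1 = cv.moments(img), cv.moments(deskew(img))
# the residual skew should be much closer to zero after deskewing
print(m0['mu11']/m0['mu02'], m1['mu11']/m1['mu02'])
```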
diff --git a/modules/ml/misc/python/test/test_goodfeatures.py b/modules/ml/misc/python/test/test_goodfeatures.py
deleted file mode 100644
index a590ba9fa9..0000000000
--- a/modules/ml/misc/python/test/test_goodfeatures.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import cv2 as cv
-import numpy as np
-
-from tests_common import NewOpenCVTests
-
-class TestGoodFeaturesToTrack_test(NewOpenCVTests):
-    def test_goodFeaturesToTrack(self):
-        arr = self.get_sample('samples/data/lena.jpg', 0)
-        original = arr.copy()
-        threshes = [ x / 100. for x in range(1,10) ]
-        numPoints = 20000
-
-        results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
-        # Check that goodFeaturesToTrack has not modified the input image
-        self.assertTrue(arr.tostring() == original.tostring())
-        # Check for repeatability
-        for i in range(1):
-            results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
-            for t in threshes:
-                self.assertTrue(len(results2[t]) == len(results[t]))
-                for i in range(len(results[t])):
-                    self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0)
-
-        for t0,t1 in zip(threshes, threshes[1:]):
-            r0 = results[t0]
-            r1 = results[t1]
-            # Increasing thresh should make the result list shorter
-            self.assertTrue(len(r0) > len(r1))
-            # Increasing thresh should only truncate the result list
-            for i in range(len(r1)):
-                self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0)
-
-
-if __name__ == '__main__':
-    NewOpenCVTests.bootstrap()
diff --git a/modules/ml/misc/python/test/test_knearest.py b/modules/ml/misc/python/test/test_knearest.py
deleted file mode 100644
index 8ae0be5f73..0000000000
--- a/modules/ml/misc/python/test/test_knearest.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import cv2 as cv
-
-from tests_common import NewOpenCVTests
-
-class knearest_test(NewOpenCVTests):
-    def test_load(self):
-        k_nearest = cv.ml.KNearest_load(self.find_file("ml/opencv_ml_knn.xml"))
-        self.assertFalse(k_nearest.empty())
-        self.assertTrue(k_nearest.isTrained())
-
-if __name__ == '__main__':
-    NewOpenCVTests.bootstrap()
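test_knearest.py above only exercises loading a pre-trained model. For completeness, a minimal sketch of training and querying KNearest directly (the toy data are an assumption for illustration):

```python
import numpy as np
import cv2 as cv

# two well-separated 2-D classes
samples = np.float32([[0, 0], [1, 1], [9, 9], [10, 10]])
responses = np.float32([0, 0, 1, 1])

knn = cv.ml.KNearest_create()
knn.train(samples, cv.ml.ROW_SAMPLE, responses)

# classify a new point by its 3 nearest neighbours
_ret, results, _neighbours, _dists = knn.findNearest(np.float32([[8, 8]]), k=3)
print(results.ravel())  # should be [1.]
```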
diff --git a/modules/ml/misc/python/test/test_letter_recog.py b/modules/ml/misc/python/test/test_letter_recog.py
deleted file mode 100644
index 66bef39061..0000000000
--- a/modules/ml/misc/python/test/test_letter_recog.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env python
-
-'''
-The sample demonstrates how to train a Random Trees classifier
-(or Boosting classifier, or MLP, or KNearest, or Support Vector Machines) using the provided dataset.
-
-We use the sample database letter-recognition.data
-from the UCI Repository, here is the link:
-
-Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
-UCI Repository of machine learning databases
-[http://www.ics.uci.edu/~mlearn/MLRepository.html].
-Irvine, CA: University of California, Department of Information and Computer Science.
-
-The dataset consists of 20000 feature vectors along with the
-responses: capital Latin letters A..Z.
-The first 10000 samples are used for training
-and the remaining 10000 to test the classifier.
-======================================================
-  Models: RTrees, KNearest, Boost, SVM, MLP
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-def load_base(fn):
-    a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
-    samples, responses = a[:,1:], a[:,0]
-    return samples, responses
-
-class LetterStatModel(object):
-    class_n = 26
-    train_ratio = 0.5
-
-    def load(self, fn):
-        self.model.load(fn)
-    def save(self, fn):
-        self.model.save(fn)
-
-    def unroll_samples(self, samples):
-        sample_n, var_n = samples.shape
-        new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32)
-        new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0)
-        new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n)
-        return new_samples
-
-    def unroll_responses(self, responses):
-        sample_n = len(responses)
-        new_responses = np.zeros(sample_n*self.class_n, np.int32)
-        resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n )
-        new_responses[resp_idx] = 1
-        return new_responses
-
-class RTrees(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.RTrees_create()
-
-    def train(self, samples, responses):
-        #sample_n, var_n = samples.shape
-        self.model.setMaxDepth(20)
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.ravel()
-
-
-class KNearest(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.KNearest_create()
-
-    def train(self, samples, responses):
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
-
-    def predict(self, samples):
-        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
-        return results.ravel()
-
-
-class Boost(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.Boost_create()
-
-    def train(self, samples, responses):
-        _sample_n, var_n = samples.shape
-        new_samples = self.unroll_samples(samples)
-        new_responses = self.unroll_responses(responses)
-        var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
-
-        self.model.setWeakCount(15)
-        self.model.setMaxDepth(10)
-        self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
-
-    def predict(self, samples):
-        new_samples = self.unroll_samples(samples)
-        _ret, resp = self.model.predict(new_samples)
-
-        return resp.ravel().reshape(-1, self.class_n).argmax(1)
-
-
-class SVM(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.SVM_create()
-
-    def train(self, samples, responses):
-        self.model.setType(cv.ml.SVM_C_SVC)
-        self.model.setC(1)
-        self.model.setKernel(cv.ml.SVM_RBF)
-        self.model.setGamma(.1)
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.ravel()
-
-
-class MLP(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.ANN_MLP_create()
-
-    def train(self, samples, responses):
-        _sample_n, var_n = samples.shape
-        new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
-        layer_sizes = np.int32([var_n, 100, 100, self.class_n])
-
-        self.model.setLayerSizes(layer_sizes)
-        self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
-        self.model.setBackpropMomentumScale(0)
-        self.model.setBackpropWeightScale(0.001)
-        self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
-        self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
-
-        self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.argmax(-1)
-
-from tests_common import NewOpenCVTests
-
-class letter_recog_test(NewOpenCVTests):
-
-    def test_letter_recog(self):
-
-        eps = 0.01
-
-        models = [RTrees, KNearest, Boost, SVM, MLP]
-        models = dict( [(cls.__name__.lower(), cls) for cls in models] )
-        testErrors = {RTrees: (98.930000, 92.390000), KNearest: (94.960000, 92.010000),
-            Boost: (85.970000, 74.920000), SVM: (99.780000, 95.680000), MLP: (90.060000, 87.410000)}
-
-        for model in models:
-            Model = models[model]
-            classifier = Model()
-
-            samples, responses = load_base(self.repoPath + '/samples/data/letter-recognition.data')
-            train_n = int(len(samples)*classifier.train_ratio)
-
-            classifier.train(samples[:train_n], responses[:train_n])
-            train_rate = np.mean(classifier.predict(samples[:train_n]) == responses[:train_n].astype(int))
-            test_rate  = np.mean(classifier.predict(samples[train_n:]) == responses[train_n:].astype(int))
-
-            self.assertLess(train_rate - testErrors[Model][0], eps)
-            self.assertLess(test_rate - testErrors[Model][1], eps)
-
-
-if __name__ == '__main__':
-    NewOpenCVTests.bootstrap()
diff --git a/modules/ml/src/ann_mlp.cpp b/modules/ml/src/ann_mlp.cpp
deleted file mode 100644
index c6a4552c9e..0000000000
--- a/modules/ml/src/ann_mlp.cpp
+++ /dev/null
@@ -1,1534 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                        Intel License Agreement
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-// -//M*/ - -#include "precomp.hpp" - -namespace cv { namespace ml { - -struct AnnParams -{ - AnnParams() - { - termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 ); - trainMethod = ANN_MLP::RPROP; - bpDWScale = bpMomentScale = 0.1; - rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5; - rpDWMin = FLT_EPSILON; rpDWMax = 50.; - initialT=10;finalT=0.1,coolingRatio=0.95;itePerStep=10; - rEnergy = cv::RNG(12345); - } - - TermCriteria termCrit; - int trainMethod; - - double bpDWScale; - double bpMomentScale; - - double rpDW0; - double rpDWPlus; - double rpDWMinus; - double rpDWMin; - double rpDWMax; - - double initialT; - double finalT; - double coolingRatio; - int itePerStep; - RNG rEnergy; -}; - -template -inline T inBounds(T val, T min_val, T max_val) -{ - return std::min(std::max(val, min_val), max_val); -} - -class SimulatedAnnealingANN_MLP -{ -protected: - ml::ANN_MLP& nn; - Ptr data; - int nbVariables; - vector adrVariables; - RNG rVar; - RNG rIndex; - double varTmp; - int index; -public: - SimulatedAnnealingANN_MLP(ml::ANN_MLP& x, const Ptr& d) : nn(x), data(d), varTmp(0.0), index(0) - { - initVarMap(); - } - ~SimulatedAnnealingANN_MLP() {} - - void changeState() - { - index = rIndex.uniform(0, nbVariables); - double dv = rVar.uniform(-1.0, 1.0); - varTmp = *adrVariables[index]; - *adrVariables[index] = dv; - } - - void reverseState() - { - *adrVariables[index] = varTmp; - } - - double energy() const { return nn.calcError(data, false, noArray()); } - -protected: - void initVarMap() - { - Mat l = nn.getLayerSizes(); - nbVariables = 0; - adrVariables.clear(); - int nlayers = (int)l.total(); - for (int i = 1; i < nlayers-1; i++) - { - Mat w = nn.getWeights(i); - for (int j = 0; j < w.rows; j++) - { - for (int k = 0; k < w.cols; k++, nbVariables++) - { - if (j == w.rows - 1) - { - adrVariables.push_back(&w.at(w.rows - 1, k)); - } - else - { - adrVariables.push_back(&w.at(j, k)); - } - } - } - } - } - -}; - -class ANN_MLPImpl CV_FINAL : public ANN_MLP -{ -public: - ANN_MLPImpl() - { - clear(); - setActivationFunction( SIGMOID_SYM, 0, 0); - setLayerSizes(Mat()); - setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON); - } - - virtual ~ANN_MLPImpl() CV_OVERRIDE {} - - inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } - inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.termCrit = val; } - inline double getBackpropWeightScale() const CV_OVERRIDE { return params.bpDWScale; } - inline void setBackpropWeightScale(double val) CV_OVERRIDE { params.bpDWScale = val; } - inline double getBackpropMomentumScale() const CV_OVERRIDE { return params.bpMomentScale; } - inline void setBackpropMomentumScale(double val) CV_OVERRIDE { params.bpMomentScale = val; } - inline double getRpropDW0() const CV_OVERRIDE { return params.rpDW0; } - inline void setRpropDW0(double val) CV_OVERRIDE { params.rpDW0 = val; } - inline double getRpropDWPlus() const CV_OVERRIDE { return params.rpDWPlus; } - inline void setRpropDWPlus(double val) CV_OVERRIDE { params.rpDWPlus = val; } - inline double getRpropDWMinus() const CV_OVERRIDE { return params.rpDWMinus; } - inline void setRpropDWMinus(double val) CV_OVERRIDE { params.rpDWMinus = val; } - inline double getRpropDWMin() const CV_OVERRIDE { return params.rpDWMin; } - inline void setRpropDWMin(double val) CV_OVERRIDE { params.rpDWMin = val; } - inline double getRpropDWMax() const CV_OVERRIDE { return params.rpDWMax; } - inline void setRpropDWMax(double val) CV_OVERRIDE { params.rpDWMax = val; } - inline double 
getAnnealInitialT() const CV_OVERRIDE { return params.initialT; } - inline void setAnnealInitialT(double val) CV_OVERRIDE { params.initialT = val; } - inline double getAnnealFinalT() const CV_OVERRIDE { return params.finalT; } - inline void setAnnealFinalT(double val) CV_OVERRIDE { params.finalT = val; } - inline double getAnnealCoolingRatio() const CV_OVERRIDE { return params.coolingRatio; } - inline void setAnnealCoolingRatio(double val) CV_OVERRIDE { params.coolingRatio = val; } - inline int getAnnealItePerStep() const CV_OVERRIDE { return params.itePerStep; } - inline void setAnnealItePerStep(int val) CV_OVERRIDE { params.itePerStep = val; } - // disabled getAnnealEnergyRNG() - inline void setAnnealEnergyRNG(const RNG& val) CV_OVERRIDE { params.rEnergy = val; } - - void clear() CV_OVERRIDE - { - min_val = max_val = min_val1 = max_val1 = 0.; - rng = RNG((uint64)-1); - weights.clear(); - trained = false; - max_buf_sz = 1 << 12; - } - - int layer_count() const { return (int)layer_sizes.size(); } - - void setTrainMethod(int method, double param1, double param2) CV_OVERRIDE - { - if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP && method != ANN_MLP::ANNEAL) - method = ANN_MLP::RPROP; - params.trainMethod = method; - if(method == ANN_MLP::RPROP ) - { - if( param1 < FLT_EPSILON ) - param1 = 1.; - params.rpDW0 = param1; - params.rpDWMin = std::max( param2, 0. ); - } - else if (method == ANN_MLP::BACKPROP) - { - if (param1 <= 0) - param1 = 0.1; - params.bpDWScale = inBounds(param1, 1e-3, 1.); - if (param2 < 0) - param2 = 0.1; - params.bpMomentScale = std::min(param2, 1.); - } - } - - int getTrainMethod() const CV_OVERRIDE - { - return params.trainMethod; - } - - void setActivationFunction(int _activ_func, double _f_param1, double _f_param2) CV_OVERRIDE - { - if( _activ_func < 0 || _activ_func > LEAKYRELU) - CV_Error( CV_StsOutOfRange, "Unknown activation function" ); - - activ_func = _activ_func; - - switch( activ_func ) - { - case SIGMOID_SYM: - max_val = 0.95; min_val = -max_val; - max_val1 = 0.98; min_val1 = -max_val1; - if( fabs(_f_param1) < FLT_EPSILON ) - _f_param1 = 2./3; - if( fabs(_f_param2) < FLT_EPSILON ) - _f_param2 = 1.7159; - break; - case GAUSSIAN: - max_val = 1.; min_val = 0.05; - max_val1 = 1.; min_val1 = 0.02; - if (fabs(_f_param1) < FLT_EPSILON) - _f_param1 = 1.; - if (fabs(_f_param2) < FLT_EPSILON) - _f_param2 = 1.; - break; - case RELU: - if (fabs(_f_param1) < FLT_EPSILON) - _f_param1 = 1; - min_val = max_val = min_val1 = max_val1 = 0.; - _f_param2 = 0.; - break; - case LEAKYRELU: - if (fabs(_f_param1) < FLT_EPSILON) - _f_param1 = 0.01; - min_val = max_val = min_val1 = max_val1 = 0.; - _f_param2 = 0.; - break; - default: - min_val = max_val = min_val1 = max_val1 = 0.; - _f_param1 = 1.; - _f_param2 = 0.; - } - - f_param1 = _f_param1; - f_param2 = _f_param2; - } - - - void init_weights() - { - int i, j, k, l_count = layer_count(); - - for( i = 1; i < l_count; i++ ) - { - int n1 = layer_sizes[i-1]; - int n2 = layer_sizes[i]; - double val = 0, G = n2 > 2 ? 
0.7*pow((double)n1,1./(n2-1)) : 1.; - double* w = weights[i].ptr(); - - // initialize weights using Nguyen-Widrow algorithm - for( j = 0; j < n2; j++ ) - { - double s = 0; - for( k = 0; k <= n1; k++ ) - { - val = rng.uniform(0., 1.)*2-1.; - w[k*n2 + j] = val; - s += fabs(val); - } - - if( i < l_count - 1 ) - { - s = 1./(s - fabs(val)); - for( k = 0; k <= n1; k++ ) - w[k*n2 + j] *= s; - w[n1*n2 + j] *= G*(-1+j*2./n2); - } - } - } - } - - Mat getLayerSizes() const CV_OVERRIDE - { - return Mat_(layer_sizes, true); - } - - void setLayerSizes( InputArray _layer_sizes ) CV_OVERRIDE - { - clear(); - - _layer_sizes.copyTo(layer_sizes); - int l_count = layer_count(); - - weights.resize(l_count + 2); - max_lsize = 0; - - if( l_count > 0 ) - { - for( int i = 0; i < l_count; i++ ) - { - int n = layer_sizes[i]; - if( n < 1 + (0 < i && i < l_count-1)) - CV_Error( CV_StsOutOfRange, - "there should be at least one input and one output " - "and every hidden layer must have more than 1 neuron" ); - max_lsize = std::max( max_lsize, n ); - if( i > 0 ) - weights[i].create(layer_sizes[i-1]+1, n, CV_64F); - } - - int ninputs = layer_sizes.front(); - int noutputs = layer_sizes.back(); - weights[0].create(1, ninputs*2, CV_64F); - weights[l_count].create(1, noutputs*2, CV_64F); - weights[l_count+1].create(1, noutputs*2, CV_64F); - } - } - - float predict( InputArray _inputs, OutputArray _outputs, int ) const CV_OVERRIDE - { - if( !trained ) - CV_Error( CV_StsError, "The network has not been trained or loaded" ); - - Mat inputs = _inputs.getMat(); - int type = inputs.type(), l_count = layer_count(); - int n = inputs.rows, dn0 = n; - - CV_Assert( (type == CV_32F || type == CV_64F) && inputs.cols == layer_sizes[0] ); - int noutputs = layer_sizes[l_count-1]; - Mat outputs; - - int min_buf_sz = 2*max_lsize; - int buf_sz = n*min_buf_sz; - - if( buf_sz > max_buf_sz ) - { - dn0 = max_buf_sz/min_buf_sz; - dn0 = std::max( dn0, 1 ); - buf_sz = dn0*min_buf_sz; - } - - cv::AutoBuffer _buf(buf_sz+noutputs); - double* buf = _buf.data(); - - if( !_outputs.needed() ) - { - CV_Assert( n == 1 ); - outputs = Mat(n, noutputs, type, buf + buf_sz); - } - else - { - _outputs.create(n, noutputs, type); - outputs = _outputs.getMat(); - } - - int dn = 0; - for( int i = 0; i < n; i += dn ) - { - dn = std::min( dn0, n - i ); - - Mat layer_in = inputs.rowRange(i, i + dn); - Mat layer_out( dn, layer_in.cols, CV_64F, buf); - - scale_input( layer_in, layer_out ); - layer_in = layer_out; - - for( int j = 1; j < l_count; j++ ) - { - double* data = buf + ((j&1) ? 
max_lsize*dn0 : 0); - int cols = layer_sizes[j]; - - layer_out = Mat(dn, cols, CV_64F, data); - Mat w = weights[j].rowRange(0, layer_in.cols); - gemm(layer_in, w, 1, noArray(), 0, layer_out); - calc_activ_func( layer_out, weights[j] ); - - layer_in = layer_out; - } - - layer_out = outputs.rowRange(i, i + dn); - scale_output( layer_in, layer_out ); - } - - if( n == 1 ) - { - int maxIdx[] = {0, 0}; - minMaxIdx(outputs, 0, 0, 0, maxIdx); - return (float)(maxIdx[0] + maxIdx[1]); - } - - return 0.f; - } - - void scale_input( const Mat& _src, Mat& _dst ) const - { - int cols = _src.cols; - const double* w = weights[0].ptr(); - - if( _src.type() == CV_32F ) - { - for( int i = 0; i < _src.rows; i++ ) - { - const float* src = _src.ptr(i); - double* dst = _dst.ptr(i); - for( int j = 0; j < cols; j++ ) - dst[j] = src[j]*w[j*2] + w[j*2+1]; - } - } - else - { - for( int i = 0; i < _src.rows; i++ ) - { - const double* src = _src.ptr(i); - double* dst = _dst.ptr(i); - for( int j = 0; j < cols; j++ ) - dst[j] = src[j]*w[j*2] + w[j*2+1]; - } - } - } - - void scale_output( const Mat& _src, Mat& _dst ) const - { - int cols = _src.cols; - const double* w = weights[layer_count()].ptr(); - - if( _dst.type() == CV_32F ) - { - for( int i = 0; i < _src.rows; i++ ) - { - const double* src = _src.ptr(i); - float* dst = _dst.ptr(i); - for( int j = 0; j < cols; j++ ) - dst[j] = (float)(src[j]*w[j*2] + w[j*2+1]); - } - } - else - { - for( int i = 0; i < _src.rows; i++ ) - { - const double* src = _src.ptr(i); - double* dst = _dst.ptr(i); - for( int j = 0; j < cols; j++ ) - dst[j] = src[j]*w[j*2] + w[j*2+1]; - } - } - } - - void calc_activ_func(Mat& sums, const Mat& w) const - { - const double* bias = w.ptr(w.rows - 1); - int i, j, n = sums.rows, cols = sums.cols; - double scale = 0, scale2 = f_param2; - - switch (activ_func) - { - case IDENTITY: - scale = 1.; - break; - case SIGMOID_SYM: - scale = -f_param1; - break; - case GAUSSIAN: - scale = -f_param1*f_param1; - break; - case RELU: - scale = 1; - break; - case LEAKYRELU: - scale = 1; - break; - default: - ; - } - - CV_Assert(sums.isContinuous()); - - if (activ_func != GAUSSIAN) - { - for (i = 0; i < n; i++) - { - double* data = sums.ptr(i); - for (j = 0; j < cols; j++) - { - data[j] = (data[j] + bias[j])*scale; - if (activ_func == RELU) - if (data[j] < 0) - data[j] = 0; - if (activ_func == LEAKYRELU) - if (data[j] < 0) - data[j] *= f_param1; - } - } - - if (activ_func == IDENTITY || activ_func == RELU || activ_func == LEAKYRELU) - return; - } - else - { - for (i = 0; i < n; i++) - { - double* data = sums.ptr(i); - for (j = 0; j < cols; j++) - { - double t = data[j] + bias[j]; - data[j] = t*t*scale; - } - } - } - - exp(sums, sums); - - if (sums.isContinuous()) - { - cols *= n; - n = 1; - } - - switch (activ_func) - { - case SIGMOID_SYM: - for (i = 0; i < n; i++) - { - double* data = sums.ptr(i); - for (j = 0; j < cols; j++) - { - if (!cvIsInf(data[j])) - { - double t = scale2*(1. - data[j]) / (1. 
+ data[j]); - data[j] = t; - } - else - { - data[j] = -scale2; - } - } - } - break; - - case GAUSSIAN: - for (i = 0; i < n; i++) - { - double* data = sums.ptr(i); - for (j = 0; j < cols; j++) - data[j] = scale2*data[j]; - } - break; - - default: - ; - } - } - - void calc_activ_func_deriv(Mat& _xf, Mat& _df, const Mat& w) const - { - const double* bias = w.ptr(w.rows - 1); - int i, j, n = _xf.rows, cols = _xf.cols; - - if (activ_func == IDENTITY) - { - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - xf[j] += bias[j]; - df[j] = 1; - } - } - } - else if (activ_func == RELU) - { - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - xf[j] += bias[j]; - if (xf[j] < 0) - { - xf[j] = 0; - df[j] = 0; - } - else - df[j] = 1; - } - } - } - else if (activ_func == LEAKYRELU) - { - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - xf[j] += bias[j]; - if (xf[j] < 0) - { - xf[j] = f_param1*xf[j]; - df[j] = f_param1; - } - else - df[j] = 1; - } - } - } - else if (activ_func == GAUSSIAN) - { - double scale = -f_param1*f_param1; - double scale2 = scale*f_param2; - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - double t = xf[j] + bias[j]; - df[j] = t * 2 * scale2; - xf[j] = t*t*scale; - } - } - exp(_xf, _xf); - - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - df[j] *= xf[j]; - } - } - else - { - double scale = f_param1; - double scale2 = f_param2; - - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - xf[j] = (xf[j] + bias[j])*scale; - df[j] = -fabs(xf[j]); - } - } - - exp(_df, _df); - - // ((1+exp(-ax))^-1)'=a*((1+exp(-ax))^-2)*exp(-ax); - // ((1-exp(-ax))/(1+exp(-ax)))'=(a*exp(-ax)*(1+exp(-ax)) + a*exp(-ax)*(1-exp(-ax)))/(1+exp(-ax))^2= - // 2*a*exp(-ax)/(1+exp(-ax))^2 - scale *= 2 * f_param2; - for (i = 0; i < n; i++) - { - double* xf = _xf.ptr(i); - double* df = _df.ptr(i); - - for (j = 0; j < cols; j++) - { - int s0 = xf[j] > 0 ? 1 : -1; - double t0 = 1. / (1. + df[j]); - double t1 = scale*df[j] * t0*t0; - t0 *= scale2*(1. - df[j])*s0; - df[j] = t1; - xf[j] = t0; - } - } - } - } - - void calc_input_scale( const Mat& inputs, int flags ) - { - bool reset_weights = (flags & UPDATE_WEIGHTS) == 0; - bool no_scale = (flags & NO_INPUT_SCALE) != 0; - double* scale = weights[0].ptr(); - int count = inputs.rows; - - if( reset_weights ) - { - int i, j, vcount = layer_sizes[0]; - int type = inputs.type(); - double a = no_scale ? 1. : 0.; - - for( j = 0; j < vcount; j++ ) - scale[2*j] = a, scale[j*2+1] = 0.; - - if( no_scale ) - return; - - for( i = 0; i < count; i++ ) - { - const uchar* p = inputs.ptr(i); - const float* f = (const float*)p; - const double* d = (const double*)p; - for( j = 0; j < vcount; j++ ) - { - double t = type == CV_32F ? (double)f[j] : d[j]; - scale[j*2] += t; - scale[j*2+1] += t*t; - } - } - - for( j = 0; j < vcount; j++ ) - { - double s = scale[j*2], s2 = scale[j*2+1]; - double m = s/count, sigma2 = s2/count - m*m; - scale[j*2] = sigma2 < DBL_EPSILON ? 
1 : 1./sqrt(sigma2); - scale[j*2+1] = -m*scale[j*2]; - } - } - } - - void calc_output_scale( const Mat& outputs, int flags ) - { - int i, j, vcount = layer_sizes.back(); - int type = outputs.type(); - double m = min_val, M = max_val, m1 = min_val1, M1 = max_val1; - bool reset_weights = (flags & UPDATE_WEIGHTS) == 0; - bool no_scale = (flags & NO_OUTPUT_SCALE) != 0; - int l_count = layer_count(); - double* scale = weights[l_count].ptr(); - double* inv_scale = weights[l_count+1].ptr(); - int count = outputs.rows; - - if( reset_weights ) - { - double a0 = no_scale ? 1 : DBL_MAX, b0 = no_scale ? 0 : -DBL_MAX; - - for( j = 0; j < vcount; j++ ) - { - scale[2*j] = inv_scale[2*j] = a0; - scale[j*2+1] = inv_scale[2*j+1] = b0; - } - - if( no_scale ) - return; - } - - for( i = 0; i < count; i++ ) - { - const uchar* p = outputs.ptr(i); - const float* f = (const float*)p; - const double* d = (const double*)p; - - for( j = 0; j < vcount; j++ ) - { - double t = type == CV_32F ? (double)f[j] : d[j]; - - if( reset_weights ) - { - double mj = scale[j*2], Mj = scale[j*2+1]; - if( mj > t ) mj = t; - if( Mj < t ) Mj = t; - - scale[j*2] = mj; - scale[j*2+1] = Mj; - } - else if( !no_scale ) - { - t = t*inv_scale[j*2] + inv_scale[2*j+1]; - if( t < m1 || t > M1 ) - CV_Error( CV_StsOutOfRange, - "Some of new output training vector components run exceed the original range too much" ); - } - } - } - - if( reset_weights ) - for( j = 0; j < vcount; j++ ) - { - // map mj..Mj to m..M - double mj = scale[j*2], Mj = scale[j*2+1]; - double a, b; - double delta = Mj - mj; - if( delta < DBL_EPSILON ) - a = 1, b = (M + m - Mj - mj)*0.5; - else - a = (M - m)/delta, b = m - mj*a; - inv_scale[j*2] = a; inv_scale[j*2+1] = b; - a = 1./a; b = -b*a; - scale[j*2] = a; scale[j*2+1] = b; - } - } - - void prepare_to_train( const Mat& inputs, const Mat& outputs, - Mat& sample_weights, int flags ) - { - if( layer_sizes.empty() ) - CV_Error( CV_StsError, - "The network has not been created. Use method create or the appropriate constructor" ); - - if( (inputs.type() != CV_32F && inputs.type() != CV_64F) || - inputs.cols != layer_sizes[0] ) - CV_Error( CV_StsBadArg, - "input training data should be a floating-point matrix with " - "the number of rows equal to the number of training samples and " - "the number of columns equal to the size of 0-th (input) layer" ); - - if( (outputs.type() != CV_32F && outputs.type() != CV_64F) || - outputs.cols != layer_sizes.back() ) - CV_Error( CV_StsBadArg, - "output training data should be a floating-point matrix with " - "the number of rows equal to the number of training samples and " - "the number of columns equal to the size of last (output) layer" ); - - if( inputs.rows != outputs.rows ) - CV_Error( CV_StsUnmatchedSizes, "The numbers of input and output samples do not match" ); - - Mat temp; - double s = sum(sample_weights)[0]; - sample_weights.convertTo(temp, CV_64F, 1./s); - sample_weights = temp; - - calc_input_scale( inputs, flags ); - calc_output_scale( outputs, flags ); - } - - bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_Assert(!trainData.empty()); - const int MAX_ITER = 1000; - const double DEFAULT_EPSILON = FLT_EPSILON; - - // initialize training data - Mat inputs = trainData->getTrainSamples(); - Mat outputs = trainData->getTrainResponses(); - Mat sw = trainData->getTrainSampleWeights(); - prepare_to_train( inputs, outputs, sw, flags ); - - // ... 
and link weights - if( !(flags & UPDATE_WEIGHTS) ) - init_weights(); - - TermCriteria termcrit; - termcrit.type = TermCriteria::COUNT + TermCriteria::EPS; - termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1); - termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON); - - int iter = 0; - switch(params.trainMethod){ - case ANN_MLP::BACKPROP: - iter = train_backprop(inputs, outputs, sw, termcrit); - break; - case ANN_MLP::RPROP: - iter = train_rprop(inputs, outputs, sw, termcrit); - break; - case ANN_MLP::ANNEAL: - iter = train_anneal(trainData); - break; - } - trained = iter > 0; - return trained; - } - int train_anneal(const Ptr& trainData) - { - CV_Assert(!trainData.empty()); - SimulatedAnnealingANN_MLP s(*this, trainData); - trained = true; // Enable call to CalcError - int iter = simulatedAnnealingSolver(s, params.initialT, params.finalT, params.coolingRatio, params.itePerStep, NULL, params.rEnergy); - trained =false; - return iter + 1; // ensure that 'train()' call is always successful - } - - int train_backprop( const Mat& inputs, const Mat& outputs, const Mat& _sw, TermCriteria termCrit ) - { - int i, j, k; - double prev_E = DBL_MAX*0.5, E = 0; - int itype = inputs.type(), otype = outputs.type(); - - int count = inputs.rows; - - int iter = -1, max_iter = termCrit.maxCount*count; - double epsilon = (termCrit.type & CV_TERMCRIT_EPS) ? termCrit.epsilon*count : 0; - - int l_count = layer_count(); - int ivcount = layer_sizes[0]; - int ovcount = layer_sizes.back(); - - // allocate buffers - vector > x(l_count); - vector > df(l_count); - vector dw(l_count); - - for( i = 0; i < l_count; i++ ) - { - int n = layer_sizes[i]; - x[i].resize(n+1); - df[i].resize(n); - dw[i] = Mat::zeros(weights[i].size(), CV_64F); - } - - Mat _idx_m(1, count, CV_32S); - int* _idx = _idx_m.ptr(); - for( i = 0; i < count; i++ ) - _idx[i] = i; - - AutoBuffer _buf(max_lsize*2); - double* buf[] = { _buf.data(), _buf.data() + max_lsize }; - - const double* sw = _sw.empty() ? 0 : _sw.ptr(); - - // run back-propagation loop - /* - y_i = w_i*x_{i-1} - x_i = f(y_i) - E = 1/2*||u - x_N||^2 - grad_N = (x_N - u)*f'(y_i) - dw_i(t) = momentum*dw_i(t-1) + dw_scale*x_{i-1}*grad_i - w_i(t+1) = w_i(t) + dw_i(t) - grad_{i-1} = w_i^t*grad_i - */ - for( iter = 0; iter < max_iter; iter++ ) - { - int idx = iter % count; - double sweight = sw ? count*sw[idx] : 1.; - - if( idx == 0 ) - { - //printf("%d. E = %g\n", iter/count, E); - if( fabs(prev_E - E) < epsilon ) - break; - prev_E = E; - E = 0; - - // shuffle indices - for( i = 0; i (); - for( j = 0; j < ivcount; j++ ) - x[0][j] = (itype == CV_32F ? (double)x0data_f[j] : x0data_d[j])*w[j*2] + w[j*2 + 1]; - - Mat x1( 1, ivcount, CV_64F, &x[0][0] ); - - // forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i]) - for( i = 1; i < l_count; i++ ) - { - int n = layer_sizes[i]; - Mat x2(1, n, CV_64F, &x[i][0] ); - Mat _w = weights[i].rowRange(0, x1.cols); - gemm(x1, _w, 1, noArray(), 0, x2); - Mat _df(1, n, CV_64F, &df[i][0] ); - calc_activ_func_deriv( x2, _df, weights[i] ); - x1 = x2; - } - - Mat grad1( 1, ovcount, CV_64F, buf[l_count&1] ); - w = weights[l_count+1].ptr(); - - // calculate error - const uchar* udata_p = outputs.ptr(idx); - const float* udata_f = (const float*)udata_p; - const double* udata_d = (const double*)udata_p; - - double* gdata = grad1.ptr(); - for( k = 0; k < ovcount; k++ ) - { - double t = (otype == CV_32F ? 
(double)udata_f[k] : udata_d[k])*w[k*2] + w[k*2+1] - x[l_count-1][k]; - gdata[k] = t*sweight; - E += t*t; - } - E *= sweight; - - // backward pass, update weights - for( i = l_count-1; i > 0; i-- ) - { - int n1 = layer_sizes[i-1], n2 = layer_sizes[i]; - Mat _df(1, n2, CV_64F, &df[i][0]); - multiply( grad1, _df, grad1 ); - Mat _x(n1+1, 1, CV_64F, &x[i-1][0]); - x[i-1][n1] = 1.; - gemm( _x, grad1, params.bpDWScale, dw[i], params.bpMomentScale, dw[i] ); - add( weights[i], dw[i], weights[i] ); - if( i > 1 ) - { - Mat grad2(1, n1, CV_64F, buf[i&1]); - Mat _w = weights[i].rowRange(0, n1); - gemm( grad1, _w, 1, noArray(), 0, grad2, GEMM_2_T ); - grad1 = grad2; - } - } - } - - iter /= count; - return iter; - } - - struct RPropLoop : public ParallelLoopBody - { - RPropLoop(ANN_MLPImpl* _ann, - const Mat& _inputs, const Mat& _outputs, const Mat& _sw, - int _dcount0, vector& _dEdw, double* _E) - { - ann = _ann; - inputs = _inputs; - outputs = _outputs; - sw = _sw.ptr(); - dcount0 = _dcount0; - dEdw = &_dEdw; - pE = _E; - } - - ANN_MLPImpl* ann; - vector* dEdw; - Mat inputs, outputs; - const double* sw; - int dcount0; - double* pE; - - void operator()(const Range& range) const CV_OVERRIDE - { - double inv_count = 1./inputs.rows; - int ivcount = ann->layer_sizes.front(); - int ovcount = ann->layer_sizes.back(); - int itype = inputs.type(), otype = outputs.type(); - int count = inputs.rows; - int i, j, k, l_count = ann->layer_count(); - vector > x(l_count); - vector > df(l_count); - vector _buf(ann->max_lsize*dcount0*2); - double* buf[] = { &_buf[0], &_buf[ann->max_lsize*dcount0] }; - double E = 0; - - for( i = 0; i < l_count; i++ ) - { - x[i].resize(ann->layer_sizes[i]*dcount0); - df[i].resize(ann->layer_sizes[i]*dcount0); - } - - for( int si = range.start; si < range.end; si++ ) - { - int i0 = si*dcount0, i1 = std::min((si + 1)*dcount0, count); - int dcount = i1 - i0; - const double* w = ann->weights[0].ptr(); - - // grab and preprocess input data - for( i = 0; i < dcount; i++ ) - { - const uchar* x0data_p = inputs.ptr(i0 + i); - const float* x0data_f = (const float*)x0data_p; - const double* x0data_d = (const double*)x0data_p; - - double* xdata = &x[0][i*ivcount]; - for( j = 0; j < ivcount; j++ ) - xdata[j] = (itype == CV_32F ? (double)x0data_f[j] : x0data_d[j])*w[j*2] + w[j*2+1]; - } - Mat x1(dcount, ivcount, CV_64F, &x[0][0]); - - // forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i]) - for( i = 1; i < l_count; i++ ) - { - Mat x2( dcount, ann->layer_sizes[i], CV_64F, &x[i][0] ); - Mat _w = ann->weights[i].rowRange(0, x1.cols); - gemm( x1, _w, 1, noArray(), 0, x2 ); - Mat _df( x2.size(), CV_64F, &df[i][0] ); - ann->calc_activ_func_deriv( x2, _df, ann->weights[i] ); - x1 = x2; - } - - Mat grad1(dcount, ovcount, CV_64F, buf[l_count & 1]); - - w = ann->weights[l_count+1].ptr(); - - // calculate error - for( i = 0; i < dcount; i++ ) - { - const uchar* udata_p = outputs.ptr(i0+i); - const float* udata_f = (const float*)udata_p; - const double* udata_d = (const double*)udata_p; - - const double* xdata = &x[l_count-1][i*ovcount]; - double* gdata = grad1.ptr(i); - double sweight = sw ? sw[si+i] : inv_count, E1 = 0; - - for( j = 0; j < ovcount; j++ ) - { - double t = (otype == CV_32F ? 
(double)udata_f[j] : udata_d[j])*w[j*2] + w[j*2+1] - xdata[j]; - gdata[j] = t*sweight; - E1 += t*t; - } - E += sweight*E1; - } - - for( i = l_count-1; i > 0; i-- ) - { - int n1 = ann->layer_sizes[i-1], n2 = ann->layer_sizes[i]; - Mat _df(dcount, n2, CV_64F, &df[i][0]); - multiply(grad1, _df, grad1); - - { - AutoLock lock(ann->mtx); - Mat _dEdw = dEdw->at(i).rowRange(0, n1); - x1 = Mat(dcount, n1, CV_64F, &x[i-1][0]); - gemm(x1, grad1, 1, _dEdw, 1, _dEdw, GEMM_1_T); - - // update bias part of dEdw - double* dst = dEdw->at(i).ptr(n1); - for( k = 0; k < dcount; k++ ) - { - const double* src = grad1.ptr(k); - for( j = 0; j < n2; j++ ) - dst[j] += src[j]; - } - } - - Mat grad2( dcount, n1, CV_64F, buf[i&1] ); - if( i > 1 ) - { - Mat _w = ann->weights[i].rowRange(0, n1); - gemm(grad1, _w, 1, noArray(), 0, grad2, GEMM_2_T); - } - grad1 = grad2; - } - } - { - AutoLock lock(ann->mtx); - *pE += E; - } - } - }; - - int train_rprop( const Mat& inputs, const Mat& outputs, const Mat& _sw, TermCriteria termCrit ) - { - const int max_buf_size = 1 << 16; - int i, iter = -1, count = inputs.rows; - - double prev_E = DBL_MAX*0.5; - - int max_iter = termCrit.maxCount; - double epsilon = termCrit.epsilon; - double dw_plus = params.rpDWPlus; - double dw_minus = params.rpDWMinus; - double dw_min = params.rpDWMin; - double dw_max = params.rpDWMax; - - int l_count = layer_count(); - - // allocate buffers - vector dw(l_count), dEdw(l_count), prev_dEdw_sign(l_count); - - int total = 0; - for( i = 0; i < l_count; i++ ) - { - total += layer_sizes[i]; - dw[i].create(weights[i].size(), CV_64F); - dw[i].setTo(Scalar::all(params.rpDW0)); - prev_dEdw_sign[i] = Mat::zeros(weights[i].size(), CV_8S); - dEdw[i] = Mat::zeros(weights[i].size(), CV_64F); - } - CV_Assert(total > 0); - int dcount0 = max_buf_size/(2*total); - dcount0 = std::max( dcount0, 1 ); - dcount0 = std::min( dcount0, count ); - int chunk_count = (count + dcount0 - 1)/dcount0; - - // run rprop loop - /* - y_i(t) = w_i(t)*x_{i-1}(t) - x_i(t) = f(y_i(t)) - E = sum_over_all_samples(1/2*||u - x_N||^2) - grad_N = (x_N - u)*f'(y_i) - - std::min(dw_i{jk}(t)*dw_plus, dw_max), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) > 0 - dw_i{jk}(t) = std::max(dw_i{jk}(t)*dw_minus, dw_min), if dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0 - dw_i{jk}(t-1) else - - if (dE/dw_i{jk}(t)*dE/dw_i{jk}(t-1) < 0) - dE/dw_i{jk}(t)<-0 - else - w_i{jk}(t+1) = w_i{jk}(t) + dw_i{jk}(t) - grad_{i-1}(t) = w_i^t(t)*grad_i(t) - */ - for( iter = 0; iter < max_iter; iter++ ) - { - double E = 0; - - for( i = 0; i < l_count; i++ ) - dEdw[i].setTo(Scalar::all(0)); - - // first, iterate through all the samples and compute dEdw - RPropLoop invoker(this, inputs, outputs, _sw, dcount0, dEdw, &E); - parallel_for_(Range(0, chunk_count), invoker); - //invoker(Range(0, chunk_count)); - - // now update weights - for( i = 1; i < l_count; i++ ) - { - int n1 = layer_sizes[i-1], n2 = layer_sizes[i]; - for( int k = 0; k <= n1; k++ ) - { - CV_Assert(weights[i].size() == Size(n2, n1+1)); - double* wk = weights[i].ptr(k); - double* dwk = dw[i].ptr(k); - double* dEdwk = dEdw[i].ptr(k); - schar* prevEk = prev_dEdw_sign[i].ptr(k); - - for( int j = 0; j < n2; j++ ) - { - double Eval = dEdwk[j]; - double dval = dwk[j]; - double wval = wk[j]; - int s = CV_SIGN(Eval); - int ss = prevEk[j]*s; - if( ss > 0 ) - { - dval *= dw_plus; - dval = std::min( dval, dw_max ); - dwk[j] = dval; - wk[j] = wval + dval*s; - } - else if( ss < 0 ) - { - dval *= dw_minus; - dval = std::max( dval, dw_min ); - prevEk[j] = 0; - dwk[j] = dval; - wk[j] = wval + dval*s; - } 
- else - { - prevEk[j] = (schar)s; - wk[j] = wval + dval*s; - } - dEdwk[j] = 0.; - } - } - } - - //printf("%d. E = %g\n", iter, E); - if( fabs(prev_E - E) < epsilon ) - break; - prev_E = E; - } - - return iter; - } - - void write_params( FileStorage& fs ) const - { - const char* activ_func_name = activ_func == IDENTITY ? "IDENTITY" : - activ_func == SIGMOID_SYM ? "SIGMOID_SYM" : - activ_func == GAUSSIAN ? "GAUSSIAN" : - activ_func == RELU ? "RELU" : - activ_func == LEAKYRELU ? "LEAKYRELU" : 0; - - if( activ_func_name ) - fs << "activation_function" << activ_func_name; - else - fs << "activation_function_id" << activ_func; - - if( activ_func != IDENTITY ) - { - fs << "f_param1" << f_param1; - fs << "f_param2" << f_param2; - } - - fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1; - - fs << "training_params" << "{"; - if( params.trainMethod == ANN_MLP::BACKPROP ) - { - fs << "train_method" << "BACKPROP"; - fs << "dw_scale" << params.bpDWScale; - fs << "moment_scale" << params.bpMomentScale; - } - else if (params.trainMethod == ANN_MLP::RPROP) - { - fs << "train_method" << "RPROP"; - fs << "dw0" << params.rpDW0; - fs << "dw_plus" << params.rpDWPlus; - fs << "dw_minus" << params.rpDWMinus; - fs << "dw_min" << params.rpDWMin; - fs << "dw_max" << params.rpDWMax; - } - else if (params.trainMethod == ANN_MLP::ANNEAL) - { - fs << "train_method" << "ANNEAL"; - fs << "initialT" << params.initialT; - fs << "finalT" << params.finalT; - fs << "coolingRatio" << params.coolingRatio; - fs << "itePerStep" << params.itePerStep; - } - else - CV_Error(CV_StsError, "Unknown training method"); - - fs << "term_criteria" << "{"; - if( params.termCrit.type & TermCriteria::EPS ) - fs << "epsilon" << params.termCrit.epsilon; - if( params.termCrit.type & TermCriteria::COUNT ) - fs << "iterations" << params.termCrit.maxCount; - fs << "}" << "}"; - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - if( layer_sizes.empty() ) - return; - int i, l_count = layer_count(); - - writeFormat(fs); - fs << "layer_sizes" << layer_sizes; - - write_params( fs ); - - size_t esz = weights[0].elemSize(); - - fs << "input_scale" << "["; - fs.writeRaw("d", weights[0].ptr(), weights[0].total()*esz); - - fs << "]" << "output_scale" << "["; - fs.writeRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz); - - fs << "]" << "inv_output_scale" << "["; - fs.writeRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz); - - fs << "]" << "weights" << "["; - for( i = 1; i < l_count; i++ ) - { - fs << "["; - fs.writeRaw("d", weights[i].ptr(), weights[i].total()*esz); - fs << "]"; - } - fs << "]"; - } - - void read_params( const FileNode& fn ) - { - String activ_func_name = (String)fn["activation_function"]; - if( !activ_func_name.empty() ) - { - activ_func = activ_func_name == "SIGMOID_SYM" ? SIGMOID_SYM : - activ_func_name == "IDENTITY" ? IDENTITY : - activ_func_name == "RELU" ? RELU : - activ_func_name == "LEAKYRELU" ? LEAKYRELU : - activ_func_name == "GAUSSIAN" ? 
GAUSSIAN : -1; - CV_Assert( activ_func >= 0 ); - } - else - activ_func = (int)fn["activation_function_id"]; - - f_param1 = (double)fn["f_param1"]; - f_param2 = (double)fn["f_param2"]; - - setActivationFunction( activ_func, f_param1, f_param2); - - min_val = (double)fn["min_val"]; - max_val = (double)fn["max_val"]; - min_val1 = (double)fn["min_val1"]; - max_val1 = (double)fn["max_val1"]; - - FileNode tpn = fn["training_params"]; - params = AnnParams(); - - if( !tpn.empty() ) - { - String tmethod_name = (String)tpn["train_method"]; - - if( tmethod_name == "BACKPROP" ) - { - params.trainMethod = ANN_MLP::BACKPROP; - params.bpDWScale = (double)tpn["dw_scale"]; - params.bpMomentScale = (double)tpn["moment_scale"]; - } - else if (tmethod_name == "RPROP") - { - params.trainMethod = ANN_MLP::RPROP; - params.rpDW0 = (double)tpn["dw0"]; - params.rpDWPlus = (double)tpn["dw_plus"]; - params.rpDWMinus = (double)tpn["dw_minus"]; - params.rpDWMin = (double)tpn["dw_min"]; - params.rpDWMax = (double)tpn["dw_max"]; - } - else if (tmethod_name == "ANNEAL") - { - params.trainMethod = ANN_MLP::ANNEAL; - params.initialT = (double)tpn["initialT"]; - params.finalT = (double)tpn["finalT"]; - params.coolingRatio = (double)tpn["coolingRatio"]; - params.itePerStep = tpn["itePerStep"]; - } - else - CV_Error(CV_StsParseError, "Unknown training method (should be BACKPROP, RPROP or ANNEAL)"); - - FileNode tcn = tpn["term_criteria"]; - if( !tcn.empty() ) - { - FileNode tcn_e = tcn["epsilon"]; - FileNode tcn_i = tcn["iterations"]; - params.termCrit.type = 0; - if( !tcn_e.empty() ) - { - params.termCrit.type |= TermCriteria::EPS; - params.termCrit.epsilon = (double)tcn_e; - } - if( !tcn_i.empty() ) - { - params.termCrit.type |= TermCriteria::COUNT; - params.termCrit.maxCount = (int)tcn_i; - } - } - } - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - clear(); - - vector<int> _layer_sizes; - readVectorOrMat(fn["layer_sizes"], _layer_sizes); - setLayerSizes( _layer_sizes ); - - int i, l_count = layer_count(); - read_params(fn); - - size_t esz = weights[0].elemSize(); - - FileNode w = fn["input_scale"]; - w.readRaw("d", weights[0].ptr(), weights[0].total()*esz); - - w = fn["output_scale"]; - w.readRaw("d", weights[l_count].ptr(), weights[l_count].total()*esz); - - w = fn["inv_output_scale"]; - w.readRaw("d", weights[l_count+1].ptr(), weights[l_count+1].total()*esz); - - FileNodeIterator w_it = fn["weights"].begin(); - - for( i = 1; i < l_count; i++, ++w_it ) - (*w_it).readRaw("d", weights[i].ptr(), weights[i].total()*esz); - trained = true; - } - - Mat getWeights(int layerIdx) const CV_OVERRIDE - { - CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() ); - return weights[layerIdx]; - } - - bool isTrained() const CV_OVERRIDE - { - return trained; - } - - bool isClassifier() const CV_OVERRIDE - { - return false; - } - - int getVarCount() const CV_OVERRIDE - { - return layer_sizes.empty() ?
0 : layer_sizes[0]; - } - - String getDefaultName() const CV_OVERRIDE - { - return "opencv_ml_ann_mlp"; - } - - vector layer_sizes; - vector weights; - double f_param1, f_param2; - double min_val, max_val, min_val1, max_val1; - int activ_func; - int max_lsize, max_buf_sz; - AnnParams params; - RNG rng; - Mutex mtx; - bool trained; -}; - - - - -Ptr ANN_MLP::create() -{ - return makePtr(); -} - -Ptr ANN_MLP::load(const String& filepath) -{ - FileStorage fs; - fs.open(filepath, FileStorage::READ); - CV_Assert(fs.isOpened()); - Ptr ann = makePtr(); - ((ANN_MLPImpl*)ann.get())->read(fs.getFirstTopLevelNode()); - return ann; -} - -}} - -/* End of file. */ diff --git a/modules/ml/src/boost.cpp b/modules/ml/src/boost.cpp deleted file mode 100644 index be9c9a7b46..0000000000 --- a/modules/ml/src/boost.cpp +++ /dev/null @@ -1,533 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -namespace cv { namespace ml { - -static inline double -log_ratio( double val ) -{ - const double eps = 1e-5; - val = std::max( val, eps ); - val = std::min( val, 1. - eps ); - return log( val/(1. 
- val) ); -} - - -BoostTreeParams::BoostTreeParams() -{ - boostType = Boost::REAL; - weakCount = 100; - weightTrimRate = 0.95; -} - -BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count, - double _weightTrimRate) -{ - boostType = _boostType; - weakCount = _weak_count; - weightTrimRate = _weightTrimRate; -} - -class DTreesImplForBoost CV_FINAL : public DTreesImpl -{ -public: - DTreesImplForBoost() - { - params.setCVFolds(0); - params.setMaxDepth(1); - } - virtual ~DTreesImplForBoost() {} - - bool isClassifier() const CV_OVERRIDE { return true; } - - void clear() CV_OVERRIDE - { - DTreesImpl::clear(); - } - - void startTraining( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_Assert(!trainData.empty()); - DTreesImpl::startTraining(trainData, flags); - sumResult.assign(w->sidx.size(), 0.); - - if( bparams.boostType != Boost::DISCRETE ) - { - _isClassifier = false; - int i, n = (int)w->cat_responses.size(); - w->ord_responses.resize(n); - - double a = -1, b = 1; - if( bparams.boostType == Boost::LOGIT ) - { - a = -2, b = 2; - } - for( i = 0; i < n; i++ ) - w->ord_responses[i] = w->cat_responses[i] > 0 ? b : a; - } - - normalizeWeights(); - } - - void normalizeWeights() - { - int i, n = (int)w->sidx.size(); - double sumw = 0, a, b; - for( i = 0; i < n; i++ ) - sumw += w->sample_weights[w->sidx[i]]; - if( sumw > DBL_EPSILON ) - { - a = 1./sumw; - b = 0; - } - else - { - a = 0; - b = 1; - } - for( i = 0; i < n; i++ ) - { - double& wval = w->sample_weights[w->sidx[i]]; - wval = wval*a + b; - } - } - - void endTraining() CV_OVERRIDE - { - DTreesImpl::endTraining(); - vector e; - std::swap(sumResult, e); - } - - void scaleTree( int root, double scale ) - { - int nidx = root, pidx = 0; - Node *node = 0; - - // traverse the tree and save all the nodes in depth-first order - for(;;) - { - for(;;) - { - node = &nodes[nidx]; - node->value *= scale; - if( node->left < 0 ) - break; - nidx = node->left; - } - - for( pidx = node->parent; pidx >= 0 && nodes[pidx].right == nidx; - nidx = pidx, pidx = nodes[pidx].parent ) - ; - - if( pidx < 0 ) - break; - - nidx = nodes[pidx].right; - } - } - - void calcValue( int nidx, const vector& _sidx ) CV_OVERRIDE - { - DTreesImpl::calcValue(nidx, _sidx); - WNode* node = &w->wnodes[nidx]; - if( bparams.boostType == Boost::DISCRETE ) - { - node->value = node->class_idx == 0 ? -1 : 1; - } - else if( bparams.boostType == Boost::REAL ) - { - double p = (node->value+1)*0.5; - node->value = 0.5*log_ratio(p); - } - } - - bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_Assert(!trainData.empty()); - startTraining(trainData, flags); - int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000; - vector sidx = w->sidx; - - for( treeidx = 0; treeidx < ntrees; treeidx++ ) - { - int root = addTree( sidx ); - if( root < 0 ) - return false; - updateWeightsAndTrim( treeidx, sidx ); - } - endTraining(); - return true; - } - - void updateWeightsAndTrim( int treeidx, vector& sidx ) - { - int i, n = (int)w->sidx.size(); - int nvars = (int)varIdx.size(); - double sumw = 0., C = 1.; - cv::AutoBuffer buf(n + nvars); - double* result = buf.data(); - float* sbuf = (float*)(result + n); - Mat sample(1, nvars, CV_32F, sbuf); - int predictFlags = bparams.boostType == Boost::DISCRETE ? 
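- // Discrete AdaBoost asks each tree for a raw class vote; the other - // boosting variants sum the trees' real-valued responses: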
(PREDICT_MAX_VOTE | RAW_OUTPUT) : PREDICT_SUM; - predictFlags |= COMPRESSED_INPUT; - - for( i = 0; i < n; i++ ) - { - w->data->getSample(varIdx, w->sidx[i], sbuf ); - result[i] = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags); - } - - // now update weights and other parameters for each type of boosting - if( bparams.boostType == Boost::DISCRETE ) - { - // Discrete AdaBoost: - // weak_eval[i] (=f(x_i)) is in {-1,1} - // err = sum(w_i*(f(x_i) != y_i))/sum(w_i) - // C = log((1-err)/err) - // w_i *= exp(C*(f(x_i) != y_i)) - double err = 0.; - - for( i = 0; i < n; i++ ) - { - int si = w->sidx[i]; - double wval = w->sample_weights[si]; - sumw += wval; - err += wval*(result[i] != w->cat_responses[si]); - } - - if( sumw != 0 ) - err /= sumw; - C = -log_ratio( err ); - double scale = std::exp(C); - - sumw = 0; - for( i = 0; i < n; i++ ) - { - int si = w->sidx[i]; - double wval = w->sample_weights[si]; - if( result[i] != w->cat_responses[si] ) - wval *= scale; - sumw += wval; - w->sample_weights[si] = wval; - } - - scaleTree(roots[treeidx], C); - } - else if( bparams.boostType == Boost::REAL || bparams.boostType == Boost::GENTLE ) - { - // Real AdaBoost: - // weak_eval[i] = f(x_i) = 0.5*log(p(x_i)/(1-p(x_i))), p(x_i)=P(y=1|x_i) - // w_i *= exp(-y_i*f(x_i)) - - // Gentle AdaBoost: - // weak_eval[i] = f(x_i) in [-1,1] - // w_i *= exp(-y_i*f(x_i)) - for( i = 0; i < n; i++ ) - { - int si = w->sidx[i]; - CV_Assert( std::abs(w->ord_responses[si]) == 1 ); - double wval = w->sample_weights[si]*std::exp(-result[i]*w->ord_responses[si]); - sumw += wval; - w->sample_weights[si] = wval; - } - } - else if( bparams.boostType == Boost::LOGIT ) - { - // LogitBoost: - // weak_eval[i] = f(x_i) in [-z_max,z_max] - // sum_response = F(x_i). - // F(x_i) += 0.5*f(x_i) - // p(x_i) = exp(F(x_i))/(exp(F(x_i)) + exp(-F(x_i))) = 1/(1+exp(-2*F(x_i))) - // reuse weak_eval: weak_eval[i] <- p(x_i) - // w_i = p(x_i)*(1 - p(x_i)) - // z_i = ((y_i+1)/2 - p(x_i))/(p(x_i)*(1 - p(x_i))) - // store z_i to the data->data_root as the new target responses - const double lb_weight_thresh = FLT_EPSILON; - const double lb_z_max = 10.; - - for( i = 0; i < n; i++ ) - { - int si = w->sidx[i]; - sumResult[i] += 0.5*result[i]; - double p = 1./(1 + std::exp(-2*sumResult[i])); - double wval = std::max( p*(1 - p), lb_weight_thresh ), z; - w->sample_weights[si] = wval; - sumw += wval; - if( w->ord_responses[si] > 0 ) - { - z = 1./p; - w->ord_responses[si] = std::min(z, lb_z_max); - } - else - { - z = 1./(1-p); - w->ord_responses[si] = -std::min(z, lb_z_max); - } - } - } - else - CV_Error(CV_StsNotImplemented, "Unknown boosting type"); - - /*if( bparams.boostType != Boost::LOGIT ) - { - double err = 0; - for( i = 0; i < n; i++ ) - { - sumResult[i] += result[i]*C; - if( bparams.boostType != Boost::DISCRETE ) - err += sumResult[i]*w->ord_responses[w->sidx[i]] < 0; - else - err += sumResult[i]*w->cat_responses[w->sidx[i]] < 0; - } - printf("%d trees. C=%.2f, training error=%.1f%%, working set size=%d (out of %d)\n", (int)roots.size(), C, err*100./n, (int)sidx.size(), n); - }*/ - - // renormalize weights - if( sumw > FLT_EPSILON ) - normalizeWeights(); - - if( bparams.weightTrimRate <= 0. || bparams.weightTrimRate >= 1. ) - return; - - for( i = 0; i < n; i++ ) - result[i] = w->sample_weights[w->sidx[i]]; - std::sort(result, result + n); - - // as weight trimming occurs immediately after updating the weights, - // where they are renormalized, we assume that the weight sum = 1. - sumw = 1.
- bparams.weightTrimRate; - - for( i = 0; i < n; i++ ) - { - double wval = result[i]; - if( sumw <= 0 ) - break; - sumw -= wval; - } - - double threshold = i < n ? result[i] : DBL_MAX; - sidx.clear(); - - for( i = 0; i < n; i++ ) - { - int si = w->sidx[i]; - if( w->sample_weights[si] >= threshold ) - sidx.push_back(si); - } - } - - float predictTrees( const Range& range, const Mat& sample, int flags0 ) const CV_OVERRIDE - { - int flags = (flags0 & ~PREDICT_MASK) | PREDICT_SUM; - float val = DTreesImpl::predictTrees(range, sample, flags); - if( flags != flags0 ) - { - int ival = (int)(val > 0); - if( !(flags0 & RAW_OUTPUT) ) - ival = classLabels[ival]; - val = (float)ival; - } - return val; - } - - void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE - { - fs << "boosting_type" << - (bparams.boostType == Boost::DISCRETE ? "DiscreteAdaboost" : - bparams.boostType == Boost::REAL ? "RealAdaboost" : - bparams.boostType == Boost::LOGIT ? "LogitBoost" : - bparams.boostType == Boost::GENTLE ? "GentleAdaboost" : "Unknown"); - - DTreesImpl::writeTrainingParams(fs); - fs << "weight_trimming_rate" << bparams.weightTrimRate; - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - if( roots.empty() ) - CV_Error( CV_StsBadArg, "Boost model has not been trained" ); - - writeFormat(fs); - writeParams(fs); - - int k, ntrees = (int)roots.size(); - - fs << "ntrees" << ntrees - << "trees" << "["; - - for( k = 0; k < ntrees; k++ ) - { - fs << "{"; - writeTree(fs, roots[k]); - fs << "}"; - } - - fs << "]"; - } - - void readParams( const FileNode& fn ) CV_OVERRIDE - { - DTreesImpl::readParams(fn); - - FileNode tparams_node = fn["training_params"]; - // check for old layout - String bts = (String)(fn["boosting_type"].empty() ? - tparams_node["boosting_type"] : fn["boosting_type"]); - bparams.boostType = (bts == "DiscreteAdaboost" ? Boost::DISCRETE : - bts == "RealAdaboost" ? Boost::REAL : - bts == "LogitBoost" ? Boost::LOGIT : - bts == "GentleAdaboost" ? Boost::GENTLE : -1); - _isClassifier = bparams.boostType == Boost::DISCRETE; - // check for old layout - bparams.weightTrimRate = (double)(fn["weight_trimming_rate"].empty() ?
- tparams_node["weight_trimming_rate"] : fn["weight_trimming_rate"]); - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - clear(); - - int ntrees = (int)fn["ntrees"]; - readParams(fn); - - FileNode trees_node = fn["trees"]; - FileNodeIterator it = trees_node.begin(); - CV_Assert( ntrees == (int)trees_node.size() ); - - for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it ) - { - FileNode nfn = (*it)["nodes"]; - readTree(nfn); - } - } - - BoostTreeParams bparams; - vector sumResult; -}; - - -class BoostImpl : public Boost -{ -public: - BoostImpl() {} - virtual ~BoostImpl() {} - - inline int getBoostType() const CV_OVERRIDE { return impl.bparams.boostType; } - inline void setBoostType(int val) CV_OVERRIDE { impl.bparams.boostType = val; } - inline int getWeakCount() const CV_OVERRIDE { return impl.bparams.weakCount; } - inline void setWeakCount(int val) CV_OVERRIDE { impl.bparams.weakCount = val; } - inline double getWeightTrimRate() const CV_OVERRIDE { return impl.bparams.weightTrimRate; } - inline void setWeightTrimRate(double val) CV_OVERRIDE { impl.bparams.weightTrimRate = val; } - - inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); } - inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); } - inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); } - inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); } - inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); } - inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); } - inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); } - inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); } - inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); } - inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); } - inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); } - inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); } - inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); } - inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); } - inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); } - inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); } - inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); } - inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); } - - String getDefaultName() const CV_OVERRIDE { return "opencv_ml_boost"; } - - bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_Assert(!trainData.empty()); - return impl.train(trainData, flags); - } - - float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE - { - CV_CheckEQ(samples.cols(), getVarCount(), ""); - return impl.predict(samples, results, flags); - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - impl.write(fs); - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - impl.read(fn); - } - - int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); } - - bool isTrained() const CV_OVERRIDE { return impl.isTrained(); } - bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); } - - const vector& getRoots() 
const CV_OVERRIDE { return impl.getRoots(); } - const vector<Node>& getNodes() const CV_OVERRIDE { return impl.getNodes(); } - const vector<Split>& getSplits() const CV_OVERRIDE { return impl.getSplits(); } - const vector<int>& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); } - - DTreesImplForBoost impl; -}; - - -Ptr<Boost> Boost::create() -{ - return makePtr<BoostImpl>(); -} - -Ptr<Boost> Boost::load(const String& filepath, const String& nodeName) -{ - return Algorithm::load<Boost>(filepath, nodeName); -} - -}} - -/* End of file. */ diff --git a/modules/ml/src/data.cpp b/modules/ml/src/data.cpp deleted file mode 100644 index fd7c8d1016..0000000000 --- a/modules/ml/src/data.cpp +++ /dev/null @@ -1,1045 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -#include -#include -#include - -#include - -namespace cv { namespace ml { - -static const float MISSED_VAL = TrainData::missingValue(); -static const int VAR_MISSED = VAR_ORDERED; - -TrainData::~TrainData() {} - -Mat TrainData::getSubVector(const Mat& vec, const Mat& idx) -{ - if (!(vec.cols == 1 || vec.rows == 1)) - CV_LOG_WARNING(NULL, "'getSubVector(const Mat& vec, const Mat& idx)' call with non-1D input is deprecated. It is not designed to work with 2D matrices (especially with 'cv::ml::COL_SAMPLE' layout)."); - return getSubMatrix(vec, idx, vec.rows == 1 ?
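- // (a 1 x N input vector is indexed like column-organized samples, - // an N x 1 vector like row-organized samples)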
cv::ml::COL_SAMPLE : cv::ml::ROW_SAMPLE); -} - -template -Mat getSubMatrixImpl(const Mat& m, const Mat& idx, int layout) -{ - int nidx = idx.checkVector(1, CV_32S); - int dims = m.cols, nsamples = m.rows; - - Mat subm; - if (layout == COL_SAMPLE) - { - std::swap(dims, nsamples); - subm.create(dims, nidx, m.type()); - } - else - { - subm.create(nidx, dims, m.type()); - } - - for (int i = 0; i < nidx; i++) - { - int k = idx.at(i); CV_CheckGE(k, 0, "Bad idx"); CV_CheckLT(k, nsamples, "Bad idx or layout"); - if (dims == 1) - { - subm.at(i) = m.at(k); // at() has "transparent" access for 1D col-based / row-based vectors. - } - else if (layout == COL_SAMPLE) - { - for (int j = 0; j < dims; j++) - subm.at(j, i) = m.at(j, k); - } - else - { - for (int j = 0; j < dims; j++) - subm.at(i, j) = m.at(k, j); - } - } - return subm; -} - -Mat TrainData::getSubMatrix(const Mat& m, const Mat& idx, int layout) -{ - if (idx.empty()) - return m; - int type = m.type(); - CV_CheckType(type, type == CV_32S || type == CV_32F || type == CV_64F, ""); - if (type == CV_32S || type == CV_32F) // 32-bit - return getSubMatrixImpl(m, idx, layout); - if (type == CV_64F) // 64-bit - return getSubMatrixImpl(m, idx, layout); - CV_Error(Error::StsInternal, ""); -} - - -class TrainDataImpl CV_FINAL : public TrainData -{ -public: - typedef std::map MapType; - - TrainDataImpl() - { - file = 0; - clear(); - } - - virtual ~TrainDataImpl() { closeFile(); } - - int getLayout() const CV_OVERRIDE { return layout; } - int getNSamples() const CV_OVERRIDE - { - return !sampleIdx.empty() ? (int)sampleIdx.total() : - layout == ROW_SAMPLE ? samples.rows : samples.cols; - } - int getNTrainSamples() const CV_OVERRIDE - { - return !trainSampleIdx.empty() ? (int)trainSampleIdx.total() : getNSamples(); - } - int getNTestSamples() const CV_OVERRIDE - { - return !testSampleIdx.empty() ? (int)testSampleIdx.total() : 0; - } - int getNVars() const CV_OVERRIDE - { - return !varIdx.empty() ? (int)varIdx.total() : getNAllVars(); - } - int getNAllVars() const CV_OVERRIDE - { - return layout == ROW_SAMPLE ? samples.cols : samples.rows; - } - - Mat getTestSamples() const CV_OVERRIDE - { - Mat idx = getTestSampleIdx(); - return idx.empty() ? Mat() : getSubMatrix(samples, idx, getLayout()); - } - - Mat getSamples() const CV_OVERRIDE { return samples; } - Mat getResponses() const CV_OVERRIDE { return responses; } - Mat getMissing() const CV_OVERRIDE { return missing; } - Mat getVarIdx() const CV_OVERRIDE { return varIdx; } - Mat getVarType() const CV_OVERRIDE { return varType; } - int getResponseType() const CV_OVERRIDE - { - return classLabels.empty() ? VAR_ORDERED : VAR_CATEGORICAL; - } - Mat getTrainSampleIdx() const CV_OVERRIDE { return !trainSampleIdx.empty() ? trainSampleIdx : sampleIdx; } - Mat getTestSampleIdx() const CV_OVERRIDE { return testSampleIdx; } - Mat getSampleWeights() const CV_OVERRIDE - { - return sampleWeights; - } - Mat getTrainSampleWeights() const CV_OVERRIDE - { - return getSubVector(sampleWeights, getTrainSampleIdx()); // 1D-vector - } - Mat getTestSampleWeights() const CV_OVERRIDE - { - Mat idx = getTestSampleIdx(); - return idx.empty() ? 
Mat() : getSubVector(sampleWeights, idx); // 1D-vector - } - Mat getTrainResponses() const CV_OVERRIDE - { - return getSubMatrix(responses, getTrainSampleIdx(), cv::ml::ROW_SAMPLE); // col-based responses are transposed in setData() - } - Mat getTrainNormCatResponses() const CV_OVERRIDE - { - return getSubMatrix(normCatResponses, getTrainSampleIdx(), cv::ml::ROW_SAMPLE); // like 'responses' - } - Mat getTestResponses() const CV_OVERRIDE - { - Mat idx = getTestSampleIdx(); - return idx.empty() ? Mat() : getSubMatrix(responses, idx, cv::ml::ROW_SAMPLE); // col-based responses are transposed in setData() - } - Mat getTestNormCatResponses() const CV_OVERRIDE - { - Mat idx = getTestSampleIdx(); - return idx.empty() ? Mat() : getSubMatrix(normCatResponses, idx, cv::ml::ROW_SAMPLE); // like 'responses' - } - Mat getNormCatResponses() const CV_OVERRIDE { return normCatResponses; } - Mat getClassLabels() const CV_OVERRIDE { return classLabels; } - Mat getClassCounters() const { return classCounters; } - int getCatCount(int vi) const CV_OVERRIDE - { - int n = (int)catOfs.total(); - CV_Assert( 0 <= vi && vi < n ); - Vec2i ofs = catOfs.at(vi); - return ofs[1] - ofs[0]; - } - - Mat getCatOfs() const CV_OVERRIDE { return catOfs; } - Mat getCatMap() const CV_OVERRIDE { return catMap; } - - Mat getDefaultSubstValues() const CV_OVERRIDE { return missingSubst; } - - void closeFile() { if(file) fclose(file); file=0; } - void clear() - { - closeFile(); - samples.release(); - missing.release(); - varType.release(); - varSymbolFlags.release(); - responses.release(); - sampleIdx.release(); - trainSampleIdx.release(); - testSampleIdx.release(); - normCatResponses.release(); - classLabels.release(); - classCounters.release(); - catMap.release(); - catOfs.release(); - nameMap = MapType(); - layout = ROW_SAMPLE; - } - - typedef std::map CatMapHash; - - void setData(InputArray _samples, int _layout, InputArray _responses, - InputArray _varIdx, InputArray _sampleIdx, InputArray _sampleWeights, - InputArray _varType, InputArray _missing) - { - clear(); - - CV_Assert(_layout == ROW_SAMPLE || _layout == COL_SAMPLE ); - samples = _samples.getMat(); - layout = _layout; - responses = _responses.getMat(); - varIdx = _varIdx.getMat(); - sampleIdx = _sampleIdx.getMat(); - sampleWeights = _sampleWeights.getMat(); - varType = _varType.getMat(); - missing = _missing.getMat(); - - int nsamples = layout == ROW_SAMPLE ? samples.rows : samples.cols; - int ninputvars = layout == ROW_SAMPLE ? 
samples.cols : samples.rows; - int i, noutputvars = 0; - - CV_Assert( samples.type() == CV_32F || samples.type() == CV_32S ); - - if( !sampleIdx.empty() ) - { - CV_Assert( (sampleIdx.checkVector(1, CV_32S, true) > 0 && - checkRange(sampleIdx, true, 0, 0, nsamples)) || - sampleIdx.checkVector(1, CV_8U, true) == nsamples ); - if( sampleIdx.type() == CV_8U ) - sampleIdx = convertMaskToIdx(sampleIdx); - } - - if( !sampleWeights.empty() ) - { - CV_Assert( sampleWeights.checkVector(1, CV_32F, true) == nsamples ); - } - else - { - sampleWeights = Mat::ones(nsamples, 1, CV_32F); - } - - if( !varIdx.empty() ) - { - CV_Assert( (varIdx.checkVector(1, CV_32S, true) > 0 && - checkRange(varIdx, true, 0, 0, ninputvars)) || - varIdx.checkVector(1, CV_8U, true) == ninputvars ); - if( varIdx.type() == CV_8U ) - varIdx = convertMaskToIdx(varIdx); - varIdx = varIdx.clone(); - std::sort(varIdx.ptr(), varIdx.ptr() + varIdx.total()); - } - - if( !responses.empty() ) - { - CV_Assert( responses.type() == CV_32F || responses.type() == CV_32S ); - if( (responses.cols == 1 || responses.rows == 1) && (int)responses.total() == nsamples ) - noutputvars = 1; - else - { - CV_Assert( (layout == ROW_SAMPLE && responses.rows == nsamples) || - (layout == COL_SAMPLE && responses.cols == nsamples) ); - noutputvars = layout == ROW_SAMPLE ? responses.cols : responses.rows; - } - if( !responses.isContinuous() || (layout == COL_SAMPLE && noutputvars > 1) ) - { - Mat temp; - transpose(responses, temp); - responses = temp; - } - } - - int nvars = ninputvars + noutputvars; - - if( !varType.empty() ) - { - CV_Assert( varType.checkVector(1, CV_8U, true) == nvars && - checkRange(varType, true, 0, VAR_ORDERED, VAR_CATEGORICAL+1) ); - } - else - { - varType.create(1, nvars, CV_8U); - varType = Scalar::all(VAR_ORDERED); - if( noutputvars == 1 ) - varType.at(ninputvars) = (uchar)(responses.type() < CV_32F ? VAR_CATEGORICAL : VAR_ORDERED); - } - - if( noutputvars > 1 ) - { - for( i = 0; i < noutputvars; i++ ) - CV_Assert( varType.at(ninputvars + i) == VAR_ORDERED ); - } - - catOfs = Mat::zeros(1, nvars, CV_32SC2); - missingSubst = Mat::zeros(1, nvars, CV_32F); - - vector labels, counters, sortbuf, tempCatMap; - vector tempCatOfs; - CatMapHash ofshash; - - AutoBuffer buf(nsamples); - Mat non_missing(layout == ROW_SAMPLE ? Size(1, nsamples) : Size(nsamples, 1), CV_8U, buf.data()); - bool haveMissing = !missing.empty(); - if( haveMissing ) - { - CV_Assert( missing.size() == samples.size() && missing.type() == CV_8U ); - } - - // we iterate through all the variables. For each categorical variable we build a map - // in order to convert input values of the variable into normalized values (0..catcount_vi-1) - // often many categorical variables are similar, so we compress the map - try to re-use - // maps for different variables if they are identical - for( i = 0; i < ninputvars; i++ ) - { - Mat values_i = layout == ROW_SAMPLE ? 
samples.col(i) : samples.row(i); - - if( varType.at(i) == VAR_CATEGORICAL ) - { - preprocessCategorical(values_i, 0, labels, 0, sortbuf); - missingSubst.at(i) = -1.f; - int j, m = (int)labels.size(); - CV_Assert( m > 0 ); - int a = labels.front(), b = labels.back(); - const int* currmap = &labels[0]; - int hashval = ((unsigned)a*127 + (unsigned)b)*127 + m; - CatMapHash::iterator it = ofshash.find(hashval); - if( it != ofshash.end() ) - { - int vi = it->second; - Vec2i ofs0 = tempCatOfs[vi]; - int m0 = ofs0[1] - ofs0[0]; - const int* map0 = &tempCatMap[ofs0[0]]; - if( m0 == m && map0[0] == a && map0[m0-1] == b ) - { - for( j = 0; j < m; j++ ) - if( map0[j] != currmap[j] ) - break; - if( j == m ) - { - // re-use the map - tempCatOfs.push_back(ofs0); - continue; - } - } - } - else - ofshash[hashval] = i; - Vec2i ofs; - ofs[0] = (int)tempCatMap.size(); - ofs[1] = ofs[0] + m; - tempCatOfs.push_back(ofs); - std::copy(labels.begin(), labels.end(), std::back_inserter(tempCatMap)); - } - else - { - tempCatOfs.push_back(Vec2i(0, 0)); - /*Mat missing_i = layout == ROW_SAMPLE ? missing.col(i) : missing.row(i); - compare(missing_i, Scalar::all(0), non_missing, CMP_EQ); - missingSubst.at(i) = (float)(mean(values_i, non_missing)[0]);*/ - missingSubst.at(i) = 0.f; - } - } - - if( !tempCatOfs.empty() ) - { - Mat(tempCatOfs).copyTo(catOfs); - Mat(tempCatMap).copyTo(catMap); - } - - if( noutputvars > 0 && varType.at(ninputvars) == VAR_CATEGORICAL ) - { - preprocessCategorical(responses, &normCatResponses, labels, &counters, sortbuf); - Mat(labels).copyTo(classLabels); - Mat(counters).copyTo(classCounters); - } - } - - Mat convertMaskToIdx(const Mat& mask) - { - int i, j, nz = countNonZero(mask), n = mask.cols + mask.rows - 1; - Mat idx(1, nz, CV_32S); - for( i = j = 0; i < n; i++ ) - if( mask.at(i) ) - idx.at(j++) = i; - return idx; - } - - struct CmpByIdx - { - CmpByIdx(const int* _data, int _step) : data(_data), step(_step) {} - bool operator ()(int i, int j) const { return data[i*step] < data[j*step]; } - const int* data; - int step; - }; - - void preprocessCategorical(const Mat& data, Mat* normdata, vector& labels, - vector* counters, vector& sortbuf) - { - CV_Assert((data.cols == 1 || data.rows == 1) && (data.type() == CV_32S || data.type() == CV_32F)); - int* odata = 0; - int ostep = 0; - - if(normdata) - { - normdata->create(data.size(), CV_32S); - odata = normdata->ptr(); - ostep = normdata->isContinuous() ? 1 : (int)normdata->step1(); - } - - int i, n = data.cols + data.rows - 1; - sortbuf.resize(n*2); - int* idx = &sortbuf[0]; - int* idata = (int*)data.ptr(); - int istep = data.isContinuous() ? 
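- // element stride: 1 for contiguous data, otherwise the row step in elements: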
1 : (int)data.step1(); - - if( data.type() == CV_32F ) - { - idata = idx + n; - const float* fdata = data.ptr(); - for( i = 0; i < n; i++ ) - { - if( fdata[i*istep] == MISSED_VAL ) - idata[i] = -1; - else - { - idata[i] = cvRound(fdata[i*istep]); - CV_Assert( (float)idata[i] == fdata[i*istep] ); - } - } - istep = 1; - } - - for( i = 0; i < n; i++ ) - idx[i] = i; - - std::sort(idx, idx + n, CmpByIdx(idata, istep)); - - int clscount = 1; - for( i = 1; i < n; i++ ) - clscount += idata[idx[i]*istep] != idata[idx[i-1]*istep]; - - int clslabel = -1; - int prev = ~idata[idx[0]*istep]; - int previdx = 0; - - labels.resize(clscount); - if(counters) - counters->resize(clscount); - - for( i = 0; i < n; i++ ) - { - int l = idata[idx[i]*istep]; - if( l != prev ) - { - clslabel++; - labels[clslabel] = l; - int k = i - previdx; - if( clslabel > 0 && counters ) - counters->at(clslabel-1) = k; - prev = l; - previdx = i; - } - if(odata) - odata[idx[i]*ostep] = clslabel; - } - if(counters) - counters->at(clslabel) = i - previdx; - } - - bool loadCSV(const String& filename, int headerLines, - int responseStartIdx, int responseEndIdx, - const String& varTypeSpec, char delimiter, char missch) - { - const int M = 1000000; - const char delimiters[3] = { ' ', delimiter, '\0' }; - int nvars = 0; - bool varTypesSet = false; - - clear(); - - file = fopen( filename.c_str(), "rt" ); - - if( !file ) - return false; - - std::vector _buf(M); - std::vector allresponses; - std::vector rowvals; - std::vector vtypes, rowtypes; - std::vector vsymbolflags; - bool haveMissed = false; - char* buf = &_buf[0]; - - int i, ridx0 = responseStartIdx, ridx1 = responseEndIdx; - int ninputvars = 0, noutputvars = 0; - - Mat tempSamples, tempMissing, tempResponses; - MapType tempNameMap; - int catCounter = 1; - - // skip header lines - int lineno = 0; - for(;;lineno++) - { - if( !fgets(buf, M, file) ) - break; - if(lineno < headerLines ) - continue; - // trim trailing spaces - int idx = (int)strlen(buf)-1; - while( idx >= 0 && isspace(buf[idx]) ) - buf[idx--] = '\0'; - // skip spaces in the beginning - char* ptr = buf; - while( *ptr != '\0' && isspace(*ptr) ) - ptr++; - // skip commented off lines - if(*ptr == '#') - continue; - rowvals.clear(); - rowtypes.clear(); - - char* token = strtok(buf, delimiters); - if (!token) - break; - - for(;;) - { - float val=0.f; int tp = 0; - decodeElem( token, val, tp, missch, tempNameMap, catCounter ); - if( tp == VAR_MISSED ) - haveMissed = true; - rowvals.push_back(val); - rowtypes.push_back((uchar)tp); - token = strtok(NULL, delimiters); - if (!token) - break; - } - - if( nvars == 0 ) - { - if( rowvals.empty() ) - CV_Error(CV_StsBadArg, "invalid CSV format; no data found"); - nvars = (int)rowvals.size(); - if( !varTypeSpec.empty() && varTypeSpec.size() > 0 ) - { - setVarTypes(varTypeSpec, nvars, vtypes); - varTypesSet = true; - } - else - vtypes = rowtypes; - vsymbolflags.resize(nvars); - for( i = 0; i < nvars; i++ ) - vsymbolflags[i] = (uchar)(rowtypes[i] == VAR_CATEGORICAL); - - ridx0 = ridx0 >= 0 ? ridx0 : ridx0 == -1 ? nvars - 1 : -1; - ridx1 = ridx1 >= 0 ? ridx1 : ridx0 >= 0 ? ridx0+1 : -1; - CV_Assert(ridx1 > ridx0); - noutputvars = ridx0 >= 0 ? 
ridx1 - ridx0 : 0; - ninputvars = nvars - noutputvars; - } - else - CV_Assert( nvars == (int)rowvals.size() ); - - // check var types - for( i = 0; i < nvars; i++ ) - { - CV_Assert( (!varTypesSet && vtypes[i] == rowtypes[i]) || - (varTypesSet && (vtypes[i] == rowtypes[i] || rowtypes[i] == VAR_ORDERED)) ); - uchar sflag = (uchar)(rowtypes[i] == VAR_CATEGORICAL); - if( vsymbolflags[i] == VAR_MISSED ) - vsymbolflags[i] = sflag; - else - CV_Assert(vsymbolflags[i] == sflag || rowtypes[i] == VAR_MISSED); - } - - if( ridx0 >= 0 ) - { - for( i = ridx1; i < nvars; i++ ) - std::swap(rowvals[i], rowvals[i-noutputvars]); - for( i = ninputvars; i < nvars; i++ ) - allresponses.push_back(rowvals[i]); - rowvals.pop_back(); - } - Mat rmat(1, ninputvars, CV_32F, &rowvals[0]); - tempSamples.push_back(rmat); - } - - closeFile(); - - int nsamples = tempSamples.rows; - if( nsamples == 0 ) - return false; - - if( haveMissed ) - compare(tempSamples, MISSED_VAL, tempMissing, CMP_EQ); - - if( ridx0 >= 0 ) - { - for( i = ridx1; i < nvars; i++ ) - std::swap(vtypes[i], vtypes[i-noutputvars]); - if( noutputvars > 1 ) - { - for( i = ninputvars; i < nvars; i++ ) - if( vtypes[i] == VAR_CATEGORICAL ) - CV_Error(CV_StsBadArg, - "If responses are vector values, not scalars, they must be marked as ordered responses"); - } - } - - if( !varTypesSet && noutputvars == 1 && vtypes[ninputvars] == VAR_ORDERED ) - { - for( i = 0; i < nsamples; i++ ) - if( allresponses[i] != cvRound(allresponses[i]) ) - break; - if( i == nsamples ) - vtypes[ninputvars] = VAR_CATEGORICAL; - } - - //If there are responses in the csv file, save them. If not, responses matrix will contain just zeros - if (noutputvars != 0){ - Mat(nsamples, noutputvars, CV_32F, &allresponses[0]).copyTo(tempResponses); - setData(tempSamples, ROW_SAMPLE, tempResponses, noArray(), noArray(), - noArray(), Mat(vtypes).clone(), tempMissing); - } - else{ - Mat zero_mat(nsamples, 1, CV_32F, Scalar(0)); - zero_mat.copyTo(tempResponses); - setData(tempSamples, ROW_SAMPLE, tempResponses, noArray(), noArray(), - noArray(), noArray(), tempMissing); - } - bool ok = !samples.empty(); - if(ok) - { - std::swap(tempNameMap, nameMap); - Mat(vsymbolflags).copyTo(varSymbolFlags); - } - return ok; - } - - void decodeElem( const char* token, float& elem, int& type, - char missch, MapType& namemap, int& counter ) const - { - char* stopstring = NULL; - elem = (float)strtod( token, &stopstring ); - if( *stopstring == missch && strlen(stopstring) == 1 ) // missed value - { - elem = MISSED_VAL; - type = VAR_MISSED; - } - else if( *stopstring != '\0' ) - { - MapType::iterator it = namemap.find(token); - if( it == namemap.end() ) - { - elem = (float)counter; - namemap[token] = counter++; - } - else - elem = (float)it->second; - type = VAR_CATEGORICAL; - } - else - type = VAR_ORDERED; - } - - void setVarTypes( const String& s, int nvars, std::vector& vtypes ) const - { - const char* errmsg = "type spec is not correct; it should have format \"cat\", \"ord\" or " - "\"ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\", where n's and m's are 0-based variable indices"; - const char* str = s.c_str(); - int specCounter = 0; - - vtypes.resize(nvars); - - for( int k = 0; k < 2; k++ ) - { - const char* ptr = strstr(str, k == 0 ? "ord" : "cat"); - int tp = k == 0 ? 
VAR_ORDERED : VAR_CATEGORICAL; - if( ptr ) // parse ord/cat str - { - char* stopstring = NULL; - - if( ptr[3] == '\0' ) - { - for( int i = 0; i < nvars; i++ ) - vtypes[i] = (uchar)tp; - specCounter = nvars; - break; - } - - if ( ptr[3] != '[') - CV_Error( CV_StsBadArg, errmsg ); - - ptr += 4; // pass "ord[" - do - { - int b1 = (int)strtod( ptr, &stopstring ); - if( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') ) - CV_Error( CV_StsBadArg, errmsg ); - ptr = stopstring + 1; - if( (stopstring[0] == ',') || (stopstring[0] == ']')) - { - CV_Assert( 0 <= b1 && b1 < nvars ); - vtypes[b1] = (uchar)tp; - specCounter++; - } - else - { - if( stopstring[0] == '-') - { - int b2 = (int)strtod( ptr, &stopstring); - if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') ) - CV_Error( CV_StsBadArg, errmsg ); - ptr = stopstring + 1; - CV_Assert( 0 <= b1 && b1 <= b2 && b2 < nvars ); - for (int i = b1; i <= b2; i++) - vtypes[i] = (uchar)tp; - specCounter += b2 - b1 + 1; - } - else - CV_Error( CV_StsBadArg, errmsg ); - - } - } - while(*stopstring != ']'); - } - } - - if( specCounter != nvars ) - CV_Error( CV_StsBadArg, "type of some variables is not specified" ); - } - - void setTrainTestSplitRatio(double ratio, bool shuffle) CV_OVERRIDE - { - CV_Assert( 0. <= ratio && ratio <= 1. ); - setTrainTestSplit(cvRound(getNSamples()*ratio), shuffle); - } - - void setTrainTestSplit(int count, bool shuffle) CV_OVERRIDE - { - int i, nsamples = getNSamples(); - CV_Assert( 0 <= count && count < nsamples ); - - trainSampleIdx.release(); - testSampleIdx.release(); - - if( count == 0 ) - trainSampleIdx = sampleIdx; - else if( count == nsamples ) - testSampleIdx = sampleIdx; - else - { - Mat mask(1, nsamples, CV_8U); - uchar* mptr = mask.ptr(); - for( i = 0; i < nsamples; i++ ) - mptr[i] = (uchar)(i < count); - trainSampleIdx.create(1, count, CV_32S); - testSampleIdx.create(1, nsamples - count, CV_32S); - int j0 = 0, j1 = 0; - const int* sptr = !sampleIdx.empty() ? sampleIdx.ptr() : 0; - int* trainptr = trainSampleIdx.ptr(); - int* testptr = testSampleIdx.ptr(); - for( i = 0; i < nsamples; i++ ) - { - int idx = sptr ? sptr[i] : i; - if( mptr[i] ) - trainptr[j0++] = idx; - else - testptr[j1++] = idx; - } - if( shuffle ) - shuffleTrainTest(); - } - } - - void shuffleTrainTest() CV_OVERRIDE - { - if( !trainSampleIdx.empty() && !testSampleIdx.empty() ) - { - int i, nsamples = getNSamples(), ntrain = getNTrainSamples(), ntest = getNTestSamples(); - int* trainIdx = trainSampleIdx.ptr(); - int* testIdx = testSampleIdx.ptr(); - RNG& rng = theRNG(); - - for( i = 0; i < nsamples; i++) - { - int a = rng.uniform(0, nsamples); - int b = rng.uniform(0, nsamples); - int* ptra = trainIdx; - int* ptrb = trainIdx; - if( a >= ntrain ) - { - ptra = testIdx; - a -= ntrain; - CV_Assert( a < ntest ); - } - if( b >= ntrain ) - { - ptrb = testIdx; - b -= ntrain; - CV_Assert( b < ntest ); - } - std::swap(ptra[a], ptrb[b]); - } - } - } - - Mat getTrainSamples(int _layout, - bool compressSamples, - bool compressVars) const CV_OVERRIDE - { - if( samples.empty() ) - return samples; - - if( (!compressSamples || (trainSampleIdx.empty() && sampleIdx.empty())) && - (!compressVars || varIdx.empty()) && - layout == _layout ) - return samples; - - int drows = getNTrainSamples(), dcols = getNVars(); - Mat sidx = getTrainSampleIdx(), vidx = getVarIdx(); - const float* src0 = samples.ptr(); - const int* sptr = !sidx.empty() ? sidx.ptr() : 0; - const int* vptr = !vidx.empty() ? 
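- // a null variable-index pointer below stands for the identity mapping, - // i.e. all variables are taken in their original order: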
vidx.ptr() : 0; - size_t sstep0 = samples.step/samples.elemSize(); - size_t sstep = layout == ROW_SAMPLE ? sstep0 : 1; - size_t vstep = layout == ROW_SAMPLE ? 1 : sstep0; - - if( _layout == COL_SAMPLE ) - { - std::swap(drows, dcols); - std::swap(sptr, vptr); - std::swap(sstep, vstep); - } - - Mat dsamples(drows, dcols, CV_32F); - - for( int i = 0; i < drows; i++ ) - { - const float* src = src0 + (sptr ? sptr[i] : i)*sstep; - float* dst = dsamples.ptr(i); - - for( int j = 0; j < dcols; j++ ) - dst[j] = src[(vptr ? vptr[j] : j)*vstep]; - } - - return dsamples; - } - - void getValues( int vi, InputArray _sidx, float* values ) const CV_OVERRIDE - { - Mat sidx = _sidx.getMat(); - int i, n = sidx.checkVector(1, CV_32S), nsamples = getNSamples(); - CV_Assert( 0 <= vi && vi < getNAllVars() ); - CV_Assert( n >= 0 ); - const int* s = n > 0 ? sidx.ptr() : 0; - if( n == 0 ) - n = nsamples; - - size_t step = samples.step/samples.elemSize(); - size_t sstep = layout == ROW_SAMPLE ? step : 1; - size_t vstep = layout == ROW_SAMPLE ? 1 : step; - - const float* src = samples.ptr() + vi*vstep; - float subst = missingSubst.at(vi); - for( i = 0; i < n; i++ ) - { - int j = i; - if( s ) - { - j = s[i]; - CV_Assert( 0 <= j && j < ((layout == ROW_SAMPLE) ? samples.rows : samples.cols) ); - } - values[i] = src[j*sstep]; - if( values[i] == MISSED_VAL ) - values[i] = subst; - } - } - - void getNormCatValues( int vi, InputArray _sidx, int* values ) const CV_OVERRIDE - { - float* fvalues = (float*)values; - getValues(vi, _sidx, fvalues); - int i, n = (int)_sidx.total(); - Vec2i ofs = catOfs.at(vi); - int m = ofs[1] - ofs[0]; - - CV_Assert( m > 0 ); // if m==0, vi is an ordered variable - const int* cmap = &catMap.at(ofs[0]); - bool fastMap = (m == cmap[m - 1] - cmap[0] + 1); - - if( fastMap ) - { - for( i = 0; i < n; i++ ) - { - int val = cvRound(fvalues[i]); - int idx = val - cmap[0]; - CV_Assert(cmap[idx] == val); - values[i] = idx; - } - } - else - { - for( i = 0; i < n; i++ ) - { - int val = cvRound(fvalues[i]); - int a = 0, b = m, c = -1; - - while( a < b ) - { - c = (a + b) >> 1; - if( val < cmap[c] ) - b = c; - else if( val > cmap[c] ) - a = c+1; - else - break; - } - - CV_DbgAssert( c >= 0 && val == cmap[c] ); - values[i] = c; - } - } - } - - void getSample(InputArray _vidx, int sidx, float* buf) const CV_OVERRIDE - { - CV_Assert(buf != 0 && 0 <= sidx && sidx < getNSamples()); - Mat vidx = _vidx.getMat(); - int i, n = vidx.checkVector(1, CV_32S), nvars = getNAllVars(); - CV_Assert( n >= 0 ); - const int* vptr = n > 0 ? vidx.ptr() : 0; - if( n == 0 ) - n = nvars; - - size_t step = samples.step/samples.elemSize(); - size_t sstep = layout == ROW_SAMPLE ? step : 1; - size_t vstep = layout == ROW_SAMPLE ? 
1 : step; - - const float* src = samples.ptr() + sidx*sstep; - for( i = 0; i < n; i++ ) - { - int j = i; - if( vptr ) - { - j = vptr[i]; - CV_Assert( 0 <= j && j < nvars ); - } - buf[i] = src[j*vstep]; - } - } - - void getNames(std::vector& names) const CV_OVERRIDE - { - size_t n = nameMap.size(); - TrainDataImpl::MapType::const_iterator it = nameMap.begin(), - it_end = nameMap.end(); - names.resize(n+1); - names[0] = "?"; - for( ; it != it_end; ++it ) - { - String s = it->first; - int label = it->second; - CV_Assert( label > 0 && label <= (int)n ); - names[label] = s; - } - } - - Mat getVarSymbolFlags() const CV_OVERRIDE - { - return varSymbolFlags; - } - - FILE* file; - int layout; - Mat samples, missing, varType, varIdx, varSymbolFlags, responses, missingSubst; - Mat sampleIdx, trainSampleIdx, testSampleIdx; - Mat sampleWeights, catMap, catOfs; - Mat normCatResponses, classLabels, classCounters; - MapType nameMap; -}; - - -Ptr TrainData::loadFromCSV(const String& filename, - int headerLines, - int responseStartIdx, - int responseEndIdx, - const String& varTypeSpec, - char delimiter, char missch) -{ - CV_TRACE_FUNCTION_SKIP_NESTED(); - Ptr td = makePtr(); - if(!td->loadCSV(filename, headerLines, responseStartIdx, responseEndIdx, varTypeSpec, delimiter, missch)) - td.release(); - return td; -} - -Ptr TrainData::create(InputArray samples, int layout, InputArray responses, - InputArray varIdx, InputArray sampleIdx, InputArray sampleWeights, - InputArray varType) -{ - CV_TRACE_FUNCTION_SKIP_NESTED(); - Ptr td = makePtr(); - td->setData(samples, layout, responses, varIdx, sampleIdx, sampleWeights, varType, noArray()); - return td; -} - -}} - -/* End of file. */ diff --git a/modules/ml/src/em.cpp b/modules/ml/src/em.cpp deleted file mode 100644 index 3e0eeb560a..0000000000 --- a/modules/ml/src/em.cpp +++ /dev/null @@ -1,859 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright( C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -//(including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort(including negligence or otherwise) arising in any way out of -// the use of this software, even ifadvised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -namespace cv -{ -namespace ml -{ - -const double minEigenValue = DBL_EPSILON; - -class CV_EXPORTS EMImpl CV_FINAL : public EM -{ -public: - - int nclusters; - int covMatType; - TermCriteria termCrit; - - inline TermCriteria getTermCriteria() const CV_OVERRIDE { return termCrit; } - inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { termCrit = val; } - - void setClustersNumber(int val) CV_OVERRIDE - { - nclusters = val; - CV_Assert(nclusters >= 1); - } - - int getClustersNumber() const CV_OVERRIDE - { - return nclusters; - } - - void setCovarianceMatrixType(int val) CV_OVERRIDE - { - covMatType = val; - CV_Assert(covMatType == COV_MAT_SPHERICAL || - covMatType == COV_MAT_DIAGONAL || - covMatType == COV_MAT_GENERIC); - } - - int getCovarianceMatrixType() const CV_OVERRIDE - { - return covMatType; - } - - EMImpl() - { - nclusters = DEFAULT_NCLUSTERS; - covMatType=EM::COV_MAT_DIAGONAL; - termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6); - } - - virtual ~EMImpl() {} - - void clear() CV_OVERRIDE - { - trainSamples.release(); - trainProbs.release(); - trainLogLikelihoods.release(); - trainLabels.release(); - - weights.release(); - means.release(); - covs.clear(); - - covsEigenValues.clear(); - invCovsEigenValues.clear(); - covsRotateMats.clear(); - - logWeightDivDet.release(); - } - - bool train(const Ptr& data, int) CV_OVERRIDE - { - CV_Assert(!data.empty()); - Mat samples = data->getTrainSamples(), labels; - return trainEM(samples, labels, noArray(), noArray()); - } - - bool trainEM(InputArray samples, - OutputArray logLikelihoods, - OutputArray labels, - OutputArray probs) CV_OVERRIDE - { - Mat samplesMat = samples.getMat(); - setTrainData(START_AUTO_STEP, samplesMat, 0, 0, 0, 0); - return doTrain(START_AUTO_STEP, logLikelihoods, labels, probs); - } - - bool trainE(InputArray samples, - InputArray _means0, - InputArray _covs0, - InputArray _weights0, - OutputArray logLikelihoods, - OutputArray labels, - OutputArray probs) CV_OVERRIDE - { - Mat samplesMat = samples.getMat(); - std::vector covs0; - _covs0.getMatVector(covs0); - - Mat means0 = _means0.getMat(), weights0 = _weights0.getMat(); - - setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0, - !_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0); - return doTrain(START_E_STEP, logLikelihoods, labels, probs); - } - - bool trainM(InputArray samples, - InputArray _probs0, - OutputArray logLikelihoods, - OutputArray labels, - OutputArray probs) CV_OVERRIDE - { - Mat samplesMat = samples.getMat(); - Mat probs0 = _probs0.getMat(); - - setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? 
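- // (START_M_STEP: training opens with an M-step, so only the initial - // per-sample posteriors probs0 are passed in; means, covariances and - // weights are then estimated from them)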
&probs0 : 0, 0, 0, 0); - return doTrain(START_M_STEP, logLikelihoods, labels, probs); - } - - float predict(InputArray _inputs, OutputArray _outputs, int) const CV_OVERRIDE - { - bool needprobs = _outputs.needed(); - Mat samples = _inputs.getMat(), probs, probsrow; - int ptype = CV_64F; - float firstres = 0.f; - int i, nsamples = samples.rows; - - if( needprobs ) - { - if( _outputs.fixedType() ) - ptype = _outputs.type(); - _outputs.create(samples.rows, nclusters, ptype); - probs = _outputs.getMat(); - } - else - nsamples = std::min(nsamples, 1); - - for( i = 0; i < nsamples; i++ ) - { - if( needprobs ) - probsrow = probs.row(i); - Vec2d res = computeProbabilities(samples.row(i), needprobs ? &probsrow : 0, ptype); - if( i == 0 ) - firstres = (float)res[1]; - } - return firstres; - } - - Vec2d predict2(InputArray _sample, OutputArray _probs) const CV_OVERRIDE - { - int ptype = CV_64F; - Mat sample = _sample.getMat(); - CV_Assert(isTrained()); - - CV_Assert(!sample.empty()); - if(sample.type() != CV_64FC1) - { - Mat tmp; - sample.convertTo(tmp, CV_64FC1); - sample = tmp; - } - sample = sample.reshape(1, 1); - - Mat probs; - if( _probs.needed() ) - { - if( _probs.fixedType() ) - ptype = _probs.type(); - _probs.create(1, nclusters, ptype); - probs = _probs.getMat(); - } - - return computeProbabilities(sample, !probs.empty() ? &probs : 0, ptype); - } - - bool isTrained() const CV_OVERRIDE - { - return !means.empty(); - } - - bool isClassifier() const CV_OVERRIDE - { - return true; - } - - int getVarCount() const CV_OVERRIDE - { - return means.cols; - } - - String getDefaultName() const CV_OVERRIDE - { - return "opencv_ml_em"; - } - - static void checkTrainData(int startStep, const Mat& samples, - int nclusters, int covMatType, const Mat* probs, const Mat* means, - const std::vector* covs, const Mat* weights) - { - // Check samples. - CV_Assert(!samples.empty()); - CV_Assert(samples.channels() == 1); - - int nsamples = samples.rows; - int dim = samples.cols; - - // Check training params. 
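- // Expected shapes for nsamples points of dimension dim (as enforced below): - // probs0: nsamples x nclusters, means0: nclusters x dim, weights0: 1 x nclusters - // (or nclusters x 1), covs0: nclusters matrices of dim x dim each.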
- CV_Assert(nclusters > 0); - CV_Assert(nclusters <= nsamples); - CV_Assert(startStep == START_AUTO_STEP || - startStep == START_E_STEP || - startStep == START_M_STEP); - CV_Assert(covMatType == COV_MAT_GENERIC || - covMatType == COV_MAT_DIAGONAL || - covMatType == COV_MAT_SPHERICAL); - - CV_Assert(!probs || - (!probs->empty() && - probs->rows == nsamples && probs->cols == nclusters && - (probs->type() == CV_32FC1 || probs->type() == CV_64FC1))); - - CV_Assert(!weights || - (!weights->empty() && - (weights->cols == 1 || weights->rows == 1) && static_cast(weights->total()) == nclusters && - (weights->type() == CV_32FC1 || weights->type() == CV_64FC1))); - - CV_Assert(!means || - (!means->empty() && - means->rows == nclusters && means->cols == dim && - means->channels() == 1)); - - CV_Assert(!covs || - (!covs->empty() && - static_cast(covs->size()) == nclusters)); - if(covs) - { - const Size covSize(dim, dim); - for(size_t i = 0; i < covs->size(); i++) - { - const Mat& m = (*covs)[i]; - CV_Assert(!m.empty() && m.size() == covSize && (m.channels() == 1)); - } - } - - if(startStep == START_E_STEP) - { - CV_Assert(means); - } - else if(startStep == START_M_STEP) - { - CV_Assert(probs); - } - } - - static void preprocessSampleData(const Mat& src, Mat& dst, int dstType, bool isAlwaysClone) - { - if(src.type() == dstType && !isAlwaysClone) - dst = src; - else - src.convertTo(dst, dstType); - } - - static void preprocessProbability(Mat& probs) - { - max(probs, 0., probs); - - const double uniformProbability = (double)(1./probs.cols); - for(int y = 0; y < probs.rows; y++) - { - Mat sampleProbs = probs.row(y); - - double maxVal = 0; - minMaxLoc(sampleProbs, 0, &maxVal); - if(maxVal < FLT_EPSILON) - sampleProbs.setTo(uniformProbability); - else - normalize(sampleProbs, sampleProbs, 1, 0, NORM_L1); - } - } - - void setTrainData(int startStep, const Mat& samples, - const Mat* probs0, - const Mat* means0, - const std::vector* covs0, - const Mat* weights0) - { - clear(); - - checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0); - - bool isKMeansInit = (startStep == START_AUTO_STEP) || (startStep == START_E_STEP && (covs0 == 0 || weights0 == 0)); - // Set checked data - preprocessSampleData(samples, trainSamples, isKMeansInit ? CV_32FC1 : CV_64FC1, false); - - // set probs - if(probs0 && startStep == START_M_STEP) - { - preprocessSampleData(*probs0, trainProbs, CV_64FC1, true); - preprocessProbability(trainProbs); - } - - // set weights - if(weights0 && (startStep == START_E_STEP && covs0)) - { - weights0->convertTo(weights, CV_64FC1); - weights = weights.reshape(1,1); - preprocessProbability(weights); - } - - // set means - if(means0 && (startStep == START_E_STEP/* || startStep == START_AUTO_STEP*/)) - means0->convertTo(means, isKMeansInit ? 
CV_32FC1 : CV_64FC1); - - // set covs - if(covs0 && (startStep == START_E_STEP && weights0)) - { - covs.resize(nclusters); - for(size_t i = 0; i < covs0->size(); i++) - (*covs0)[i].convertTo(covs[i], CV_64FC1); - } - } - - void decomposeCovs() - { - CV_Assert(!covs.empty()); - covsEigenValues.resize(nclusters); - if(covMatType == COV_MAT_GENERIC) - covsRotateMats.resize(nclusters); - invCovsEigenValues.resize(nclusters); - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - CV_Assert(!covs[clusterIndex].empty()); - - SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV); - - if(covMatType == COV_MAT_SPHERICAL) - { - double maxSingularVal = svd.w.at(0); - covsEigenValues[clusterIndex] = Mat(1, 1, CV_64FC1, Scalar(maxSingularVal)); - } - else if(covMatType == COV_MAT_DIAGONAL) - { - covsEigenValues[clusterIndex] = covs[clusterIndex].diag().clone(); //Preserve the original order of eigen values. - } - else //COV_MAT_GENERIC - { - covsEigenValues[clusterIndex] = svd.w; - covsRotateMats[clusterIndex] = svd.u; - } - max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]); - invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex]; - } - } - - void clusterTrainSamples() - { - int nsamples = trainSamples.rows; - - // Cluster samples, compute/update means - - // Convert samples and means to 32F, because kmeans requires this type. - Mat trainSamplesFlt, meansFlt; - if(trainSamples.type() != CV_32FC1) - trainSamples.convertTo(trainSamplesFlt, CV_32FC1); - else - trainSamplesFlt = trainSamples; - if(!means.empty()) - { - if(means.type() != CV_32FC1) - means.convertTo(meansFlt, CV_32FC1); - else - meansFlt = means; - } - - Mat labels; - kmeans(trainSamplesFlt, nclusters, labels, - TermCriteria(TermCriteria::COUNT, means.empty() ? 10 : 1, 0.5), - 10, KMEANS_PP_CENTERS, meansFlt); - - // Convert samples and means back to 64F. - CV_Assert(meansFlt.type() == CV_32FC1); - if(trainSamples.type() != CV_64FC1) - { - Mat trainSamplesBuffer; - trainSamplesFlt.convertTo(trainSamplesBuffer, CV_64FC1); - trainSamples = trainSamplesBuffer; - } - meansFlt.convertTo(means, CV_64FC1); - - // Compute weights and covs - weights = Mat(1, nclusters, CV_64FC1, Scalar(0)); - covs.resize(nclusters); - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - Mat clusterSamples; - for(int sampleIndex = 0; sampleIndex < nsamples; sampleIndex++) - { - if(labels.at(sampleIndex) == clusterIndex) - { - const Mat sample = trainSamples.row(sampleIndex); - clusterSamples.push_back(sample); - } - } - CV_Assert(!clusterSamples.empty()); - - calcCovarMatrix(clusterSamples, covs[clusterIndex], means.row(clusterIndex), - CV_COVAR_NORMAL + CV_COVAR_ROWS + CV_COVAR_USE_AVG + CV_COVAR_SCALE, CV_64FC1); - weights.at(clusterIndex) = static_cast(clusterSamples.rows)/static_cast(nsamples); - } - - decomposeCovs(); - } - - void computeLogWeightDivDet() - { - CV_Assert(!covsEigenValues.empty()); - - Mat logWeights; - cv::max(weights, DBL_MIN, weights); - log(weights, logWeights); - - logWeightDivDet.create(1, nclusters, CV_64FC1); - // note: logWeightDivDet = log(weight_k) - 0.5 * log(|det(cov_k)|) - - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - double logDetCov = 0.; - const int evalCount = static_cast(covsEigenValues[clusterIndex].total()); - for(int di = 0; di < evalCount; di++) - logDetCov += std::log(covsEigenValues[clusterIndex].at(covMatType != COV_MAT_SPHERICAL ? 
di : 0)); - - logWeightDivDet.at(clusterIndex) = logWeights.at(clusterIndex) - 0.5 * logDetCov; - } - } - - bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs) - { - int dim = trainSamples.cols; - // Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP - if(startStep != START_M_STEP) - { - if(covs.empty()) - { - CV_Assert(weights.empty()); - clusterTrainSamples(); - } - } - - if(!covs.empty() && covsEigenValues.empty() ) - { - CV_Assert(invCovsEigenValues.empty()); - decomposeCovs(); - } - - if(startStep == START_M_STEP) - mStep(); - - double trainLogLikelihood, prevTrainLogLikelihood = 0.; - int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ? - termCrit.maxCount : DEFAULT_MAX_ITERS; - double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.; - - for(int iter = 0; ; iter++) - { - eStep(); - trainLogLikelihood = sum(trainLogLikelihoods)[0]; - - if(iter >= maxIters - 1) - break; - - double trainLogLikelihoodDelta = trainLogLikelihood - prevTrainLogLikelihood; - if( iter != 0 && - (trainLogLikelihoodDelta < -DBL_EPSILON || - trainLogLikelihoodDelta < epsilon * std::fabs(trainLogLikelihood))) - break; - - mStep(); - - prevTrainLogLikelihood = trainLogLikelihood; - } - - if( trainLogLikelihood <= -DBL_MAX/10000. ) - { - clear(); - return false; - } - - // postprocess covs - covs.resize(nclusters); - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - if(covMatType == COV_MAT_SPHERICAL) - { - covs[clusterIndex].create(dim, dim, CV_64FC1); - setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at(0))); - } - else if(covMatType == COV_MAT_DIAGONAL) - { - covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]); - } - } - - if(labels.needed()) - trainLabels.copyTo(labels); - if(probs.needed()) - trainProbs.copyTo(probs); - if(logLikelihoods.needed()) - trainLogLikelihoods.copyTo(logLikelihoods); - - trainSamples.release(); - trainProbs.release(); - trainLabels.release(); - trainLogLikelihoods.release(); - - return true; - } - - Vec2d computeProbabilities(const Mat& sample, Mat* probs, int ptype) const - { - // L_ik = log(weight_k) - 0.5 * log(|det(cov_k)|) - 0.5 *(x_i - mean_k)' cov_k^(-1) (x_i - mean_k)] - // q = arg(max_k(L_ik)) - // probs_ik = exp(L_ik - L_iq) / (1 + sum_j!=q (exp(L_ij - L_iq)) - // see Alex Smola's blog http://blog.smola.org/page/2 for - // details on the log-sum-exp trick - - int stype = sample.type(); - CV_Assert(!means.empty()); - CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F)); - CV_Assert(sample.size() == Size(means.cols, 1)); - - int dim = sample.cols; - - Mat L(1, nclusters, CV_64FC1), centeredSample(1, dim, CV_64F); - int i, label = 0; - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - const double* mptr = means.ptr(clusterIndex); - double* dptr = centeredSample.ptr(); - if( stype == CV_32F ) - { - const float* sptr = sample.ptr(); - for( i = 0; i < dim; i++ ) - dptr[i] = sptr[i] - mptr[i]; - } - else - { - const double* sptr = sample.ptr(); - for( i = 0; i < dim; i++ ) - dptr[i] = sptr[i] - mptr[i]; - } - - Mat rotatedCenteredSample = covMatType != COV_MAT_GENERIC ? - centeredSample : centeredSample * covsRotateMats[clusterIndex]; - - double Lval = 0; - for(int di = 0; di < dim; di++) - { - double w = invCovsEigenValues[clusterIndex].at(covMatType != COV_MAT_SPHERICAL ? 
di : 0); - double val = rotatedCenteredSample.at(di); - Lval += w * val * val; - } - CV_DbgAssert(!logWeightDivDet.empty()); - L.at(clusterIndex) = logWeightDivDet.at(clusterIndex) - 0.5 * Lval; - - if(L.at(clusterIndex) > L.at(label)) - label = clusterIndex; - } - - double maxLVal = L.at(label); - double expDiffSum = 0; - for( i = 0; i < L.cols; i++ ) - { - double v = std::exp(L.at(i) - maxLVal); - L.at(i) = v; - expDiffSum += v; // sum_j(exp(L_ij - L_iq)) - } - - CV_Assert(expDiffSum > 0); - if(probs) - L.convertTo(*probs, ptype, 1./expDiffSum); - - Vec2d res; - res[0] = std::log(expDiffSum) + maxLVal - 0.5 * dim * CV_LOG2PI; - res[1] = label; - - return res; - } - - void eStep() - { - // Compute probs_ik from means_k, covs_k and weights_k. - trainProbs.create(trainSamples.rows, nclusters, CV_64FC1); - trainLabels.create(trainSamples.rows, 1, CV_32SC1); - trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1); - - computeLogWeightDivDet(); - - CV_DbgAssert(trainSamples.type() == CV_64FC1); - CV_DbgAssert(means.type() == CV_64FC1); - - for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) - { - Mat sampleProbs = trainProbs.row(sampleIndex); - Vec2d res = computeProbabilities(trainSamples.row(sampleIndex), &sampleProbs, CV_64F); - trainLogLikelihoods.at(sampleIndex) = res[0]; - trainLabels.at(sampleIndex) = static_cast(res[1]); - } - } - - void mStep() - { - // Update means_k, covs_k and weights_k from probs_ik - int dim = trainSamples.cols; - - // Update weights - // not normalized first - reduce(trainProbs, weights, 0, REDUCE_SUM); - - // Update means - means.create(nclusters, dim, CV_64FC1); - means = Scalar(0); - - const double minPosWeight = trainSamples.rows * DBL_EPSILON; - double minWeight = DBL_MAX; - int minWeightClusterIndex = -1; - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - if(weights.at(clusterIndex) <= minPosWeight) - continue; - - if(weights.at(clusterIndex) < minWeight) - { - minWeight = weights.at(clusterIndex); - minWeightClusterIndex = clusterIndex; - } - - Mat clusterMean = means.row(clusterIndex); - for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) - clusterMean += trainProbs.at(sampleIndex, clusterIndex) * trainSamples.row(sampleIndex); - clusterMean /= weights.at(clusterIndex); - } - - // Update covsEigenValues and invCovsEigenValues - covs.resize(nclusters); - covsEigenValues.resize(nclusters); - if(covMatType == COV_MAT_GENERIC) - covsRotateMats.resize(nclusters); - invCovsEigenValues.resize(nclusters); - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - if(weights.at(clusterIndex) <= minPosWeight) - continue; - - if(covMatType != COV_MAT_SPHERICAL) - covsEigenValues[clusterIndex].create(1, dim, CV_64FC1); - else - covsEigenValues[clusterIndex].create(1, 1, CV_64FC1); - - if(covMatType == COV_MAT_GENERIC) - covs[clusterIndex].create(dim, dim, CV_64FC1); - - Mat clusterCov = covMatType != COV_MAT_GENERIC ? 
- covsEigenValues[clusterIndex] : covs[clusterIndex]; - - clusterCov = Scalar(0); - - Mat centeredSample; - for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++) - { - centeredSample = trainSamples.row(sampleIndex) - means.row(clusterIndex); - - if(covMatType == COV_MAT_GENERIC) - clusterCov += trainProbs.at(sampleIndex, clusterIndex) * centeredSample.t() * centeredSample; - else - { - double p = trainProbs.at(sampleIndex, clusterIndex); - for(int di = 0; di < dim; di++ ) - { - double val = centeredSample.at(di); - clusterCov.at(covMatType != COV_MAT_SPHERICAL ? di : 0) += p*val*val; - } - } - } - - if(covMatType == COV_MAT_SPHERICAL) - clusterCov /= dim; - - clusterCov /= weights.at(clusterIndex); - - // Update covsRotateMats for COV_MAT_GENERIC only - if(covMatType == COV_MAT_GENERIC) - { - SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV); - covsEigenValues[clusterIndex] = svd.w; - covsRotateMats[clusterIndex] = svd.u; - } - - max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]); - - // update invCovsEigenValues - invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex]; - } - - for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++) - { - if(weights.at(clusterIndex) <= minPosWeight) - { - Mat clusterMean = means.row(clusterIndex); - means.row(minWeightClusterIndex).copyTo(clusterMean); - covs[minWeightClusterIndex].copyTo(covs[clusterIndex]); - covsEigenValues[minWeightClusterIndex].copyTo(covsEigenValues[clusterIndex]); - if(covMatType == COV_MAT_GENERIC) - covsRotateMats[minWeightClusterIndex].copyTo(covsRotateMats[clusterIndex]); - invCovsEigenValues[minWeightClusterIndex].copyTo(invCovsEigenValues[clusterIndex]); - } - } - - // Normalize weights - weights /= trainSamples.rows; - } - - void write_params(FileStorage& fs) const - { - fs << "nclusters" << nclusters; - fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") : - covMatType == COV_MAT_DIAGONAL ? String("diagonal") : - covMatType == COV_MAT_GENERIC ? String("generic") : - format("unknown_%d", covMatType)); - writeTermCrit(fs, termCrit); - } - - void write(FileStorage& fs) const CV_OVERRIDE - { - writeFormat(fs); - fs << "training_params" << "{"; - write_params(fs); - fs << "}"; - fs << "weights" << weights; - fs << "means" << means; - - size_t i, n = covs.size(); - - fs << "covs" << "["; - for( i = 0; i < n; i++ ) - fs << covs[i]; - fs << "]"; - } - - void read_params(const FileNode& fn) - { - nclusters = (int)fn["nclusters"]; - String s = (String)fn["cov_mat_type"]; - covMatType = s == "spherical" ? COV_MAT_SPHERICAL : - s == "diagonal" ? COV_MAT_DIAGONAL : - s == "generic" ? 
COV_MAT_GENERIC : -1; - CV_Assert(covMatType >= 0); - termCrit = readTermCrit(fn); - } - - void read(const FileNode& fn) CV_OVERRIDE - { - clear(); - read_params(fn["training_params"]); - - fn["weights"] >> weights; - fn["means"] >> means; - - FileNode cfn = fn["covs"]; - FileNodeIterator cfn_it = cfn.begin(); - int i, n = (int)cfn.size(); - covs.resize(n); - - for( i = 0; i < n; i++, ++cfn_it ) - (*cfn_it) >> covs[i]; - - decomposeCovs(); - computeLogWeightDivDet(); - } - - Mat getWeights() const CV_OVERRIDE { return weights; } - Mat getMeans() const CV_OVERRIDE { return means; } - void getCovs(std::vector& _covs) const CV_OVERRIDE - { - _covs.resize(covs.size()); - std::copy(covs.begin(), covs.end(), _covs.begin()); - } - - // all inner matrices have type CV_64FC1 - Mat trainSamples; - Mat trainProbs; - Mat trainLogLikelihoods; - Mat trainLabels; - - Mat weights; - Mat means; - std::vector covs; - - std::vector covsEigenValues; - std::vector covsRotateMats; - std::vector invCovsEigenValues; - Mat logWeightDivDet; -}; - -Ptr EM::create() -{ - return makePtr(); -} - -Ptr EM::load(const String& filepath, const String& nodeName) -{ - return Algorithm::load(filepath, nodeName); -} - -} -} // namespace cv - -/* End of file. */ diff --git a/modules/ml/src/gbt.cpp b/modules/ml/src/gbt.cpp deleted file mode 100644 index 57f2eb176b..0000000000 --- a/modules/ml/src/gbt.cpp +++ /dev/null @@ -1,1373 +0,0 @@ - -#include "precomp.hpp" -#include - -#if 0 - -#define pCvSeq CvSeq* -#define pCvDTreeNode CvDTreeNode* - -//=========================================================================== -//----------------------------- CvGBTreesParams ----------------------------- -//=========================================================================== - -CvGBTreesParams::CvGBTreesParams() - : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 ) -{ - weak_count = 200; - loss_function_type = CvGBTrees::SQUARED_LOSS; - subsample_portion = 0.8f; - shrinkage = 0.01f; -} - -//=========================================================================== - -CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count, - float _shrinkage, float _subsample_portion, - int _max_depth, bool _use_surrogates ) - : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 ) -{ - loss_function_type = _loss_function_type; - weak_count = _weak_count; - shrinkage = _shrinkage; - subsample_portion = _subsample_portion; - max_depth = _max_depth; - use_surrogates = _use_surrogates; -} - -//=========================================================================== -//------------------------------- CvGBTrees --------------------------------- -//=========================================================================== - -CvGBTrees::CvGBTrees() -{ - data = 0; - weak = 0; - default_model_name = "my_boost_tree"; - orig_response = sum_response = sum_response_tmp = 0; - subsample_train = subsample_test = 0; - missing = sample_idx = 0; - class_labels = 0; - class_count = 1; - delta = 0.0f; - - clear(); -} - -//=========================================================================== - -int CvGBTrees::get_len(const CvMat* mat) const -{ - return (mat->cols > mat->rows) ? 
mat->cols : mat->rows; -} - -//=========================================================================== - -void CvGBTrees::clear() -{ - if( weak ) - { - CvSeqReader reader; - CvSlice slice = CV_WHOLE_SEQ; - CvDTree* tree; - - //data->shared = false; - for (int i=0; iclear(); - delete tree; - tree = 0; - } - } - } - for (int i=0; istorage) ); - delete[] weak; - } - if (data) - { - data->shared = false; - delete data; - } - weak = 0; - data = 0; - delta = 0.0f; - cvReleaseMat( &orig_response ); - cvReleaseMat( &sum_response ); - cvReleaseMat( &sum_response_tmp ); - cvReleaseMat( &subsample_train ); - cvReleaseMat( &subsample_test ); - cvReleaseMat( &sample_idx ); - cvReleaseMat( &missing ); - cvReleaseMat( &class_labels ); -} - -//=========================================================================== - -CvGBTrees::~CvGBTrees() -{ - clear(); -} - -//=========================================================================== - -CvGBTrees::CvGBTrees( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx, - const CvMat* _sample_idx, const CvMat* _var_type, - const CvMat* _missing_mask, CvGBTreesParams _params ) -{ - weak = 0; - data = 0; - default_model_name = "my_boost_tree"; - orig_response = sum_response = sum_response_tmp = 0; - subsample_train = subsample_test = 0; - missing = sample_idx = 0; - class_labels = 0; - class_count = 1; - delta = 0.0f; - - train( _train_data, _tflag, _responses, _var_idx, _sample_idx, - _var_type, _missing_mask, _params ); -} - -//=========================================================================== - -bool CvGBTrees::problem_type() const -{ - switch (params.loss_function_type) - { - case DEVIANCE_LOSS: return false; - default: return true; - } -} - -//=========================================================================== - -bool -CvGBTrees::train( CvMLData* _data, CvGBTreesParams _params, bool update ) -{ - bool result; - result = train ( _data->get_values(), CV_ROW_SAMPLE, - _data->get_responses(), _data->get_var_idx(), - _data->get_train_sample_idx(), _data->get_var_types(), - _data->get_missing(), _params, update); - //update is not supported - return result; -} - -//=========================================================================== - - -bool -CvGBTrees::train( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx, - const CvMat* _sample_idx, const CvMat* _var_type, - const CvMat* _missing_mask, - CvGBTreesParams _params, bool /*_update*/ ) //update is not supported -{ - CvMemStorage* storage = 0; - - params = _params; - bool is_regression = problem_type(); - - clear(); - /* - n - count of samples - m - count of variables - */ - int n = _train_data->rows; - int m = _train_data->cols; - if (_tflag != CV_ROW_SAMPLE) - { - int tmp; - CV_SWAP(n,m,tmp); - } - - CvMat* new_responses = cvCreateMat( n, 1, CV_32F); - cvZero(new_responses); - - data = new CvDTreeTrainData( _train_data, _tflag, new_responses, _var_idx, - _sample_idx, _var_type, _missing_mask, _params, true, true ); - if (_missing_mask) - { - missing = cvCreateMat(_missing_mask->rows, _missing_mask->cols, - _missing_mask->type); - cvCopy( _missing_mask, missing); - } - - orig_response = cvCreateMat( 1, n, CV_32F ); - int step = (_responses->cols > _responses->rows) ? 
1 : _responses->step / CV_ELEM_SIZE(_responses->type); - switch (CV_MAT_TYPE(_responses->type)) - { - case CV_32FC1: - { - for (int i=0; idata.fl[i] = _responses->data.fl[i*step]; - }; break; - case CV_32SC1: - { - for (int i=0; idata.fl[i] = (float) _responses->data.i[i*step]; - }; break; - default: - CV_Error(CV_StsUnmatchedFormats, "Response should be a 32fC1 or 32sC1 vector."); - } - - if (!is_regression) - { - class_count = 0; - unsigned char * mask = new unsigned char[n]; - memset(mask, 0, n); - // compute the count of different output classes - for (int i=0; idata.fl[j]) == int(orig_response->data.fl[i])) - mask[j] = 1; - } - delete[] mask; - - class_labels = cvCreateMat(1, class_count, CV_32S); - class_labels->data.i[0] = int(orig_response->data.fl[0]); - int j = 1; - for (int i=1; idata.fl[i]) - class_labels->data.i[k])) - k++; - if (k == j) - { - class_labels->data.i[k] = int(orig_response->data.fl[i]); - j++; - } - } - } - - // inside gbt learning process only regression decision trees are built - data->is_classifier = false; - - // preproccessing sample indices - if (_sample_idx) - { - int sample_idx_len = get_len(_sample_idx); - - switch (CV_MAT_TYPE(_sample_idx->type)) - { - case CV_32SC1: - { - sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S ); - for (int i=0; idata.i[i] = _sample_idx->data.i[i]; - std::sort(sample_idx->data.i, sample_idx->data.i + sample_idx_len); - } break; - case CV_8S: - case CV_8U: - { - int active_samples_count = 0; - for (int i=0; idata.ptr[i] ); - sample_idx = cvCreateMat( 1, active_samples_count, CV_32S ); - active_samples_count = 0; - for (int i=0; idata.ptr[i] )) - sample_idx->data.i[active_samples_count++] = i; - - } break; - default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector."); - } - } - else - { - sample_idx = cvCreateMat( 1, n, CV_32S ); - for (int i=0; idata.i[i] = i; - } - - sum_response = cvCreateMat(class_count, n, CV_32F); - sum_response_tmp = cvCreateMat(class_count, n, CV_32F); - cvZero(sum_response); - - delta = 0.0f; - /* - in the case of a regression problem the initial guess (the zero term - in the sum) is set to the mean of all the training responses, that is - the best constant model - */ - if (is_regression) base_value = find_optimal_value(sample_idx); - /* - in the case of a classification problem the initial guess (the zero term - in the sum) is set to zero for all the trees sequences - */ - else base_value = 0.0f; - /* - current predicition on all training samples is set to be - equal to the base_value - */ - cvSet( sum_response, cvScalar(base_value) ); - - weak = new pCvSeq[class_count]; - for (int i=0; itrain( data, subsample_train ); - change_values(tree, k); - - if (subsample_test) - { - CvMat x; - CvMat x_miss; - int* sample_data = sample_idx->data.i; - int* subsample_data = subsample_test->data.i; - int s_step = (sample_idx->cols > sample_idx->rows) ? 
1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - for (int j=0; jtrain_data, &x, idx); - else - cvGetCol( data->train_data, &x, idx); - - if (missing) - { - if (_tflag == CV_ROW_SAMPLE) - cvGetRow( missing, &x_miss, idx); - else - cvGetCol( missing, &x_miss, idx); - - res = (float)tree->predict(&x, &x_miss)->value; - } - else - { - res = (float)tree->predict(&x)->value; - } - sum_response_tmp->data.fl[idx + k*n] = - sum_response->data.fl[idx + k*n] + - params.shrinkage * res; - } - } - - cvSeqPush( weak[k], &tree ); - tree = 0; - } // k=0..class_count - CvMat* tmp; - tmp = sum_response_tmp; - sum_response_tmp = sum_response; - sum_response = tmp; - tmp = 0; - } // i=0..params.weak_count - - delete[] idx_data; - cvReleaseMat(&new_responses); - data->free_train_data(); - - return true; - -} // CvGBTrees::train(...) - -//=========================================================================== - -inline float Sign(float x) - { - if (x<0.0f) return -1.0f; - else if (x>0.0f) return 1.0f; - return 0.0f; - } - -//=========================================================================== - -void CvGBTrees::find_gradient(const int k) -{ - int* sample_data = sample_idx->data.i; - int* subsample_data = subsample_train->data.i; - float* grad_data = data->responses->data.fl; - float* resp_data = orig_response->data.fl; - float* current_data = sum_response->data.fl; - - switch (params.loss_function_type) - // loss_function_type in - // {SQUARED_LOSS, ABSOLUTE_LOSS, HUBER_LOSS, DEVIANCE_LOSS} - { - case SQUARED_LOSS: - { - for (int i=0; icols > sample_idx->rows) ? 1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - int idx = *(sample_data + subsample_data[i]*s_step); - grad_data[idx] = resp_data[idx] - current_data[idx]; - } - }; break; - - case ABSOLUTE_LOSS: - { - for (int i=0; icols > sample_idx->rows) ? 1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - int idx = *(sample_data + subsample_data[i]*s_step); - grad_data[idx] = Sign(resp_data[idx] - current_data[idx]); - } - }; break; - - case HUBER_LOSS: - { - float alpha = 0.2f; - int n = get_len(subsample_train); - int s_step = (sample_idx->cols > sample_idx->rows) ? 1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - - float* residuals = new float[n]; - for (int i=0; i delta) ? delta*Sign(r) : r; - } - delete[] residuals; - - }; break; - - case DEVIANCE_LOSS: - { - for (int i=0; icols > sample_idx->rows) ? 1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - int idx = *(sample_data + subsample_data[i]*s_step); - - for (int j=0; jcols]; - res = exp(res); - if (j == k) exp_fk = res; - exp_sfi += res; - } - int orig_label = int(resp_data[idx]); - /* - grad_data[idx] = (float)(!(k-class_labels->data.i[orig_label]+1)) - - (float)(exp_fk / exp_sfi); - */ - int ensemble_label = 0; - while (class_labels->data.i[ensemble_label] - orig_label) - ensemble_label++; - - grad_data[idx] = (float)(!(k-ensemble_label)) - - (float)(exp_fk / exp_sfi); - } - }; break; - - default: break; - } - -} // CvGBTrees::find_gradient(...) - -//=========================================================================== - -void CvGBTrees::change_values(CvDTree* tree, const int _k) -{ - CvDTreeNode** predictions = new pCvDTreeNode[get_len(subsample_train)]; - - int* sample_data = sample_idx->data.i; - int* subsample_data = subsample_train->data.i; - int s_step = (sample_idx->cols > sample_idx->rows) ? 
1 - : sample_idx->step/CV_ELEM_SIZE(sample_idx->type); - - CvMat x; - CvMat miss_x; - - for (int i=0; itflag == CV_ROW_SAMPLE) - cvGetRow( data->train_data, &x, idx); - else - cvGetCol( data->train_data, &x, idx); - - if (missing) - { - if (data->tflag == CV_ROW_SAMPLE) - cvGetRow( missing, &miss_x, idx); - else - cvGetCol( missing, &miss_x, idx); - - predictions[i] = tree->predict(&x, &miss_x); - } - else - predictions[i] = tree->predict(&x); - } - - - CvDTreeNode** leaves; - int leaves_count = 0; - leaves = GetLeaves( tree, leaves_count); - - for (int i=0; ivalue = 0.0; - continue; - } - - CvMat* leaf_idx = cvCreateMat(1, samples_in_leaf, CV_32S); - int* leaf_idx_data = leaf_idx->data.i; - - for (int j=0; jvalue = value; - - leaf_idx_data = leaf_idx->data.i; - - int len = sum_response_tmp->cols; - for (int j=0; jdata.fl[idx + _k*len] = - sum_response->data.fl[idx + _k*len] + - params.shrinkage * value; - } - leaf_idx_data = 0; - cvReleaseMat(&leaf_idx); - } - - // releasing the memory - for (int i=0; icols; - CvMat leaf_idx; - leaf_idx.rows = 1; - - leaves = GetLeaves( tree, leaves_count); - - for (int i=0; isample_count; - int* leaf_idx_data = new int[n]; - data->get_sample_indices(leaves[i], leaf_idx_data); - //CvMat* leaf_idx = new CvMat(); - //cvInitMatHeader(leaf_idx, n, 1, CV_32S, leaf_idx_data); - leaf_idx.cols = n; - leaf_idx.data.i = leaf_idx_data; - - float value = find_optimal_value(&leaf_idx); - leaves[i]->value = value; - float val = params.shrinkage * value; - - - for (int j=0; jdata.fl[idx] = sum_response->data.fl[idx] + val; - } - //leaf_idx_data = 0; - //cvReleaseMat(&leaf_idx); - leaf_idx.data.i = 0; - //delete leaf_idx; - delete[] leaf_idx_data; - } - - // releasing the memory - for (int i=0; idata.i; - float* resp_data = orig_response->data.fl; - float* cur_data = sum_response->data.fl; - int n = get_len(_Idx); - - switch (params.loss_function_type) - // SQUARED_LOSS=0, ABSOLUTE_LOSS=1, HUBER_LOSS=3, DEVIANCE_LOSS=4 - { - case SQUARED_LOSS: - { - for (int i=0; i> 1; - float r_median = (n == n_half<<1) ? 
- (residuals[n_half-1] + residuals[n_half]) / 2.0f : - residuals[n_half]; - - for (int i=0; iresponses->data.fl; - double tmp1 = 0; - double tmp2 = 0; - double tmp = 0; - for (int i=0; ileft != NULL) leaves_get(leaves, count, node->left); - if (node->right != NULL) leaves_get(leaves, count, node->right); - if ((node->left == NULL) && (node->right == NULL)) - leaves[count++] = node; -} - -//--------------------------------------------------------------------------- - -CvDTreeNode** CvGBTrees::GetLeaves( const CvDTree* dtree, int& len ) -{ - len = 0; - CvDTreeNode** leaves = new pCvDTreeNode[(size_t)1 << params.max_depth]; - leaves_get(leaves, len, const_cast(dtree->get_root())); - return leaves; -} - -//=========================================================================== - -void CvGBTrees::do_subsample() -{ - - int n = get_len(sample_idx); - int* idx = subsample_train->data.i; - - for (int i = 0; i < n; i++ ) - idx[i] = i; - - if (subsample_test) - for (int i = 0; i < n; i++) - { - int a = (*rng)(n); - int b = (*rng)(n); - int t; - CV_SWAP( idx[a], idx[b], t ); - } - -/* - int n = get_len(sample_idx); - if (subsample_train == 0) - subsample_train = cvCreateMat(1, n, CV_32S); - int* subsample_data = subsample_train->data.i; - for (int i=0; itype) != CV_32F) - return 0.0f; - if ((k >= 0) && (krows != 1)) - return 0.0f; - if ((k == -1) && (weak_responses->rows != class_count)) - return 0.0f; - if (weak_responses->cols != weak_count) - return 0.0f; - } - - float* sum = new float[class_count]; - memset(sum, 0, class_count*sizeof(float)); - - for (int i=0; ipredict(_sample, _missing)->value); - sum[i] += params.shrinkage * p; - if (weak_responses) - weak_responses->data.fl[i*weak_count+j] = p; - } - } - } - - for (int i=0; i=0) && (k max) - { - max = sum[i]; - class_label = i; - } - - delete[] sum; - - /* - int orig_class_label = -1; - for (int i=0; idata.i[i] == class_label+1) - orig_class_label = i; - */ - int orig_class_label = class_labels->data.i[class_label]; - - return float(orig_class_label); -} - - -class Tree_predictor : public cv::ParallelLoopBody -{ -private: - pCvSeq* weak; - float* sum; - const int k; - const CvMat* sample; - const CvMat* missing; - const float shrinkage; - - static cv::Mutex SumMutex; - - -public: - Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {} - Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage, - const CvMat* _sample, const CvMat* _missing, float* _sum ) : - weak(_weak), sum(_sum), k(_k), sample(_sample), - missing(_missing), shrinkage(_shrinkage) - {} - - Tree_predictor( const Tree_predictor& p, cv::Split ) : - weak(p.weak), sum(p.sum), k(p.k), sample(p.sample), - missing(p.missing), shrinkage(p.shrinkage) - {} - - Tree_predictor& operator=( const Tree_predictor& ) - { return *this; } - - virtual void operator()(const cv::Range& range) const - { - CvSeqReader reader; - int begin = range.start; - int end = range.end; - - int weak_count = end - begin; - CvDTree* tree; - - for (int i=0; ipredict(sample, missing)->value); - } - } - - { - cv::AutoLock lock(SumMutex); - sum[i] += tmp_sum; - } - } - } // Tree_predictor::operator() - - virtual ~Tree_predictor() {} - -}; // class Tree_predictor - -cv::Mutex Tree_predictor::SumMutex; - - -float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing, - CvMat* /*weak_responses*/, CvSlice slice, int k) const - { - float result = 0.0f; - if (!weak) return 0.0f; - float* sum = new float[class_count]; - for (int i=0; i=0) && (k max) - { - max = sum[i]; - 
class_label = i; - } - - delete[] sum; - int orig_class_label = class_labels->data.i[class_label]; - - return float(orig_class_label); - } - - -//=========================================================================== - -void CvGBTrees::write_params( CvFileStorage* fs ) const -{ - const char* loss_function_type_str = - params.loss_function_type == SQUARED_LOSS ? "SquaredLoss" : - params.loss_function_type == ABSOLUTE_LOSS ? "AbsoluteLoss" : - params.loss_function_type == HUBER_LOSS ? "HuberLoss" : - params.loss_function_type == DEVIANCE_LOSS ? "DevianceLoss" : 0; - - - if( loss_function_type_str ) - cvWriteString( fs, "loss_function", loss_function_type_str ); - else - cvWriteInt( fs, "loss_function", params.loss_function_type ); - - cvWriteInt( fs, "ensemble_length", params.weak_count ); - cvWriteReal( fs, "shrinkage", params.shrinkage ); - cvWriteReal( fs, "subsample_portion", params.subsample_portion ); - //cvWriteInt( fs, "max_tree_depth", params.max_depth ); - //cvWriteString( fs, "use_surrogate_splits", params.use_surrogates ? "true" : "false"); - if (class_labels) cvWrite( fs, "class_labels", class_labels); - - data->is_classifier = !problem_type(); - data->write_params( fs ); - data->is_classifier = 0; -} - - -//=========================================================================== - -void CvGBTrees::read_params( CvFileStorage* fs, CvFileNode* fnode ) -{ - CV_FUNCNAME( "CvGBTrees::read_params" ); - __BEGIN__; - - - CvFileNode* temp; - - if( !fnode || !CV_NODE_IS_MAP(fnode->tag) ) - return; - - data = new CvDTreeTrainData(); - CV_CALL( data->read_params(fs, fnode)); - data->shared = true; - - params.max_depth = data->params.max_depth; - params.min_sample_count = data->params.min_sample_count; - params.max_categories = data->params.max_categories; - params.priors = data->params.priors; - params.regression_accuracy = data->params.regression_accuracy; - params.use_surrogates = data->params.use_surrogates; - - temp = cvGetFileNodeByName( fs, fnode, "loss_function" ); - if( !temp ) - EXIT; - - if( temp && CV_NODE_IS_STRING(temp->tag) ) - { - const char* loss_function_type_str = cvReadString( temp, "" ); - params.loss_function_type = strcmp( loss_function_type_str, "SquaredLoss" ) == 0 ? SQUARED_LOSS : - strcmp( loss_function_type_str, "AbsoluteLoss" ) == 0 ? ABSOLUTE_LOSS : - strcmp( loss_function_type_str, "HuberLoss" ) == 0 ? HUBER_LOSS : - strcmp( loss_function_type_str, "DevianceLoss" ) == 0 ? 
DEVIANCE_LOSS : -1; - } - else - params.loss_function_type = cvReadInt( temp, -1 ); - - - if( params.loss_function_type < SQUARED_LOSS || params.loss_function_type > DEVIANCE_LOSS || params.loss_function_type == 2) - CV_ERROR( CV_StsBadArg, "Unknown loss function" ); - - params.weak_count = cvReadIntByName( fs, fnode, "ensemble_length" ); - params.shrinkage = (float)cvReadRealByName( fs, fnode, "shrinkage", 0.1 ); - params.subsample_portion = (float)cvReadRealByName( fs, fnode, "subsample_portion", 1.0 ); - - if (data->is_classifier) - { - class_labels = (CvMat*)cvReadByName( fs, fnode, "class_labels" ); - if( class_labels && !CV_IS_MAT(class_labels)) - CV_ERROR( CV_StsParseError, "class_labels must stored as a matrix"); - } - data->is_classifier = 0; - - __END__; -} - - - - -void CvGBTrees::write( CvFileStorage* fs, const char* name ) const -{ - CV_FUNCNAME( "CvGBTrees::write" ); - - __BEGIN__; - - CvSeqReader reader; - int i; - cv::String s; - - cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_ML_GBT ); - - if( !weak ) - CV_ERROR( CV_StsBadArg, "The model has not been trained yet" ); - - write_params( fs ); - cvWriteReal( fs, "base_value", base_value); - cvWriteInt( fs, "class_count", class_count); - - for ( int j=0; j < class_count; ++j ) - { - s = cv::format("trees_%d", j); - cvStartWriteStruct( fs, s.c_str(), CV_NODE_SEQ ); - - cvStartReadSeq( weak[j], &reader ); - - for( i = 0; i < weak[j]->total; i++ ) - { - CvDTree* tree; - CV_READ_SEQ_ELEM( tree, reader ); - cvStartWriteStruct( fs, 0, CV_NODE_MAP ); - tree->write( fs ); - cvEndWriteStruct( fs ); - } - - cvEndWriteStruct( fs ); - } - - cvEndWriteStruct( fs ); - - __END__; -} - - -//=========================================================================== - - -void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node ) -{ - - CV_FUNCNAME( "CvGBTrees::read" ); - - __BEGIN__; - - CvSeqReader reader; - CvFileNode* trees_fnode; - CvMemStorage* storage; - int i, ntrees; - cv::String s; - - clear(); - read_params( fs, node ); - - if( !data ) - EXIT; - - base_value = (float)cvReadRealByName( fs, node, "base_value", 0.0 ); - class_count = cvReadIntByName( fs, node, "class_count", 1 ); - - weak = new pCvSeq[class_count]; - - - for (int j=0; jtag) ) - CV_ERROR( CV_StsParseError, " tag is missing" ); - - cvStartReadSeq( trees_fnode->data.seq, &reader ); - ntrees = trees_fnode->data.seq->total; - - if( ntrees != params.weak_count ) - CV_ERROR( CV_StsUnmatchedSizes, - "The number of trees stored does not match tag value" ); - - CV_CALL( storage = cvCreateMemStorage() ); - weak[j] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage ); - - for( i = 0; i < ntrees; i++ ) - { - CvDTree* tree = new CvDTree(); - CV_CALL(tree->read( fs, (CvFileNode*)reader.ptr, data )); - CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader ); - cvSeqPush( weak[j], &tree ); - } - } - - __END__; -} - -//=========================================================================== - -class Sample_predictor : public cv::ParallelLoopBody -{ -private: - const CvGBTrees* gbt; - float* predictions; - const CvMat* samples; - const CvMat* missing; - const CvMat* idx; - CvSlice slice; - -public: - Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0), - idx(0), slice(CV_WHOLE_SEQ) - {} - - Sample_predictor(const CvGBTrees* _gbt, float* _predictions, - const CvMat* _samples, const CvMat* _missing, - const CvMat* _idx, CvSlice _slice=CV_WHOLE_SEQ) : - gbt(_gbt), predictions(_predictions), samples(_samples), - missing(_missing), idx(_idx), slice(_slice) - {} - 
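Tree_predictor above and Sample_predictor below both follow the same cv::parallel_for_ idiom: a ParallelLoopBody subclass captures shared state by reference and each worker processes a half-open index range. A minimal, self-contained sketch of that pattern (the body here just squares numbers; only the structure mirrors the predictors):

#include <opencv2/core.hpp>
#include <vector>

class SquareBody : public cv::ParallelLoopBody
{
    const std::vector<float>& in;
    std::vector<float>& out;   // one writer per index, so no mutex is needed
public:
    SquareBody(const std::vector<float>& i, std::vector<float>& o) : in(i), out(o) {}
    void operator()(const cv::Range& range) const CV_OVERRIDE
    {
        for (int i = range.start; i < range.end; i++)
            out[i] = in[i] * in[i];
    }
};

// usage:
//   std::vector<float> in(1000, 2.f), out(1000);
//   cv::parallel_for_(cv::Range(0, (int)in.size()), SquareBody(in, out));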
- - Sample_predictor( const Sample_predictor& p, cv::Split ) : - gbt(p.gbt), predictions(p.predictions), - samples(p.samples), missing(p.missing), idx(p.idx), - slice(p.slice) - {} - - - virtual void operator()(const cv::Range& range) const - { - int begin = range.start; - int end = range.end; - - CvMat x; - CvMat miss; - - for (int i=begin; idata.i[i] : i; - cvGetRow(samples, &x, j); - if (!missing) - { - predictions[i] = gbt->predict_serial(&x,0,0,slice); - } - else - { - cvGetRow(missing, &miss, j); - predictions[i] = gbt->predict_serial(&x,&miss,0,slice); - } - } - } // Sample_predictor::operator() - - virtual ~Sample_predictor() {} - -}; // class Sample_predictor - - - -// type in {CV_TRAIN_ERROR, CV_TEST_ERROR} -float -CvGBTrees::calc_error( CvMLData* _data, int type, std::vector *resp ) -{ - - float err = 0.0f; - const CvMat* _sample_idx = (type == CV_TRAIN_ERROR) ? - _data->get_train_sample_idx() : - _data->get_test_sample_idx(); - const CvMat* response = _data->get_responses(); - - int n = _sample_idx ? get_len(_sample_idx) : 0; - n = (type == CV_TRAIN_ERROR && n == 0) ? _data->get_values()->rows : n; - - if (!n) - return -FLT_MAX; - - float* pred_resp = 0; - bool needsFreeing = false; - - if (resp) - { - resp->resize(n); - pred_resp = &((*resp)[0]); - } - else - { - pred_resp = new float[n]; - needsFreeing = true; - } - - Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(), - _data->get_missing(), _sample_idx); - - cv::parallel_for_(cv::Range(0,n), predictor); - - int* sidx = _sample_idx ? _sample_idx->data.i : 0; - int r_step = CV_IS_MAT_CONT(response->type) ? - 1 : response->step / CV_ELEM_SIZE(response->type); - - - if ( !problem_type() ) - { - for( int i = 0; i < n; i++ ) - { - int si = sidx ? sidx[i] : i; - int d = fabs((double)pred_resp[i] - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1; - err += d; - } - err = err / (float)n * 100.0f; - } - else - { - for( int i = 0; i < n; i++ ) - { - int si = sidx ? sidx[i] : i; - float d = pred_resp[i] - response->data.fl[si*r_step]; - err += d*d; - } - err = err / (float)n; - } - - if (needsFreeing) - delete[]pred_resp; - - return err; -} - - -CvGBTrees::CvGBTrees( const cv::Mat& trainData, int tflag, - const cv::Mat& responses, const cv::Mat& varIdx, - const cv::Mat& sampleIdx, const cv::Mat& varType, - const cv::Mat& missingDataMask, - CvGBTreesParams _params ) -{ - data = 0; - weak = 0; - default_model_name = "my_boost_tree"; - orig_response = sum_response = sum_response_tmp = 0; - subsample_train = subsample_test = 0; - missing = sample_idx = 0; - class_labels = 0; - class_count = 1; - delta = 0.0f; - - clear(); - - train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, _params, false); -} - -bool CvGBTrees::train( const cv::Mat& trainData, int tflag, - const cv::Mat& responses, const cv::Mat& varIdx, - const cv::Mat& sampleIdx, const cv::Mat& varType, - const cv::Mat& missingDataMask, - CvGBTreesParams _params, - bool update ) -{ - CvMat _trainData = trainData, _responses = responses; - CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = varType; - CvMat _missingDataMask = missingDataMask; - - return train( &_trainData, tflag, &_responses, varIdx.empty() ? 0 : &_varIdx, - sampleIdx.empty() ? 0 : &_sampleIdx, varType.empty() ? 0 : &_varType, - missingDataMask.empty() ? 
0 : &_missingDataMask, _params, update); -} - -float CvGBTrees::predict( const cv::Mat& sample, const cv::Mat& _missing, - const cv::Range& slice, int k ) const -{ - CvMat _sample = sample, miss = _missing; - return predict(&_sample, _missing.empty() ? 0 : &miss, 0, - slice==cv::Range::all() ? CV_WHOLE_SEQ : cvSlice(slice.start, slice.end), k); -} - -#endif diff --git a/modules/ml/src/inner_functions.cpp b/modules/ml/src/inner_functions.cpp deleted file mode 100644 index 6b3affcebc..0000000000 --- a/modules/ml/src/inner_functions.cpp +++ /dev/null @@ -1,222 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
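The CvGBTrees code above (already fenced off with #if 0) implements the standard gradient-boosting recurrence: each stage fits a regression tree to the negative gradient of the loss at the current prediction and is added with shrinkage, F_{m+1}(x) = F_m(x) + nu * h_m(x); for SQUARED_LOSS the negative gradient is simply the residual. A self-contained toy sketch of that recurrence, with a 1-D stump standing in for CvDTree (all names and data are illustrative):

#include <cstdio>
#include <vector>

struct Stump                            // 1-D regression stump
{
    double threshold, left, right;
    double eval(double x) const { return x < threshold ? left : right; }
};

static Stump fitStump(const std::vector<double>& x, const std::vector<double>& r)
{
    // exhaustive split search minimizing squared error of the two leaf means
    size_t n = x.size();
    Stump best{0, 0, 0};
    double bestErr = 1e300;
    for (size_t s = 0; s < n; s++)
    {
        double t = x[s], sl = 0, sr = 0;
        int nl = 0, nr = 0;
        for (size_t i = 0; i < n; i++)
        {
            if (x[i] < t) { sl += r[i]; nl++; }
            else          { sr += r[i]; nr++; }
        }
        if (!nl || !nr) continue;
        double ml = sl / nl, mr = sr / nr, err = 0;
        for (size_t i = 0; i < n; i++)
        {
            double d = r[i] - (x[i] < t ? ml : mr);
            err += d * d;
        }
        if (err < bestErr) { bestErr = err; best = {t, ml, mr}; }
    }
    return best;
}

int main()
{
    std::vector<double> x{0, 1, 2, 3, 4, 5, 6, 7}, y{0, 0, 1, 1, 4, 4, 9, 9};
    const double nu = 0.1;                     // cf. params.shrinkage
    std::vector<double> F(x.size(), 0.0);      // base_value taken as 0 for brevity
    for (int m = 0; m < 200; m++)              // cf. params.weak_count
    {
        std::vector<double> r(x.size());
        for (size_t i = 0; i < x.size(); i++)
            r[i] = y[i] - F[i];                // negative gradient of squared loss
        Stump h = fitStump(x, r);
        for (size_t i = 0; i < x.size(); i++)
            F[i] += nu * h.eval(x[i]);         // shrunken stage update
    }
    std::printf("F(6) = %.2f (target 9)\n", F[6]);
    return 0;
}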
-// -//M*/ - -#include "precomp.hpp" - -namespace cv { namespace ml { - -ParamGrid::ParamGrid() { minVal = maxVal = 0.; logStep = 1; } -ParamGrid::ParamGrid(double _minVal, double _maxVal, double _logStep) -{ - CV_TRACE_FUNCTION(); - minVal = std::min(_minVal, _maxVal); - maxVal = std::max(_minVal, _maxVal); - logStep = std::max(_logStep, 1.); -} - -Ptr ParamGrid::create(double minval, double maxval, double logstep) { - return makePtr(minval, maxval, logstep); -} - -bool StatModel::empty() const { return !isTrained(); } - -int StatModel::getVarCount() const { return 0; } - -bool StatModel::train(const Ptr& trainData, int ) -{ - CV_TRACE_FUNCTION(); - CV_Assert(!trainData.empty()); - CV_Error(CV_StsNotImplemented, ""); - return false; -} - -bool StatModel::train( InputArray samples, int layout, InputArray responses ) -{ - CV_TRACE_FUNCTION(); - CV_Assert(!samples.empty()); - return train(TrainData::create(samples, layout, responses)); -} - -class ParallelCalcError : public ParallelLoopBody -{ -private: - const Ptr& data; - bool &testerr; - Mat &resp; - const StatModel &s; - vector &errStrip; -public: - ParallelCalcError(const Ptr& d, bool &t, Mat &_r,const StatModel &w, vector &e) : - data(d), - testerr(t), - resp(_r), - s(w), - errStrip(e) - { - } - virtual void operator()(const Range& range) const CV_OVERRIDE - { - int idxErr = range.start; - CV_TRACE_FUNCTION_SKIP_NESTED(); - Mat samples = data->getSamples(); - Mat weights=testerr? data->getTestSampleWeights() : data->getTrainSampleWeights(); - int layout = data->getLayout(); - Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx(); - const int* sidx_ptr = sidx.ptr(); - bool isclassifier = s.isClassifier(); - Mat responses = data->getResponses(); - int responses_type = responses.type(); - double err = 0; - - - const float* sw = weights.empty() ? 0 : weights.ptr(); - for (int i = range.start; i < range.end; i++) - { - int si = sidx_ptr ? sidx_ptr[i] : i; - double sweight = sw ? static_cast(sw[i]) : 1.; - Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si); - float val = s.predict(sample); - float val0 = (responses_type == CV_32S) ? (float)responses.at(si) : responses.at(si); - - if (isclassifier) - err += sweight * fabs(val - val0) > FLT_EPSILON; - else - err += sweight * (val - val0)*(val - val0); - if (!resp.empty()) - resp.at(i) = val; - } - - - errStrip[idxErr]=err ; - - } - ParallelCalcError& operator=(const ParallelCalcError &) { - return *this; - } -}; - - -float StatModel::calcError(const Ptr& data, bool testerr, OutputArray _resp) const -{ - CV_TRACE_FUNCTION_SKIP_NESTED(); - CV_Assert(!data.empty()); - Mat samples = data->getSamples(); - Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx(); - Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights(); - int n = (int)sidx.total(); - bool isclassifier = isClassifier(); - Mat responses = data->getResponses(); - - if (n == 0) - { - n = data->getNSamples(); - weights = data->getTrainSampleWeights(); - testerr =false; - } - - if (n == 0) - return -FLT_MAX; - - Mat resp; - if (_resp.needed()) - resp.create(n, 1, CV_32F); - - double err = 0; - vector errStrip(n,0.0); - ParallelCalcError x(data, testerr, resp, *this,errStrip); - - parallel_for_(Range(0,n),x); - - for (size_t i = 0; i < errStrip.size(); i++) - err += errStrip[i]; - float weightSum= weights.empty() ? n: static_cast(sum(weights)(0)); - if (_resp.needed()) - resp.copyTo(_resp); - - return (float)(err/ weightSum * (isclassifier ? 
100 : 1)); -} - -/* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */ -static void Cholesky( const Mat& A, Mat& S ) -{ - CV_TRACE_FUNCTION(); - CV_Assert(A.type() == CV_32F); - - S = A.clone(); - cv::Cholesky ((float*)S.ptr(),S.step, S.rows,NULL, 0, 0); - S = S.t(); - for (int i=1;i(i,j)=0; -} - -/* Generates from multivariate normal distribution, where - is an - average row vector, - symmetric covariation matrix */ -void randMVNormal( InputArray _mean, InputArray _cov, int nsamples, OutputArray _samples ) -{ - CV_TRACE_FUNCTION(); - // check mean vector and covariance matrix - Mat mean = _mean.getMat(), cov = _cov.getMat(); - int dim = (int)mean.total(); // dimensionality - CV_Assert(mean.rows == 1 || mean.cols == 1); - CV_Assert(cov.rows == dim && cov.cols == dim); - mean = mean.reshape(1,1); // ensure a row vector - - // generate n-samples of the same dimension, from ~N(0,1) - _samples.create(nsamples, dim, CV_32F); - Mat samples = _samples.getMat(); - randn(samples, Scalar::all(0), Scalar::all(1)); - - // decompose covariance using Cholesky: cov = U'*U - // (cov must be square, symmetric, and positive semi-definite matrix) - Mat utmat; - Cholesky(cov, utmat); - - // transform random numbers using specified mean and covariance - for( int i = 0; i < nsamples; i++ ) - { - Mat sample = samples.row(i); - sample = sample * utmat + mean; - } -} - -}} - -/* End of file */ diff --git a/modules/ml/src/kdtree.cpp b/modules/ml/src/kdtree.cpp deleted file mode 100644 index 8cdab98f73..0000000000 --- a/modules/ml/src/kdtree.cpp +++ /dev/null @@ -1,530 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Copyright (C) 2013, OpenCV Foundation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
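A hedged usage sketch of randMVNormal() above: it draws correlated Gaussian samples through the Cholesky transform x = z*U + mean, which the snippet checks by re-estimating the moments; the mean and covariance values are made up.

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

int main()
{
    using namespace cv;
    Mat mean = (Mat_<float>(1, 2) << 1.f, 3.f);
    Mat cov  = (Mat_<float>(2, 2) << 2.0f, 0.5f,
                                     0.5f, 1.0f);  // symmetric positive definite
    Mat samples;
    ml::randMVNormal(mean, cov, 500, samples);     // 500 x 2, CV_32F

    Mat estMean, estCov;
    calcCovarMatrix(samples, estCov, estMean,
                    COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE, CV_32F);
    // estMean ~ [1 3] and estCov ~ [[2 0.5]; [0.5 1]] up to sampling noise
    return 0;
}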
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -#include "kdtree.hpp" - -namespace cv -{ -namespace ml -{ -// This is reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned-up and -// adopted to work with the new OpenCV data structures. - -// The algorithm is taken from: -// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search -// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog., -// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html - -const int MAX_TREE_DEPTH = 32; - -KDTree::KDTree() -{ - maxDepth = -1; - normType = NORM_L2; -} - -KDTree::KDTree(InputArray _points, bool _copyData) -{ - maxDepth = -1; - normType = NORM_L2; - build(_points, _copyData); -} - -KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData) -{ - maxDepth = -1; - normType = NORM_L2; - build(_points, _labels, _copyData); -} - -struct SubTree -{ - SubTree() : first(0), last(0), nodeIdx(0), depth(0) {} - SubTree(int _first, int _last, int _nodeIdx, int _depth) - : first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {} - int first; - int last; - int nodeIdx; - int depth; -}; - - -static float -medianPartition( size_t* ofs, int a, int b, const float* vals ) -{ - int k, a0 = a, b0 = b; - int middle = (a + b)/2; - while( b > a ) - { - int i0 = a, i1 = (a+b)/2, i2 = b; - float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]]; - int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) : - v0 < v2 ? (v1 == v0 ? i2 : i0): (v1 < v2 ? 
i2 : i1); - float pivot = vals[ofs[ip]]; - std::swap(ofs[ip], ofs[i2]); - - for( i1 = i0, i0--; i1 <= i2; i1++ ) - if( vals[ofs[i1]] <= pivot ) - { - i0++; - std::swap(ofs[i0], ofs[i1]); - } - if( i0 == middle ) - break; - if( i0 > middle ) - b = i0 - (b == i0); - else - a = i0; - } - - float pivot = vals[ofs[middle]]; - for( k = a0; k < middle; k++ ) - { - CV_Assert(vals[ofs[k]] <= pivot); - } - for( k = b0; k > middle; k-- ) - { - CV_Assert(vals[ofs[k]] >= pivot); - } - - return vals[ofs[middle]]; -} - -static void -computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums ) -{ - int i, j, dims = points.cols; - const float* data = points.ptr(0); - for( j = 0; j < dims; j++ ) - sums[j*2] = sums[j*2+1] = 0; - for( i = a; i <= b; i++ ) - { - const float* row = data + ofs[i]; - for( j = 0; j < dims; j++ ) - { - double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t; - sums[j*2] = s; sums[j*2+1] = s2; - } - } -} - - -void KDTree::build(InputArray _points, bool _copyData) -{ - build(_points, noArray(), _copyData); -} - - -void KDTree::build(InputArray __points, InputArray __labels, bool _copyData) -{ - Mat _points = __points.getMat(), _labels = __labels.getMat(); - CV_Assert(_points.type() == CV_32F && !_points.empty()); - std::vector().swap(nodes); - - if( !_copyData ) - points = _points; - else - { - points.release(); - points.create(_points.size(), _points.type()); - } - - int i, j, n = _points.rows, ptdims = _points.cols, top = 0; - const float* data = _points.ptr(0); - float* dstdata = points.ptr(0); - size_t step = _points.step1(); - size_t dstep = points.step1(); - int ptpos = 0; - labels.resize(n); - const int* _labels_data = 0; - - if( !_labels.empty() ) - { - int nlabels = _labels.checkVector(1, CV_32S, true); - CV_Assert(nlabels == n); - _labels_data = _labels.ptr(); - } - - Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F); - SubTree stack[MAX_TREE_DEPTH*2]; - - std::vector _ptofs(n); - size_t* ptofs = &_ptofs[0]; - - for( i = 0; i < n; i++ ) - ptofs[i] = i*step; - - nodes.push_back(Node()); - computeSums(points, ptofs, 0, n-1, sumstack.ptr(top)); - stack[top++] = SubTree(0, n-1, 0, 0); - int _maxDepth = 0; - - while( --top >= 0 ) - { - int first = stack[top].first, last = stack[top].last; - int depth = stack[top].depth, nidx = stack[top].nodeIdx; - int count = last - first + 1, dim = -1; - const double* sums = sumstack.ptr(top); - double invCount = 1./count, maxVar = -1.; - - if( count == 1 ) - { - int idx0 = (int)(ptofs[first]/step); - int idx = _copyData ? ptpos++ : idx0; - nodes[nidx].idx = ~idx; - if( _copyData ) - { - const float* src = data + ptofs[first]; - float* dst = dstdata + idx*dstep; - for( j = 0; j < ptdims; j++ ) - dst[j] = src[j]; - } - labels[idx] = _labels_data ? 
_labels_data[idx0] : idx0; - _maxDepth = std::max(_maxDepth, depth); - continue; - } - - // find the dimensionality with the biggest variance - for( j = 0; j < ptdims; j++ ) - { - double m = sums[j*2]*invCount; - double varj = sums[j*2+1]*invCount - m*m; - if( maxVar < varj ) - { - maxVar = varj; - dim = j; - } - } - - int left = (int)nodes.size(), right = left + 1; - nodes.push_back(Node()); - nodes.push_back(Node()); - nodes[nidx].idx = dim; - nodes[nidx].left = left; - nodes[nidx].right = right; - nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim); - - int middle = (first + last)/2; - double *lsums = (double*)sums, *rsums = lsums + ptdims*2; - computeSums(points, ptofs, middle+1, last, rsums); - for( j = 0; j < ptdims*2; j++ ) - lsums[j] = sums[j] - rsums[j]; - stack[top++] = SubTree(first, middle, left, depth+1); - stack[top++] = SubTree(middle+1, last, right, depth+1); - } - maxDepth = _maxDepth; -} - - -struct PQueueElem -{ - PQueueElem() : dist(0), idx(0) {} - PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {} - float dist; - int idx; -}; - - -int KDTree::findNearest(InputArray _vec, int K, int emax, - OutputArray _neighborsIdx, OutputArray _neighbors, - OutputArray _dist, OutputArray _labels) const - -{ - Mat vecmat = _vec.getMat(); - CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols ); - const float* vec = vecmat.ptr(); - K = std::min(K, points.rows); - int ptdims = points.cols; - - CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1)); - - AutoBuffer _buf((K+1)*(sizeof(float) + sizeof(int))); - int* idx = (int*)_buf.data(); - float* dist = (float*)(idx + K + 1); - int i, j, ncount = 0, e = 0; - - int qsize = 0, maxqsize = 1 << 10; - AutoBuffer _pqueue(maxqsize*sizeof(PQueueElem)); - PQueueElem* pqueue = (PQueueElem*)_pqueue.data(); - emax = std::max(emax, 1); - - for( e = 0; e < emax; ) - { - float d, alt_d = 0.f; - int nidx; - - if( e == 0 ) - nidx = 0; - else - { - // take the next node from the priority queue - if( qsize == 0 ) - break; - nidx = pqueue[0].idx; - alt_d = pqueue[0].dist; - if( --qsize > 0 ) - { - std::swap(pqueue[0], pqueue[qsize]); - d = pqueue[0].dist; - for( i = 0;;) - { - int left = i*2 + 1, right = i*2 + 2; - if( left >= qsize ) - break; - if( right < qsize && pqueue[right].dist < pqueue[left].dist ) - left = right; - if( pqueue[left].dist >= d ) - break; - std::swap(pqueue[i], pqueue[left]); - i = left; - } - } - - if( ncount == K && alt_d > dist[ncount-1] ) - continue; - } - - for(;;) - { - if( nidx < 0 ) - break; - const Node& n = nodes[nidx]; - - if( n.idx < 0 ) - { - i = ~n.idx; - const float* row = points.ptr(i); - if( normType == NORM_L2 ) - for( j = 0, d = 0.f; j < ptdims; j++ ) - { - float t = vec[j] - row[j]; - d += t*t; - } - else - for( j = 0, d = 0.f; j < ptdims; j++ ) - d += std::abs(vec[j] - row[j]); - - dist[ncount] = d; - idx[ncount] = i; - for( i = ncount-1; i >= 0; i-- ) - { - if( dist[i] <= d ) - break; - std::swap(dist[i], dist[i+1]); - std::swap(idx[i], idx[i+1]); - } - ncount += ncount < K; - e++; - break; - } - - int alt; - if( vec[n.idx] <= n.boundary ) - { - nidx = n.left; - alt = n.right; - } - else - { - nidx = n.right; - alt = n.left; - } - - d = vec[n.idx] - n.boundary; - if( normType == NORM_L2 ) - d = d*d + alt_d; - else - d = std::abs(d) + alt_d; - // subtree prunning - if( ncount == K && d > dist[ncount-1] ) - continue; - // add alternative subtree to the priority queue - pqueue[qsize] = PQueueElem(d, alt); - for( i = qsize; i > 0; 
) - { - int parent = (i-1)/2; - if( parent < 0 || pqueue[parent].dist <= d ) - break; - std::swap(pqueue[i], pqueue[parent]); - i = parent; - } - qsize += qsize+1 < maxqsize; - } - } - - K = std::min(K, ncount); - if( _neighborsIdx.needed() ) - { - _neighborsIdx.create(K, 1, CV_32S, -1, true); - Mat nidx = _neighborsIdx.getMat(); - Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); - } - if( _dist.needed() ) - sqrt(Mat(K, 1, CV_32F, dist), _dist); - - if( _neighbors.needed() || _labels.needed() ) - getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels); - return K; -} - - -void KDTree::findOrthoRange(InputArray _lowerBound, - InputArray _upperBound, - OutputArray _neighborsIdx, - OutputArray _neighbors, - OutputArray _labels ) const -{ - int ptdims = points.cols; - Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat(); - CV_Assert( lowerBound.size == upperBound.size && - lowerBound.isContinuous() && - upperBound.isContinuous() && - lowerBound.type() == upperBound.type() && - lowerBound.type() == CV_32F && - lowerBound.total() == (size_t)ptdims ); - const float* L = lowerBound.ptr(); - const float* R = upperBound.ptr(); - - std::vector idx; - AutoBuffer _stack(MAX_TREE_DEPTH*2 + 1); - int* stack = _stack.data(); - int top = 0; - - stack[top++] = 0; - - while( --top >= 0 ) - { - int nidx = stack[top]; - if( nidx < 0 ) - break; - const Node& n = nodes[nidx]; - if( n.idx < 0 ) - { - int j, i = ~n.idx; - const float* row = points.ptr(i); - for( j = 0; j < ptdims; j++ ) - if( row[j] < L[j] || row[j] >= R[j] ) - break; - if( j == ptdims ) - idx.push_back(i); - continue; - } - if( L[n.idx] <= n.boundary ) - stack[top++] = n.left; - if( R[n.idx] > n.boundary ) - stack[top++] = n.right; - } - - if( _neighborsIdx.needed() ) - { - _neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true); - Mat nidx = _neighborsIdx.getMat(); - Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); - } - getPoints( idx, _neighbors, _labels ); -} - - -void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const -{ - Mat idxmat = _idx.getMat(), pts, labelsmat; - CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S && - (idxmat.cols == 1 || idxmat.rows == 1) ); - const int* idx = idxmat.ptr(); - int* dstlabels = 0; - - int ptdims = points.cols; - int i, nidx = (int)idxmat.total(); - if( nidx == 0 ) - { - _pts.release(); - _labels.release(); - return; - } - - if( _pts.needed() ) - { - _pts.create( nidx, ptdims, points.type()); - pts = _pts.getMat(); - } - - if(_labels.needed()) - { - _labels.create(nidx, 1, CV_32S, -1, true); - labelsmat = _labels.getMat(); - CV_Assert( labelsmat.isContinuous() ); - dstlabels = labelsmat.ptr(); - } - const int* srclabels = !labels.empty() ? &labels[0] : 0; - - for( i = 0; i < nidx; i++ ) - { - int k = idx[i]; - CV_Assert( (unsigned)k < (unsigned)points.rows ); - const float* src = points.ptr(k); - if( !pts.empty() ) - std::copy(src, src + ptdims, pts.ptr(i)); - if( dstlabels ) - dstlabels[i] = srclabels ? srclabels[k] : k; - } -} - - -const float* KDTree::getPoint(int ptidx, int* label) const -{ - CV_Assert( (unsigned)ptidx < (unsigned)points.rows); - if(label) - *label = labels[ptidx]; - return points.ptr(ptidx); -} - - -int KDTree::dims() const -{ - return !points.empty() ? 
points.cols : 0;
-}
-
-}
-}
diff --git a/modules/ml/src/kdtree.hpp b/modules/ml/src/kdtree.hpp
deleted file mode 100644
index 2975c7c75f..0000000000
--- a/modules/ml/src/kdtree.hpp
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef KDTREE_H
-#define KDTREE_H
-
-#include "precomp.hpp"
-
-namespace cv
-{
-namespace ml
-{
-
-/*!
- Fast Nearest Neighbor Search Class.
-
- The class implements D. Lowe BBF (Best-Bin-First) algorithm for the fast
- approximate (or accurate) nearest neighbor search in multi-dimensional spaces.
-
- First, a set of vectors is passed to KDTree::KDTree() constructor
- or KDTree::build() method, where it is reordered.
-
- Then arbitrary vectors can be passed to KDTree::findNearest() methods, which
- find the K nearest neighbors among the vectors from the initial set.
- The user can balance between the speed and accuracy of the search by varying Emax
- parameter, which is the number of leaves that the algorithm checks.
- The larger parameter values yield more accurate results at the expense of lower processing speed.
-
- \code
- KDTree T(points, false);
- const int K = 3, Emax = INT_MAX;
- int idx[K];
- float dist[K];
- T.findNearest(query_vec, K, Emax, idx, 0, dist);
- CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]);
- \endcode
-*/
-class CV_EXPORTS_W KDTree
-{
-public:
-    /*!
-     The node of the search tree.
-    */
-    struct Node
-    {
-        Node() : idx(-1), left(-1), right(-1), boundary(0.f) {}
-        Node(int _idx, int _left, int _right, float _boundary)
-            : idx(_idx), left(_left), right(_right), boundary(_boundary) {}
-
-        //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point)
-        int idx;
-        //! node indices of the left and the right branches
-        int left, right;
-        //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right
-        float boundary;
-    };
-
-    //! the default constructor
-    CV_WRAP KDTree();
-    //! the full constructor that builds the search tree
-    CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints = false);
-    //! the full constructor that builds the search tree
-    CV_WRAP KDTree(InputArray points, InputArray _labels,
-                   bool copyAndReorderPoints = false);
-    //! builds the search tree
-    CV_WRAP void build(InputArray points, bool copyAndReorderPoints = false);
-    //! builds the search tree
-    CV_WRAP void build(InputArray points, InputArray labels,
-                       bool copyAndReorderPoints = false);
-    //! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
-    CV_WRAP int findNearest(InputArray vec, int K, int Emax,
-                            OutputArray neighborsIdx,
-                            OutputArray neighbors = noArray(),
-                            OutputArray dist = noArray(),
-                            OutputArray labels = noArray()) const;
-    //! finds all the points from the initial set that belong to the specified box
-    CV_WRAP void findOrthoRange(InputArray minBounds,
-                                InputArray maxBounds,
-                                OutputArray neighborsIdx,
-                                OutputArray neighbors = noArray(),
-                                OutputArray labels = noArray()) const;
-    //! returns vectors with the specified indices
-    CV_WRAP void getPoints(InputArray idx, OutputArray pts,
-                           OutputArray labels = noArray()) const;
-    //! return a vector with the specified index
-    const float* getPoint(int ptidx, int* label = 0) const;
-    //! returns the search space dimensionality
-    CV_WRAP int dims() const;
-
-    std::vector<Node> nodes;         //!< all the tree nodes
-    CV_PROP Mat points;              //!< all the points. It can be a reordered copy of the input vector set or the original vector set.
-    CV_PROP std::vector<int> labels; //!< the parallel array of labels.
-    CV_PROP int maxDepth;            //!< maximum depth of the search tree. Do not modify it
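A note on the search bookkeeping used by findNearest() above: the best K candidates are kept in two parallel arrays sorted by distance, so the current K-th best distance can prune whole subtrees. A minimal standalone sketch of that insertion step, with hypothetical names (this is not the module's code, just the idea):

    #include <cstdio>

    // Insert candidate (d, id) into dist[]/idx[], keeping dist[] ascending.
    // n is the number of entries currently held (n <= K).
    static void pushKBest(float d, int id, float* dist, int* idx, int& n, int K)
    {
        if (n == K && d >= dist[n-1])
            return;                    // worse than the current K-th best: skip
        int i = (n < K) ? n : K - 1;   // slot where the new entry starts
        while (i > 0 && dist[i-1] > d) // shift larger entries one step right
        {
            dist[i] = dist[i-1];
            idx[i]  = idx[i-1];
            --i;
        }
        dist[i] = d;
        idx[i]  = id;
        if (n < K) ++n;
    }

    int main()
    {
        float dist[3]; int idx[3]; int n = 0;
        const float cand[] = { 4.f, 1.f, 9.f, 0.5f, 3.f };
        for (int i = 0; i < 5; i++)
            pushKBest(cand[i], i, dist, idx, n, 3);
        for (int i = 0; i < n; i++)
            printf("%d: d=%g id=%d\n", i, dist[i], idx[i]); // 0.5, 1, 3
        return 0;
    }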
-    CV_PROP_RW int normType;        //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it
-};
-
-}
-}
-
-#endif
diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp
deleted file mode 100644
index 3d8f9b5d2e..0000000000
--- a/modules/ml/src/knearest.cpp
+++ /dev/null
@@ -1,521 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Copyright (C) 2014, Itseez Inc, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-// -//M*/ - -#include "precomp.hpp" -#include "kdtree.hpp" - -/****************************************************************************************\ -* K-Nearest Neighbors Classifier * -\****************************************************************************************/ - -namespace cv { -namespace ml { - -const String NAME_BRUTE_FORCE = "opencv_ml_knn"; -const String NAME_KDTREE = "opencv_ml_knn_kd"; - -class Impl -{ -public: - Impl() - { - defaultK = 10; - isclassifier = true; - Emax = INT_MAX; - } - - virtual ~Impl() {} - virtual String getModelName() const = 0; - virtual int getType() const = 0; - virtual float findNearest( InputArray _samples, int k, - OutputArray _results, - OutputArray _neighborResponses, - OutputArray _dists ) const = 0; - - bool train( const Ptr& data, int flags ) - { - CV_Assert(!data.empty()); - Mat new_samples = data->getTrainSamples(ROW_SAMPLE); - Mat new_responses; - data->getTrainResponses().convertTo(new_responses, CV_32F); - bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty(); - - CV_Assert( new_samples.type() == CV_32F ); - - if( !update ) - { - clear(); - } - else - { - CV_Assert( new_samples.cols == samples.cols && - new_responses.cols == responses.cols ); - } - - samples.push_back(new_samples); - responses.push_back(new_responses); - - doTrain(samples); - - return true; - } - - virtual void doTrain(InputArray points) { CV_UNUSED(points); } - - void clear() - { - samples.release(); - responses.release(); - } - - void read( const FileNode& fn ) - { - clear(); - isclassifier = (int)fn["is_classifier"] != 0; - defaultK = (int)fn["default_k"]; - - fn["samples"] >> samples; - fn["responses"] >> responses; - } - - void write( FileStorage& fs ) const - { - fs << "is_classifier" << (int)isclassifier; - fs << "default_k" << defaultK; - - fs << "samples" << samples; - fs << "responses" << responses; - } - -public: - int defaultK; - bool isclassifier; - int Emax; - - Mat samples; - Mat responses; -}; - -class BruteForceImpl CV_FINAL : public Impl -{ -public: - String getModelName() const CV_OVERRIDE { return NAME_BRUTE_FORCE; } - int getType() const CV_OVERRIDE { return ml::KNearest::BRUTE_FORCE; } - - void findNearestCore( const Mat& _samples, int k, const Range& range, - Mat* results, Mat* neighbor_responses, - Mat* dists, float* presult ) const - { - int testidx, baseidx, i, j, d = samples.cols, nsamples = samples.rows; - int testcount = range.end - range.start; - - AutoBuffer buf(testcount*k*2); - float* dbuf = buf.data(); - float* rbuf = dbuf + testcount*k; - - const float* rptr = responses.ptr(); - - for( testidx = 0; testidx < testcount; testidx++ ) - { - for( i = 0; i < k; i++ ) - { - dbuf[testidx*k + i] = FLT_MAX; - rbuf[testidx*k + i] = 0.f; - } - } - - for( baseidx = 0; baseidx < nsamples; baseidx++ ) - { - for( testidx = 0; testidx < testcount; testidx++ ) - { - const float* v = samples.ptr(baseidx); - const float* u = _samples.ptr(testidx + range.start); - - float s = 0; - for( i = 0; i <= d - 4; i += 4 ) - { - float t0 = u[i] - v[i], t1 = u[i+1] - v[i+1]; - float t2 = u[i+2] - v[i+2], t3 = u[i+3] - v[i+3]; - s += t0*t0 + t1*t1 + t2*t2 + t3*t3; - } - - for( ; i < d; i++ ) - { - float t0 = u[i] - v[i]; - s += t0*t0; - } - - Cv32suf si; - si.f = (float)s; - Cv32suf* dd = (Cv32suf*)(&dbuf[testidx*k]); - float* nr = &rbuf[testidx*k]; - - for( i = k; i > 0; i-- ) - if( si.i >= dd[i-1].i ) - break; - if( i >= k ) - continue; - - for( j = k-2; j >= i; j-- ) - { - dd[j+1].i = dd[j].i; - nr[j+1] = nr[j]; - } - dd[i].i = si.i; 
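Two details of the brute-force inner loop above are worth calling out: the squared L2 distance is computed with 4-way manual unrolling, and candidate distances are compared through their bit patterns (Cv32suf), which orders the same way as the float values because squared distances are non-negative. A standalone sketch of just the unrolled distance, assuming plain contiguous float arrays (illustrative names, not the module's API):

    #include <cstdio>

    // Squared L2 distance with a 4-wide unrolled main loop and a scalar tail.
    static float sqrL2(const float* u, const float* v, int d)
    {
        float s = 0.f;
        int i = 0;
        for (; i <= d - 4; i += 4)   // main unrolled body
        {
            float t0 = u[i]   - v[i],   t1 = u[i+1] - v[i+1];
            float t2 = u[i+2] - v[i+2], t3 = u[i+3] - v[i+3];
            s += t0*t0 + t1*t1 + t2*t2 + t3*t3;
        }
        for (; i < d; i++)           // tail when d is not a multiple of 4
        {
            float t = u[i] - v[i];
            s += t*t;
        }
        return s;
    }

    int main()
    {
        const float a[5] = {1, 2, 3, 4, 5}, b[5] = {0, 2, 3, 4, 3};
        printf("%g\n", sqrL2(a, b, 5));   // prints 5 (= 1 + 4)
        return 0;
    }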
- nr[i] = rptr[baseidx]; - } - } - - float result = 0.f; - float inv_scale = 1.f/k; - - for( testidx = 0; testidx < testcount; testidx++ ) - { - if( neighbor_responses ) - { - float* nr = neighbor_responses->ptr(testidx + range.start); - for( j = 0; j < k; j++ ) - nr[j] = rbuf[testidx*k + j]; - for( ; j < k; j++ ) - nr[j] = 0.f; - } - - if( dists ) - { - float* dptr = dists->ptr(testidx + range.start); - for( j = 0; j < k; j++ ) - dptr[j] = dbuf[testidx*k + j]; - for( ; j < k; j++ ) - dptr[j] = 0.f; - } - - if( results || testidx+range.start == 0 ) - { - if( !isclassifier || k == 1 ) - { - float s = 0.f; - for( j = 0; j < k; j++ ) - s += rbuf[testidx*k + j]; - result = (float)(s*inv_scale); - } - else - { - float* rp = rbuf + testidx*k; - std::sort(rp, rp+k); - - result = rp[0]; - int prev_start = 0; - int best_count = 0; - for( j = 1; j <= k; j++ ) - { - if( j == k || rp[j] != rp[j-1] ) - { - int count = j - prev_start; - if( best_count < count ) - { - best_count = count; - result = rp[j-1]; - } - prev_start = j; - } - } - } - if( results ) - results->at(testidx + range.start) = result; - if( presult && testidx+range.start == 0 ) - *presult = result; - } - } - } - - struct findKNearestInvoker : public ParallelLoopBody - { - findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples, - Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult) - { - p = _p; - k = _k; - _samples = &__samples; - _results = __results; - _neighbor_responses = __neighbor_responses; - _dists = __dists; - presult = _presult; - } - - void operator()(const Range& range) const CV_OVERRIDE - { - int delta = std::min(range.end - range.start, 256); - for( int start = range.start; start < range.end; start += delta ) - { - p->findNearestCore( *_samples, k, Range(start, std::min(start + delta, range.end)), - _results, _neighbor_responses, _dists, presult ); - } - } - - const BruteForceImpl* p; - int k; - const Mat* _samples; - Mat* _results; - Mat* _neighbor_responses; - Mat* _dists; - float* presult; - }; - - float findNearest( InputArray _samples, int k, - OutputArray _results, - OutputArray _neighborResponses, - OutputArray _dists ) const CV_OVERRIDE - { - float result = 0.f; - CV_Assert( 0 < k ); - k = std::min(k, samples.rows); - - Mat test_samples = _samples.getMat(); - CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols ); - int testcount = test_samples.rows; - - if( testcount == 0 ) - { - _results.release(); - _neighborResponses.release(); - _dists.release(); - return 0.f; - } - - Mat res, nr, d, *pres = 0, *pnr = 0, *pd = 0; - if( _results.needed() ) - { - _results.create(testcount, 1, CV_32F); - pres = &(res = _results.getMat()); - } - if( _neighborResponses.needed() ) - { - _neighborResponses.create(testcount, k, CV_32F); - pnr = &(nr = _neighborResponses.getMat()); - } - if( _dists.needed() ) - { - _dists.create(testcount, k, CV_32F); - pd = &(d = _dists.getMat()); - } - - findKNearestInvoker invoker(this, k, test_samples, pres, pnr, pd, &result); - parallel_for_(Range(0, testcount), invoker); - //invoker(Range(0, testcount)); - return result; - } -}; - - -class KDTreeImpl CV_FINAL : public Impl -{ -public: - String getModelName() const CV_OVERRIDE { return NAME_KDTREE; } - int getType() const CV_OVERRIDE { return ml::KNearest::KDTREE; } - - void doTrain(InputArray points) CV_OVERRIDE - { - tr.build(points); - } - - float findNearest( InputArray _samples, int k, - OutputArray _results, - OutputArray _neighborResponses, - OutputArray _dists ) const 
CV_OVERRIDE - { - float result = 0.f; - CV_Assert( 0 < k ); - k = std::min(k, samples.rows); - - Mat test_samples = _samples.getMat(); - CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols ); - int testcount = test_samples.rows; - - if( testcount == 0 ) - { - _results.release(); - _neighborResponses.release(); - _dists.release(); - return 0.f; - } - - Mat res, nr, d; - if( _results.needed() ) - { - res = _results.getMat(); - } - if( _neighborResponses.needed() ) - { - nr = _neighborResponses.getMat(); - } - if( _dists.needed() ) - { - d = _dists.getMat(); - } - - for (int i=0; idefaultK; } - inline void setDefaultK(int val) CV_OVERRIDE { impl->defaultK = val; } - inline bool getIsClassifier() const CV_OVERRIDE { return impl->isclassifier; } - inline void setIsClassifier(bool val) CV_OVERRIDE { impl->isclassifier = val; } - inline int getEmax() const CV_OVERRIDE { return impl->Emax; } - inline void setEmax(int val) CV_OVERRIDE { impl->Emax = val; } - -public: - int getAlgorithmType() const CV_OVERRIDE - { - return impl->getType(); - } - void setAlgorithmType(int val) CV_OVERRIDE - { - if (val != BRUTE_FORCE && val != KDTREE) - val = BRUTE_FORCE; - - int k = getDefaultK(); - int e = getEmax(); - bool c = getIsClassifier(); - - initImpl(val); - - setDefaultK(k); - setEmax(e); - setIsClassifier(c); - } - -public: - KNearestImpl() - { - initImpl(BRUTE_FORCE); - } - ~KNearestImpl() - { - } - - bool isClassifier() const CV_OVERRIDE { return impl->isclassifier; } - bool isTrained() const CV_OVERRIDE { return !impl->samples.empty(); } - - int getVarCount() const CV_OVERRIDE { return impl->samples.cols; } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - writeFormat(fs); - impl->write(fs); - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - int algorithmType = BRUTE_FORCE; - if (fn.name() == NAME_KDTREE) - algorithmType = KDTREE; - initImpl(algorithmType); - impl->read(fn); - } - - float findNearest( InputArray samples, int k, - OutputArray results, - OutputArray neighborResponses=noArray(), - OutputArray dist=noArray() ) const CV_OVERRIDE - { - return impl->findNearest(samples, k, results, neighborResponses, dist); - } - - float predict(InputArray inputs, OutputArray outputs, int) const CV_OVERRIDE - { - return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() ); - } - - bool train( const Ptr& data, int flags ) CV_OVERRIDE - { - CV_Assert(!data.empty()); - return impl->train(data, flags); - } - - String getDefaultName() const CV_OVERRIDE { return impl->getModelName(); } - -protected: - void initImpl(int algorithmType) - { - if (algorithmType != KDTREE) - impl = makePtr(); - else - impl = makePtr(); - } - Ptr impl; -}; - -Ptr KNearest::create() -{ - return makePtr(); -} - -Ptr KNearest::load(const String& filepath) -{ - FileStorage fs; - fs.open(filepath, FileStorage::READ); - - Ptr knearest = makePtr(); - - ((KNearestImpl*)knearest.get())->read(fs.getFirstTopLevelNode()); - return knearest; -} - -} -} - -/* End of file */ diff --git a/modules/ml/src/lr.cpp b/modules/ml/src/lr.cpp deleted file mode 100644 index b43e104045..0000000000 --- a/modules/ml/src/lr.cpp +++ /dev/null @@ -1,604 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
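The file that follows implements logistic regression trained by (mini-)batch gradient descent on the regularized log-loss. As a primer on the update rule it applies, theta -= (alpha/m) * X^T (sigmoid(X*theta) - y), here is a minimal standalone sketch on toy 1-D data with a bias column (illustrative code, not the module's API; convergence figures are for this toy set only):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // column 0 is the bias term, column 1 the single feature
        const double X[4][2] = { {1, 0.0}, {1, 1.0}, {1, 3.0}, {1, 4.0} };
        const double y[4]    = { 0, 0, 1, 1 };
        double theta[2] = { 0, 0 };
        const double alpha = 0.1;
        const int m = 4, iters = 5000;

        for (int it = 0; it < iters; it++)
        {
            double g[2] = { 0, 0 };
            for (int i = 0; i < m; i++)
            {
                double z = theta[0]*X[i][0] + theta[1]*X[i][1];
                double h = 1.0 / (1.0 + std::exp(-z));   // sigmoid
                for (int j = 0; j < 2; j++)
                    g[j] += (h - y[i]) * X[i][j];        // log-loss gradient
            }
            for (int j = 0; j < 2; j++)
                theta[j] -= alpha / m * g[j];            // batch update
        }
        // the decision boundary should settle near x = 2 for this data
        printf("theta = (%.3f, %.3f), boundary at x = %.3f\n",
               theta[0], theta[1], -theta[0]/theta[1]);
        return 0;
    }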
-//
-// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
-
-//
-// This is an implementation of the Logistic Regression algorithm
-//
-
-#include "precomp.hpp"
-
-using namespace std;
-
-namespace cv {
-namespace ml {
-
-class LrParams
-{
-public:
-    LrParams()
-    {
-        alpha = 0.001;
-        num_iters = 1000;
-        norm = LogisticRegression::REG_L2;
-        train_method = LogisticRegression::BATCH;
-        mini_batch_size = 1;
-        term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
-    }
-
-    double alpha; //!< learning rate.
-    int num_iters; //!< number of iterations.
-    int norm;
-    int train_method;
-    int mini_batch_size;
-    TermCriteria term_crit;
-};
-
-class LogisticRegressionImpl CV_FINAL : public LogisticRegression
-{
-public:
-
-    LogisticRegressionImpl() { }
-    virtual ~LogisticRegressionImpl() {}
-
-    inline double getLearningRate() const CV_OVERRIDE { return params.alpha; }
-    inline void setLearningRate(double val) CV_OVERRIDE { params.alpha = val; }
-    inline int getIterations() const CV_OVERRIDE { return params.num_iters; }
-    inline void setIterations(int val) CV_OVERRIDE { params.num_iters = val; }
-    inline int getRegularization() const CV_OVERRIDE { return params.norm; }
-    inline void setRegularization(int val) CV_OVERRIDE { params.norm = val; }
-    inline int getTrainMethod() const CV_OVERRIDE { return params.train_method; }
-    inline void setTrainMethod(int val) CV_OVERRIDE { params.train_method = val; }
-    inline int getMiniBatchSize() const CV_OVERRIDE { return params.mini_batch_size; }
-    inline void setMiniBatchSize(int val) CV_OVERRIDE { params.mini_batch_size = val; }
-    inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.term_crit; }
-    inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.term_crit = val; }
-
-    virtual bool train( const Ptr<TrainData>& trainData, int=0 ) CV_OVERRIDE;
-    virtual float predict(InputArray samples, OutputArray results, int flags=0) const CV_OVERRIDE;
-    virtual void clear() CV_OVERRIDE;
-    virtual void write(FileStorage& fs) const CV_OVERRIDE;
-    virtual void read(const FileNode& fn) CV_OVERRIDE;
-    virtual Mat get_learnt_thetas() const CV_OVERRIDE { return learnt_thetas; }
-    virtual int getVarCount() const CV_OVERRIDE { return learnt_thetas.cols; }
-    virtual bool isTrained() const CV_OVERRIDE { return !learnt_thetas.empty(); }
-    virtual bool isClassifier() const CV_OVERRIDE { return true; }
-    virtual String getDefaultName() const CV_OVERRIDE { return "opencv_ml_lr"; }
-protected:
-    Mat calc_sigmoid(const Mat& data) const;
-    double compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
-    void compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient );
-    Mat batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
-    Mat mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
-    bool set_label_map(const Mat& _labels_i);
-    Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
-protected:
-    LrParams params;
-    Mat learnt_thetas;
-    map<int, int> forward_mapper;
-    map<int, int> reverse_mapper;
-    Mat labels_o;
-    Mat labels_n;
-};
-
-Ptr<LogisticRegression> LogisticRegression::create()
-{
-    return makePtr<LogisticRegressionImpl>();
-}
-
-Ptr<LogisticRegression> LogisticRegression::load(const String& filepath, const String& nodeName)
-{
-    return Algorithm::load<LogisticRegression>(filepath, nodeName);
-}
-
-
-bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
-{
-    CV_TRACE_FUNCTION_SKIP_NESTED();
-    CV_Assert(!trainData.empty());
-
-    // return value
-    bool ok = false;
-    clear();
-    Mat
_data_i = trainData->getSamples(); - Mat _labels_i = trainData->getResponses(); - - // check size and type of training data - CV_Assert( !_labels_i.empty() && !_data_i.empty()); - if(_labels_i.cols != 1) - { - CV_Error( CV_StsBadArg, "labels should be a column matrix" ); - } - if(_data_i.type() != CV_32FC1 || _labels_i.type() != CV_32FC1) - { - CV_Error( CV_StsBadArg, "data and labels must be a floating point matrix" ); - } - if(_labels_i.rows != _data_i.rows) - { - CV_Error( CV_StsBadArg, "number of rows in data and labels should be equal" ); - } - - // class labels - set_label_map(_labels_i); - Mat labels_l = remap_labels(_labels_i, this->forward_mapper); - int num_classes = (int) this->forward_mapper.size(); - if(num_classes < 2) - { - CV_Error( CV_StsBadArg, "data should have at least 2 classes" ); - } - - // add a column of ones to the data (bias/intercept term) - Mat data_t; - hconcat( cv::Mat::ones( _data_i.rows, 1, CV_32F ), _data_i, data_t ); - - // coefficient matrix (zero-initialized) - Mat thetas; - Mat init_theta = Mat::zeros(data_t.cols, 1, CV_32F); - - // fit the model (handles binary and multiclass cases) - Mat new_theta; - Mat labels; - if(num_classes == 2) - { - labels_l.convertTo(labels, CV_32F); - if(this->params.train_method == LogisticRegression::BATCH) - new_theta = batch_gradient_descent(data_t, labels, init_theta); - else - new_theta = mini_batch_gradient_descent(data_t, labels, init_theta); - thetas = new_theta.t(); - } - else - { - /* take each class and rename classes you will get a theta per class - as in multi class class scenario, we will have n thetas for n classes */ - thetas.create(num_classes, data_t.cols, CV_32F); - Mat labels_binary; - int ii = 0; - for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) - { - // one-vs-rest (OvR) scheme - labels_binary = (labels_l == it->second)/255; - labels_binary.convertTo(labels, CV_32F); - if(this->params.train_method == LogisticRegression::BATCH) - new_theta = batch_gradient_descent(data_t, labels, init_theta); - else - new_theta = mini_batch_gradient_descent(data_t, labels, init_theta); - hconcat(new_theta.t(), thetas.row(ii)); - ii += 1; - } - } - - // check that the estimates are stable and finite - this->learnt_thetas = thetas.clone(); - if( cvIsNaN( (double)sum(this->learnt_thetas)[0] ) ) - { - CV_Error( CV_StsBadArg, "check training parameters. 
Invalid training classifier" ); - } - - // success - ok = true; - return ok; -} - -float LogisticRegressionImpl::predict(InputArray samples, OutputArray results, int flags) const -{ - // check if learnt_mats array is populated - if(!this->isTrained()) - { - CV_Error( CV_StsBadArg, "classifier should be trained first" ); - } - - // coefficient matrix - Mat thetas; - if ( learnt_thetas.type() == CV_32F ) - { - thetas = learnt_thetas; - } - else - { - this->learnt_thetas.convertTo( thetas, CV_32F ); - } - CV_Assert(thetas.rows > 0); - - // data samples - Mat data = samples.getMat(); - if(data.type() != CV_32F) - { - CV_Error( CV_StsBadArg, "data must be of floating type" ); - } - - // add a column of ones to the data (bias/intercept term) - Mat data_t; - hconcat( cv::Mat::ones( data.rows, 1, CV_32F ), data, data_t ); - CV_Assert(data_t.cols == thetas.cols); - - // predict class labels for samples (handles binary and multiclass cases) - Mat labels_c; - Mat pred_m; - Mat temp_pred; - if(thetas.rows == 1) - { - // apply sigmoid function - temp_pred = calc_sigmoid(data_t * thetas.t()); - CV_Assert(temp_pred.cols==1); - pred_m = temp_pred.clone(); - - // if greater than 0.5, predict class 0 or predict class 1 - temp_pred = (temp_pred > 0.5f) / 255; - temp_pred.convertTo(labels_c, CV_32S); - } - else - { - // apply sigmoid function - pred_m.create(data_t.rows, thetas.rows, data.type()); - for(int i = 0; i < thetas.rows; i++) - { - temp_pred = calc_sigmoid(data_t * thetas.row(i).t()); - vconcat(temp_pred, pred_m.col(i)); - } - - // predict class with the maximum output - Point max_loc; - Mat labels; - for(int i = 0; i < pred_m.rows; i++) - { - temp_pred = pred_m.row(i); - minMaxLoc( temp_pred, NULL, NULL, NULL, &max_loc ); - labels.push_back(max_loc.x); - } - labels.convertTo(labels_c, CV_32S); - } - - // return label of the predicted class. class names can be 1,2,3,... - Mat pred_labs = remap_labels(labels_c, this->reverse_mapper); - pred_labs.convertTo(pred_labs, CV_32S); - - // return either the labels or the raw output - if ( results.needed() ) - { - if ( flags & StatModel::RAW_OUTPUT ) - { - pred_m.copyTo( results ); - } - else - { - pred_labs.copyTo(results); - } - } - - return ( pred_labs.empty() ? 
0.f : static_cast(pred_labs.at(0)) ); -} - -Mat LogisticRegressionImpl::calc_sigmoid(const Mat& data) const -{ - CV_TRACE_FUNCTION(); - Mat dest; - exp(-data, dest); - return 1.0/(1.0+dest); -} - -double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta) -{ - CV_TRACE_FUNCTION(); - float llambda = 0; /*changed llambda from int to float to solve issue #7924*/ - int m; - int n; - double cost = 0; - double rparameter = 0; - Mat theta_b; - Mat theta_c; - Mat d_a; - Mat d_b; - - m = _data.rows; - n = _data.cols; - - theta_b = _init_theta(Range(1, n), Range::all()); - - if (params.norm != REG_DISABLE) - { - llambda = 1; - } - - if(this->params.norm == LogisticRegression::REG_L1) - { - rparameter = (llambda/(2*m)) * sum(theta_b)[0]; - } - else - { - // assuming it to be L2 by default - multiply(theta_b, theta_b, theta_c, 1); - rparameter = (llambda/(2*m)) * sum(theta_c)[0]; - } - - d_a = calc_sigmoid(_data * _init_theta); - log(d_a, d_a); - multiply(d_a, _labels, d_a); - - // use the fact that: log(1 - sigmoid(x)) = log(sigmoid(-x)) - d_b = calc_sigmoid(- _data * _init_theta); - log(d_b, d_b); - multiply(d_b, 1-_labels, d_b); - - cost = (-1.0/m) * (sum(d_a)[0] + sum(d_b)[0]); - cost = cost + rparameter; - - if(cvIsNaN( cost ) == 1) - { - CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" ); - } - - return cost; -} - -struct LogisticRegressionImpl_ComputeDradient_Impl : ParallelLoopBody -{ - const Mat* data; - const Mat* theta; - const Mat* pcal_a; - Mat* gradient; - double lambda; - - LogisticRegressionImpl_ComputeDradient_Impl(const Mat& _data, const Mat &_theta, const Mat& _pcal_a, const double _lambda, Mat & _gradient) - : data(&_data) - , theta(&_theta) - , pcal_a(&_pcal_a) - , gradient(&_gradient) - , lambda(_lambda) - { - - } - - void operator()(const cv::Range& r) const CV_OVERRIDE - { - const Mat& _data = *data; - const Mat &_theta = *theta; - Mat & _gradient = *gradient; - const Mat & _pcal_a = *pcal_a; - const int m = _data.rows; - Mat pcal_ab; - - for (int ii = r.start; iiparams.alpha<=0) - { - CV_Error( CV_StsBadArg, "check training parameters (learning rate) for the classifier" ); - } - - if(this->params.num_iters <= 0) - { - CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" ); - } - - int llambda = 0; - int m; - Mat theta_p = _init_theta.clone(); - Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() ); - m = _data.rows; - - if (params.norm != REG_DISABLE) - { - llambda = 1; - } - - for(int i = 0;iparams.num_iters;i++) - { - // this seems to only be called to ensure that cost is not NaN - compute_cost(_data, _labels, theta_p); - - compute_gradient( _data, _labels, theta_p, llambda, gradient ); - - theta_p = theta_p - ( static_cast(this->params.alpha)/m)*gradient; - } - return theta_p; -} - -Mat LogisticRegressionImpl::mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta) -{ - // implements batch gradient descent - int lambda_l = 0; - int m; - int j = 0; - int size_b = this->params.mini_batch_size; - - if(this->params.mini_batch_size <= 0 || this->params.alpha == 0) - { - CV_Error( CV_StsBadArg, "check training parameters for the classifier" ); - } - - if(this->params.num_iters <= 0) - { - CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" ); - } - - Mat theta_p = _init_theta.clone(); - Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() ); - Mat data_d; - Mat labels_l; - - if (params.norm != 
REG_DISABLE) - { - lambda_l = 1; - } - - for(int i = 0;iparams.term_crit.maxCount;i++) - { - if(j+size_b<=_data.rows) - { - data_d = _data(Range(j,j+size_b), Range::all()); - labels_l = _labels(Range(j,j+size_b),Range::all()); - } - else - { - data_d = _data(Range(j, _data.rows), Range::all()); - labels_l = _labels(Range(j, _labels.rows),Range::all()); - } - - m = data_d.rows; - - // this seems to only be called to ensure that cost is not NaN - compute_cost(data_d, labels_l, theta_p); - - compute_gradient(data_d, labels_l, theta_p, lambda_l, gradient); - - theta_p = theta_p - ( static_cast(this->params.alpha)/m)*gradient; - - j += this->params.mini_batch_size; - - // if parsed through all data variables - if (j >= _data.rows) { - j = 0; - } - } - return theta_p; -} - -bool LogisticRegressionImpl::set_label_map(const Mat &_labels_i) -{ - // this function creates two maps to map user defined labels to program friendly labels two ways. - int ii = 0; - Mat labels; - - this->labels_o = Mat(0,1, CV_8U); - this->labels_n = Mat(0,1, CV_8U); - - _labels_i.convertTo(labels, CV_32S); - - for(int i = 0;iforward_mapper[labels.at(i)] += 1; - } - - for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) - { - this->forward_mapper[it->first] = ii; - this->labels_o.push_back(it->first); - this->labels_n.push_back(ii); - ii += 1; - } - - for(map::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it) - { - this->reverse_mapper[it->second] = it->first; - } - - return true; -} - -Mat LogisticRegressionImpl::remap_labels(const Mat& _labels_i, const map& lmap) const -{ - Mat labels; - _labels_i.convertTo(labels, CV_32S); - - Mat new_labels = Mat::zeros(labels.rows, labels.cols, labels.type()); - - CV_Assert( !lmap.empty() ); - - for(int i =0;i::const_iterator val = lmap.find(labels.at(i,0)); - CV_Assert(val != lmap.end()); - new_labels.at(i,0) = val->second; - } - return new_labels; -} - -void LogisticRegressionImpl::clear() -{ - this->learnt_thetas.release(); - this->labels_o.release(); - this->labels_n.release(); -} - -void LogisticRegressionImpl::write(FileStorage& fs) const -{ - // check if open - if(fs.isOpened() == 0) - { - CV_Error(CV_StsBadArg,"file can't open. Check file path"); - } - writeFormat(fs); - string desc = "Logistic Regression Classifier"; - fs<<"classifier"<params.alpha; - fs<<"iterations"<params.num_iters; - fs<<"norm"<params.norm; - fs<<"train_method"<params.train_method; - if(this->params.train_method == LogisticRegression::MINI_BATCH) - { - fs<<"mini_batch_size"<params.mini_batch_size; - } - fs<<"learnt_thetas"<learnt_thetas; - fs<<"n_labels"<labels_n; - fs<<"o_labels"<labels_o; -} - -void LogisticRegressionImpl::read(const FileNode& fn) -{ - // check if empty - if(fn.empty()) - { - CV_Error( CV_StsBadArg, "empty FileNode object" ); - } - - this->params.alpha = (double)fn["alpha"]; - this->params.num_iters = (int)fn["iterations"]; - this->params.norm = (int)fn["norm"]; - this->params.train_method = (int)fn["train_method"]; - - if(this->params.train_method == LogisticRegression::MINI_BATCH) - { - this->params.mini_batch_size = (int)fn["mini_batch_size"]; - } - - fn["learnt_thetas"] >> this->learnt_thetas; - fn["o_labels"] >> this->labels_o; - fn["n_labels"] >> this->labels_n; - - for(int ii =0;iiforward_mapper[labels_o.at(ii,0)] = labels_n.at(ii,0); - this->reverse_mapper[labels_n.at(ii,0)] = labels_o.at(ii,0); - } -} - -} -} - -/* End of file. 
 */
diff --git a/modules/ml/src/nbayes.cpp b/modules/ml/src/nbayes.cpp
deleted file mode 100644
index 60dda0c7d4..0000000000
--- a/modules/ml/src/nbayes.cpp
+++ /dev/null
@@ -1,471 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-// [standard Intel/OpenCV BSD-style license header, essentially the same as in the files above]
-// -//M*/ - -#include "precomp.hpp" - -namespace cv { -namespace ml { - - -class NormalBayesClassifierImpl : public NormalBayesClassifier -{ -public: - NormalBayesClassifierImpl() - { - nallvars = 0; - } - - bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_Assert(!trainData.empty()); - const float min_variation = FLT_EPSILON; - Mat responses = trainData->getNormCatResponses(); - Mat __cls_labels = trainData->getClassLabels(); - Mat __var_idx = trainData->getVarIdx(); - Mat samples = trainData->getTrainSamples(); - int nclasses = (int)__cls_labels.total(); - - int nvars = trainData->getNVars(); - int s, c1, c2, cls; - - int __nallvars = trainData->getNAllVars(); - bool update = (flags & UPDATE_MODEL) != 0; - - if( !update ) - { - nallvars = __nallvars; - count.resize(nclasses); - sum.resize(nclasses); - productsum.resize(nclasses); - avg.resize(nclasses); - inv_eigen_values.resize(nclasses); - cov_rotate_mats.resize(nclasses); - - for( cls = 0; cls < nclasses; cls++ ) - { - count[cls] = Mat::zeros( 1, nvars, CV_32SC1 ); - sum[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); - productsum[cls] = Mat::zeros( nvars, nvars, CV_64FC1 ); - avg[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); - inv_eigen_values[cls] = Mat::zeros( 1, nvars, CV_64FC1 ); - cov_rotate_mats[cls] = Mat::zeros( nvars, nvars, CV_64FC1 ); - } - - var_idx = __var_idx; - cls_labels = __cls_labels; - - c.create(1, nclasses, CV_64FC1); - } - else - { - // check that the new training data has the same dimensionality etc. - if( nallvars != __nallvars || - var_idx.size() != __var_idx.size() || - norm(var_idx, __var_idx, NORM_INF) != 0 || - cls_labels.size() != __cls_labels.size() || - norm(cls_labels, __cls_labels, NORM_INF) != 0 ) - CV_Error( CV_StsBadArg, - "The new training data is inconsistent with the original training data; varIdx and the class labels should be the same" ); - } - - Mat cov( nvars, nvars, CV_64FC1 ); - int nsamples = samples.rows; - - // process train data (count, sum , productsum) - for( s = 0; s < nsamples; s++ ) - { - cls = responses.at(s); - int* count_data = count[cls].ptr(); - double* sum_data = sum[cls].ptr(); - double* prod_data = productsum[cls].ptr(); - const float* train_vec = samples.ptr(s); - - for( c1 = 0; c1 < nvars; c1++, prod_data += nvars ) - { - double val1 = train_vec[c1]; - sum_data[c1] += val1; - count_data[c1]++; - for( c2 = c1; c2 < nvars; c2++ ) - prod_data[c2] += train_vec[c2]*val1; - } - } - - Mat vt; - - // calculate avg, covariance matrix, c - for( cls = 0; cls < nclasses; cls++ ) - { - double det = 1; - int i, j; - Mat& w = inv_eigen_values[cls]; - int* count_data = count[cls].ptr(); - double* avg_data = avg[cls].ptr(); - double* sum1 = sum[cls].ptr(); - - completeSymm(productsum[cls], 0); - - for( j = 0; j < nvars; j++ ) - { - int n = count_data[j]; - avg_data[j] = n ? sum1[j] / n : 0.; - } - - count_data = count[cls].ptr(); - avg_data = avg[cls].ptr(); - sum1 = sum[cls].ptr(); - - for( i = 0; i < nvars; i++ ) - { - double* avg2_data = avg[cls].ptr(); - double* sum2 = sum[cls].ptr(); - double* prod_data = productsum[cls].ptr(i); - double* cov_data = cov.ptr(i); - double s1val = sum1[i]; - double avg1 = avg_data[i]; - int _count = count_data[i]; - - for( j = 0; j <= i; j++ ) - { - double avg2 = avg2_data[j]; - double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count; - cov_val = (_count > 1) ? 
cov_val / (_count - 1) : cov_val; - cov_data[j] = cov_val; - } - } - - completeSymm( cov, 1 ); - - SVD::compute(cov, w, cov_rotate_mats[cls], noArray()); - transpose(cov_rotate_mats[cls], cov_rotate_mats[cls]); - cv::max(w, min_variation, w); - for( j = 0; j < nvars; j++ ) - det *= w.at(j); - - divide(1., w, w); - c.at(cls) = det > 0 ? log(det) : -700; - } - - return true; - } - - class NBPredictBody : public ParallelLoopBody - { - public: - NBPredictBody( const Mat& _c, const vector& _cov_rotate_mats, - const vector& _inv_eigen_values, - const vector& _avg, - const Mat& _samples, const Mat& _vidx, const Mat& _cls_labels, - Mat& _results, Mat& _results_prob, bool _rawOutput ) - { - c = &_c; - cov_rotate_mats = &_cov_rotate_mats; - inv_eigen_values = &_inv_eigen_values; - avg = &_avg; - samples = &_samples; - vidx = &_vidx; - cls_labels = &_cls_labels; - results = &_results; - results_prob = !_results_prob.empty() ? &_results_prob : 0; - rawOutput = _rawOutput; - value = 0; - } - - const Mat* c; - const vector* cov_rotate_mats; - const vector* inv_eigen_values; - const vector* avg; - const Mat* samples; - const Mat* vidx; - const Mat* cls_labels; - - Mat* results_prob; - Mat* results; - float* value; - bool rawOutput; - - void operator()(const Range& range) const CV_OVERRIDE - { - int cls = -1; - int rtype = 0, rptype = 0; - size_t rstep = 0, rpstep = 0; - int nclasses = (int)cls_labels->total(); - int nvars = avg->at(0).cols; - double probability = 0; - const int* vptr = vidx && !vidx->empty() ? vidx->ptr() : 0; - - if (results) - { - rtype = results->type(); - rstep = results->isContinuous() ? 1 : results->step/results->elemSize(); - } - if (results_prob) - { - rptype = results_prob->type(); - rpstep = results_prob->isContinuous() ? results_prob->cols : results_prob->step/results_prob->elemSize(); - } - // allocate memory and initializing headers for calculating - cv::AutoBuffer _buffer(nvars*2); - double* _diffin = _buffer.data(); - double* _diffout = _buffer.data() + nvars; - Mat diffin( 1, nvars, CV_64FC1, _diffin ); - Mat diffout( 1, nvars, CV_64FC1, _diffout ); - - for(int k = range.start; k < range.end; k++ ) - { - double opt = FLT_MAX; - - for(int i = 0; i < nclasses; i++ ) - { - double cur = c->at(i); - const Mat& u = cov_rotate_mats->at(i); - const Mat& w = inv_eigen_values->at(i); - - const double* avg_data = avg->at(i).ptr(); - const float* x = samples->ptr(k); - - // cov = u w u' --> cov^(-1) = u w^(-1) u' - for(int j = 0; j < nvars; j++ ) - _diffin[j] = avg_data[j] - x[vptr ? vptr[j] : j]; - - gemm( diffin, u, 1, noArray(), 0, diffout, GEMM_2_T ); - for(int j = 0; j < nvars; j++ ) - { - double d = _diffout[j]; - cur += d*d*w.ptr()[j]; - } - - if( cur < opt ) - { - cls = i; - opt = cur; - } - probability = exp( -0.5 * cur ); - - if( results_prob ) - { - if ( rptype == CV_32FC1 ) - results_prob->ptr()[k*rpstep + i] = (float)probability; - else - results_prob->ptr()[k*rpstep + i] = probability; - } - } - - int ival = rawOutput ? 
cls : cls_labels->at(cls); - if( results ) - { - if( rtype == CV_32SC1 ) - results->ptr()[k*rstep] = ival; - else - results->ptr()[k*rstep] = (float)ival; - } - } - } - }; - - float predict( InputArray _samples, OutputArray _results, int flags ) const CV_OVERRIDE - { - return predictProb(_samples, _results, noArray(), flags); - } - - float predictProb( InputArray _samples, OutputArray _results, OutputArray _resultsProb, int flags ) const CV_OVERRIDE - { - int value=0; - Mat samples = _samples.getMat(), results, resultsProb; - int nsamples = samples.rows, nclasses = (int)cls_labels.total(); - bool rawOutput = (flags & RAW_OUTPUT) != 0; - - if( samples.type() != CV_32F || samples.cols != nallvars ) - CV_Error( CV_StsBadArg, - "The input samples must be 32f matrix with the number of columns = nallvars" ); - - if( (samples.rows > 1) && (! _results.needed()) ) - CV_Error( CV_StsNullPtr, - "When the number of input samples is >1, the output vector of results must be passed" ); - - if( _results.needed() ) - { - _results.create(nsamples, 1, CV_32S); - results = _results.getMat(); - } - else - results = Mat(1, 1, CV_32S, &value); - - if( _resultsProb.needed() ) - { - _resultsProb.create(nsamples, nclasses, CV_32F); - resultsProb = _resultsProb.getMat(); - } - - cv::parallel_for_(cv::Range(0, nsamples), - NBPredictBody(c, cov_rotate_mats, inv_eigen_values, avg, samples, - var_idx, cls_labels, results, resultsProb, rawOutput)); - - return (float)value; - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - int nclasses = (int)cls_labels.total(), i; - - writeFormat(fs); - fs << "var_count" << (var_idx.empty() ? nallvars : (int)var_idx.total()); - fs << "var_all" << nallvars; - - if( !var_idx.empty() ) - fs << "var_idx" << var_idx; - fs << "cls_labels" << cls_labels; - - fs << "count" << "["; - for( i = 0; i < nclasses; i++ ) - fs << count[i]; - - fs << "]" << "sum" << "["; - for( i = 0; i < nclasses; i++ ) - fs << sum[i]; - - fs << "]" << "productsum" << "["; - for( i = 0; i < nclasses; i++ ) - fs << productsum[i]; - - fs << "]" << "avg" << "["; - for( i = 0; i < nclasses; i++ ) - fs << avg[i]; - - fs << "]" << "inv_eigen_values" << "["; - for( i = 0; i < nclasses; i++ ) - fs << inv_eigen_values[i]; - - fs << "]" << "cov_rotate_mats" << "["; - for( i = 0; i < nclasses; i++ ) - fs << cov_rotate_mats[i]; - - fs << "]"; - - fs << "c" << c; - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - clear(); - - fn["var_all"] >> nallvars; - - if( nallvars <= 0 ) - CV_Error( CV_StsParseError, - "The field \"var_count\" of NBayes classifier is missing or non-positive" ); - - fn["var_idx"] >> var_idx; - fn["cls_labels"] >> cls_labels; - - int nclasses = (int)cls_labels.total(), i; - - if( cls_labels.empty() || nclasses < 1 ) - CV_Error( CV_StsParseError, "No or invalid \"cls_labels\" in NBayes classifier" ); - - FileNodeIterator - count_it = fn["count"].begin(), - sum_it = fn["sum"].begin(), - productsum_it = fn["productsum"].begin(), - avg_it = fn["avg"].begin(), - inv_eigen_values_it = fn["inv_eigen_values"].begin(), - cov_rotate_mats_it = fn["cov_rotate_mats"].begin(); - - count.resize(nclasses); - sum.resize(nclasses); - productsum.resize(nclasses); - avg.resize(nclasses); - inv_eigen_values.resize(nclasses); - cov_rotate_mats.resize(nclasses); - - for( i = 0; i < nclasses; i++, ++count_it, ++sum_it, ++productsum_it, ++avg_it, - ++inv_eigen_values_it, ++cov_rotate_mats_it ) - { - *count_it >> count[i]; - *sum_it >> sum[i]; - *productsum_it >> productsum[i]; - *avg_it >> avg[i]; - 
*inv_eigen_values_it >> inv_eigen_values[i];
-            *cov_rotate_mats_it >> cov_rotate_mats[i];
-        }
-
-        fn["c"] >> c;
-    }
-
-    void clear() CV_OVERRIDE
-    {
-        count.clear();
-        sum.clear();
-        productsum.clear();
-        avg.clear();
-        inv_eigen_values.clear();
-        cov_rotate_mats.clear();
-
-        var_idx.release();
-        cls_labels.release();
-        c.release();
-        nallvars = 0;
-    }
-
-    bool isTrained() const CV_OVERRIDE { return !avg.empty(); }
-    bool isClassifier() const CV_OVERRIDE { return true; }
-    int getVarCount() const CV_OVERRIDE { return nallvars; }
-    String getDefaultName() const CV_OVERRIDE { return "opencv_ml_nbayes"; }
-
-    int nallvars;
-    Mat var_idx, cls_labels, c;
-    vector<Mat> count, sum, productsum, avg, inv_eigen_values, cov_rotate_mats;
-};
-
-
-Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
-{
-    Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
-    return p;
-}
-
-Ptr<NormalBayesClassifier> NormalBayesClassifier::load(const String& filepath, const String& nodeName)
-{
-    return Algorithm::load<NormalBayesClassifier>(filepath, nodeName);
-}
-
-}
-}
-
-/* End of file. */
diff --git a/modules/ml/src/precomp.hpp b/modules/ml/src/precomp.hpp
deleted file mode 100644
index 328cc4732a..0000000000
--- a/modules/ml/src/precomp.hpp
+++ /dev/null
@@ -1,400 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-// [standard Intel/OpenCV BSD-style license header, essentially the same as in the files above]
-// -//M*/ - -#ifndef __OPENCV_ML_PRECOMP_HPP__ -#define __OPENCV_ML_PRECOMP_HPP__ - -#include "opencv2/core.hpp" -#include "opencv2/ml.hpp" -#include "opencv2/core/core_c.h" -#include "opencv2/core/utility.hpp" - -#include "opencv2/core/private.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -/****************************************************************************************\ - * Main struct definitions * - \****************************************************************************************/ - -/* log(2*PI) */ -#define CV_LOG2PI (1.8378770664093454835606594728112) - -namespace cv -{ -namespace ml -{ - using std::vector; - - #define CV_DTREE_CAT_DIR(idx,subset) \ - (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1) - - template struct cmp_lt_idx - { - cmp_lt_idx(const _Tp* _arr) : arr(_arr) {} - bool operator ()(int a, int b) const { return arr[a] < arr[b]; } - const _Tp* arr; - }; - - template struct cmp_lt_ptr - { - cmp_lt_ptr() {} - bool operator ()(const _Tp* a, const _Tp* b) const { return *a < *b; } - }; - - static inline void setRangeVector(std::vector& vec, int n) - { - vec.resize(n); - for( int i = 0; i < n; i++ ) - vec[i] = i; - } - - static inline void writeTermCrit(FileStorage& fs, const TermCriteria& termCrit) - { - if( (termCrit.type & TermCriteria::EPS) != 0 ) - fs << "epsilon" << termCrit.epsilon; - if( (termCrit.type & TermCriteria::COUNT) != 0 ) - fs << "iterations" << termCrit.maxCount; - } - - static inline TermCriteria readTermCrit(const FileNode& fn) - { - TermCriteria termCrit; - double epsilon = (double)fn["epsilon"]; - if( epsilon > 0 ) - { - termCrit.type |= TermCriteria::EPS; - termCrit.epsilon = epsilon; - } - int iters = (int)fn["iterations"]; - if( iters > 0 ) - { - termCrit.type |= TermCriteria::COUNT; - termCrit.maxCount = iters; - } - return termCrit; - } - - struct TreeParams - { - TreeParams(); - TreeParams( int maxDepth, int minSampleCount, - double regressionAccuracy, bool useSurrogates, - int maxCategories, int CVFolds, - bool use1SERule, bool truncatePrunedTree, - const Mat& priors ); - - inline void setMaxCategories(int val) - { - if( val < 2 ) - CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" ); - maxCategories = std::min(val, 15 ); - } - inline void setMaxDepth(int val) - { - if( val < 0 ) - CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" ); - maxDepth = std::min( val, 25 ); - } - inline void setMinSampleCount(int val) - { - minSampleCount = std::max(val, 1); - } - inline void setCVFolds(int val) - { - if( val < 0 ) - CV_Error( CV_StsOutOfRange, - "params.CVFolds should be =0 (the tree is not pruned) " - "or n>0 (tree is pruned using n-fold cross-validation)" ); - if(val > 1) - CV_Error( CV_StsNotImplemented, - "tree pruning using cross-validation is not implemented." 
- "Set CVFolds to 1"); - - if( val == 1 ) - val = 0; - CVFolds = val; - } - inline void setRegressionAccuracy(float val) - { - if( val < 0 ) - CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" ); - regressionAccuracy = val; - } - - inline int getMaxCategories() const { return maxCategories; } - inline int getMaxDepth() const { return maxDepth; } - inline int getMinSampleCount() const { return minSampleCount; } - inline int getCVFolds() const { return CVFolds; } - inline float getRegressionAccuracy() const { return regressionAccuracy; } - - inline bool getUseSurrogates() const { return useSurrogates; } - inline void setUseSurrogates(bool val) { useSurrogates = val; } - inline bool getUse1SERule() const { return use1SERule; } - inline void setUse1SERule(bool val) { use1SERule = val; } - inline bool getTruncatePrunedTree() const { return truncatePrunedTree; } - inline void setTruncatePrunedTree(bool val) { truncatePrunedTree = val; } - inline cv::Mat getPriors() const { return priors; } - inline void setPriors(const cv::Mat& val) { priors = val; } - - public: - bool useSurrogates; - bool use1SERule; - bool truncatePrunedTree; - Mat priors; - - protected: - int maxCategories; - int maxDepth; - int minSampleCount; - int CVFolds; - float regressionAccuracy; - }; - - struct RTreeParams - { - RTreeParams(); - RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit ); - bool calcVarImportance; - int nactiveVars; - TermCriteria termCrit; - }; - - struct BoostTreeParams - { - BoostTreeParams(); - BoostTreeParams(int boostType, int weakCount, double weightTrimRate); - int boostType; - int weakCount; - double weightTrimRate; - }; - - class DTreesImpl : public DTrees - { - public: - struct WNode - { - WNode() - { - class_idx = sample_count = depth = complexity = 0; - parent = left = right = split = defaultDir = -1; - Tn = INT_MAX; - value = maxlr = alpha = node_risk = tree_risk = tree_error = 0.; - } - - int class_idx; - double Tn; - double value; - - int parent; - int left; - int right; - int defaultDir; - - int split; - - int sample_count; - int depth; - double maxlr; - - // global pruning data - int complexity; - double alpha; - double node_risk, tree_risk, tree_error; - }; - - struct WSplit - { - WSplit() - { - varIdx = next = 0; - inversed = false; - quality = c = 0.f; - subsetOfs = -1; - } - - int varIdx; - bool inversed; - float quality; - int next; - float c; - int subsetOfs; - }; - - struct WorkData - { - WorkData(const Ptr& _data); - - Ptr data; - vector wnodes; - vector wsplits; - vector wsubsets; - vector cv_Tn; - vector cv_node_risk; - vector cv_node_error; - vector cv_labels; - vector sample_weights; - vector cat_responses; - vector ord_responses; - vector sidx; - int maxSubsetSize; - }; - - inline int getMaxCategories() const CV_OVERRIDE { return params.getMaxCategories(); } - inline void setMaxCategories(int val) CV_OVERRIDE { params.setMaxCategories(val); } - inline int getMaxDepth() const CV_OVERRIDE { return params.getMaxDepth(); } - inline void setMaxDepth(int val) CV_OVERRIDE { params.setMaxDepth(val); } - inline int getMinSampleCount() const CV_OVERRIDE { return params.getMinSampleCount(); } - inline void setMinSampleCount(int val) CV_OVERRIDE { params.setMinSampleCount(val); } - inline int getCVFolds() const CV_OVERRIDE { return params.getCVFolds(); } - inline void setCVFolds(int val) CV_OVERRIDE { params.setCVFolds(val); } - inline bool getUseSurrogates() const CV_OVERRIDE { return params.getUseSurrogates(); } - inline void 
setUseSurrogates(bool val) CV_OVERRIDE { params.setUseSurrogates(val); } - inline bool getUse1SERule() const CV_OVERRIDE { return params.getUse1SERule(); } - inline void setUse1SERule(bool val) CV_OVERRIDE { params.setUse1SERule(val); } - inline bool getTruncatePrunedTree() const CV_OVERRIDE { return params.getTruncatePrunedTree(); } - inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { params.setTruncatePrunedTree(val); } - inline float getRegressionAccuracy() const CV_OVERRIDE { return params.getRegressionAccuracy(); } - inline void setRegressionAccuracy(float val) CV_OVERRIDE { params.setRegressionAccuracy(val); } - inline cv::Mat getPriors() const CV_OVERRIDE { return params.getPriors(); } - inline void setPriors(const cv::Mat& val) CV_OVERRIDE { params.setPriors(val); } - - DTreesImpl(); - virtual ~DTreesImpl() CV_OVERRIDE; - virtual void clear() CV_OVERRIDE; - - String getDefaultName() const CV_OVERRIDE { return "opencv_ml_dtree"; } - bool isTrained() const CV_OVERRIDE { return !roots.empty(); } - bool isClassifier() const CV_OVERRIDE { return _isClassifier; } - int getVarCount() const CV_OVERRIDE { return varType.empty() ? 0 : (int)(varType.size() - 1); } - int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; } - int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; } - - virtual void setDParams(const TreeParams& _params); - virtual void startTraining( const Ptr& trainData, int flags ); - virtual void endTraining(); - virtual void initCompVarIdx(); - virtual bool train( const Ptr& trainData, int flags ) CV_OVERRIDE; - - virtual int addTree( const vector& sidx ); - virtual int addNodeAndTrySplit( int parent, const vector& sidx ); - virtual const vector& getActiveVars(); - virtual int findBestSplit( const vector& _sidx ); - virtual void calcValue( int nidx, const vector& _sidx ); - - virtual WSplit findSplitOrdClass( int vi, const vector& _sidx, double initQuality ); - - // simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector. 
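The comment above describes the variant of k-means used by the clusterCategories helper declared just below: each input vector carries a weight (its L1 norm), so the only change from textbook k-means is that the centroid update becomes a weighted mean. A standalone sketch of that update step for one coordinate, with hypothetical names (not the module's implementation):

    #include <vector>
    #include <cstdio>

    // Weighted mean of the members assigned to one cluster.
    static double weightedCentroid(const std::vector<double>& v,
                                   const std::vector<double>& wt)
    {
        double sw = 0, swx = 0;
        for (size_t i = 0; i < v.size(); i++)
        {
            sw  += wt[i];          // total weight of the cluster
            swx += wt[i] * v[i];   // weighted coordinate sum
        }
        return sw > 0 ? swx / sw : 0;
    }

    int main()
    {
        std::vector<double> v  = { 0.0, 10.0 };
        std::vector<double> wt = { 1.0, 3.0 };    // second point counts 3x
        printf("%g\n", weightedCentroid(v, wt));  // 7.5, not the plain mean 5
        return 0;
    }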
- virtual void clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels ); - virtual WSplit findSplitCatClass( int vi, const vector& _sidx, double initQuality, int* subset ); - - virtual WSplit findSplitOrdReg( int vi, const vector& _sidx, double initQuality ); - virtual WSplit findSplitCatReg( int vi, const vector& _sidx, double initQuality, int* subset ); - - virtual int calcDir( int splitidx, const vector& _sidx, vector& _sleft, vector& _sright ); - virtual int pruneCV( int root ); - - virtual double updateTreeRNC( int root, double T, int fold ); - virtual bool cutTree( int root, double T, int fold, double min_alpha ); - virtual float predictTrees( const Range& range, const Mat& sample, int flags ) const; - virtual float predict( InputArray inputs, OutputArray outputs, int flags ) const CV_OVERRIDE; - - virtual void writeTrainingParams( FileStorage& fs ) const; - virtual void writeParams( FileStorage& fs ) const; - virtual void writeSplit( FileStorage& fs, int splitidx ) const; - virtual void writeNode( FileStorage& fs, int nidx, int depth ) const; - virtual void writeTree( FileStorage& fs, int root ) const; - virtual void write( FileStorage& fs ) const CV_OVERRIDE; - - virtual void readParams( const FileNode& fn ); - virtual int readSplit( const FileNode& fn ); - virtual int readNode( const FileNode& fn ); - virtual int readTree( const FileNode& fn ); - virtual void read( const FileNode& fn ) CV_OVERRIDE; - - virtual const std::vector& getRoots() const CV_OVERRIDE { return roots; } - virtual const std::vector& getNodes() const CV_OVERRIDE { return nodes; } - virtual const std::vector& getSplits() const CV_OVERRIDE { return splits; } - virtual const std::vector& getSubsets() const CV_OVERRIDE { return subsets; } - - TreeParams params; - - vector varIdx; - vector compVarIdx; - vector varType; - vector catOfs; - vector catMap; - vector roots; - vector nodes; - vector splits; - vector subsets; - vector classLabels; - vector missingSubst; - vector varMapping; - bool _isClassifier; - - Ptr w; - }; - - template - static inline void readVectorOrMat(const FileNode & node, std::vector & v) - { - if (node.type() == FileNode::MAP) - { - Mat m; - node >> m; - m.copyTo(v); - } - else if (node.type() == FileNode::SEQ) - { - node >> v; - } - } - -}} - -#endif /* __OPENCV_ML_PRECOMP_HPP__ */ diff --git a/modules/ml/src/rtrees.cpp b/modules/ml/src/rtrees.cpp deleted file mode 100644 index 2cad961f99..0000000000 --- a/modules/ml/src/rtrees.cpp +++ /dev/null @@ -1,531 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -namespace cv { -namespace ml { - -////////////////////////////////////////////////////////////////////////////////////////// -// Random trees // -////////////////////////////////////////////////////////////////////////////////////////// -RTreeParams::RTreeParams() -{ - CV_TRACE_FUNCTION(); - calcVarImportance = false; - nactiveVars = 0; - termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1); -} - -RTreeParams::RTreeParams(bool _calcVarImportance, - int _nactiveVars, - TermCriteria _termCrit ) -{ - CV_TRACE_FUNCTION(); - calcVarImportance = _calcVarImportance; - nactiveVars = _nactiveVars; - termCrit = _termCrit; -} - - -class DTreesImplForRTrees CV_FINAL : public DTreesImpl -{ -public: - DTreesImplForRTrees() - { - CV_TRACE_FUNCTION(); - params.setMaxDepth(5); - params.setMinSampleCount(10); - params.setRegressionAccuracy(0.f); - params.useSurrogates = false; - params.setMaxCategories(10); - params.setCVFolds(0); - params.use1SERule = false; - params.truncatePrunedTree = false; - params.priors = Mat(); - oobError = 0; - } - virtual ~DTreesImplForRTrees() {} - - void clear() CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - DTreesImpl::clear(); - oobError = 0.; - } - - const vector& getActiveVars() CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - RNG &rng = theRNG(); - int i, nvars = (int)allVars.size(), m = (int)activeVars.size(); - for( i = 0; i < nvars; i++ ) - { - int i1 = rng.uniform(0, nvars); - int i2 = rng.uniform(0, nvars); - std::swap(allVars[i1], allVars[i2]); - } - for( i = 0; i < m; i++ ) - activeVars[i] = allVars[i]; - return activeVars; - } - - void startTraining( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - CV_Assert(!trainData.empty()); - DTreesImpl::startTraining(trainData, flags); - int nvars = w->data->getNVars(); - int i, m = rparams.nactiveVars > 0 ? 
rparams.nactiveVars : cvRound(std::sqrt((double)nvars)); - m = std::min(std::max(m, 1), nvars); - allVars.resize(nvars); - activeVars.resize(m); - for( i = 0; i < nvars; i++ ) - allVars[i] = varIdx[i]; - } - - void endTraining() CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - DTreesImpl::endTraining(); - vector a, b; - std::swap(allVars, a); - std::swap(activeVars, b); - } - - bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - RNG &rng = theRNG(); - CV_Assert(!trainData.empty()); - startTraining(trainData, flags); - int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ? - rparams.termCrit.maxCount : 10000; - int i, j, k, vi, vi_, n = (int)w->sidx.size(); - int nclasses = (int)classLabels.size(); - double eps = (rparams.termCrit.type & TermCriteria::EPS) != 0 && - rparams.termCrit.epsilon > 0 ? rparams.termCrit.epsilon : 0.; - vector sidx(n); - vector oobmask(n); - vector oobidx; - vector oobperm; - vector oobres(n, 0.); - vector oobcount(n, 0); - vector oobvotes(n*nclasses, 0); - int nvars = w->data->getNVars(); - int nallvars = w->data->getNAllVars(); - const int* vidx = !varIdx.empty() ? &varIdx[0] : 0; - vector samplebuf(nallvars); - Mat samples = w->data->getSamples(); - float* psamples = samples.ptr(); - size_t sstep0 = samples.step1(), sstep1 = 1; - Mat sample0, sample(nallvars, 1, CV_32F, &samplebuf[0]); - int predictFlags = _isClassifier ? (PREDICT_MAX_VOTE + RAW_OUTPUT) : PREDICT_SUM; - - bool calcOOBError = eps > 0 || rparams.calcVarImportance; - double max_response = 0.; - - if( w->data->getLayout() == COL_SAMPLE ) - std::swap(sstep0, sstep1); - - if( !_isClassifier ) - { - for( i = 0; i < n; i++ ) - { - double val = std::abs(w->ord_responses[w->sidx[i]]); - max_response = std::max(max_response, val); - } - CV_Assert(fabs(max_response) > 0); - } - - if( rparams.calcVarImportance ) - varImportance.resize(nallvars, 0.f); - - for( treeidx = 0; treeidx < ntrees; treeidx++ ) - { - for( i = 0; i < n; i++ ) - oobmask[i] = (uchar)1; - - for( i = 0; i < n; i++ ) - { - j = rng.uniform(0, n); - sidx[i] = w->sidx[j]; - oobmask[j] = (uchar)0; - } - int root = addTree( sidx ); - if( root < 0 ) - return false; - - if( calcOOBError ) - { - oobidx.clear(); - for( i = 0; i < n; i++ ) - { - if( oobmask[i] ) - oobidx.push_back(i); - } - int n_oob = (int)oobidx.size(); - // if there is no out-of-bag samples, we can not compute OOB error - // nor update the variable importance vector; so we proceed to the next tree - if( n_oob == 0 ) - continue; - double ncorrect_responses = 0.; - - oobError = 0.; - for( i = 0; i < n_oob; i++ ) - { - j = oobidx[i]; - sample = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) ); - - double val = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags); - double sample_weight = w->sample_weights[w->sidx[j]]; - if( !_isClassifier ) - { - oobres[j] += val; - oobcount[j]++; - double true_val = w->ord_responses[w->sidx[j]]; - double a = oobres[j]/oobcount[j] - true_val; - oobError += sample_weight * a*a; - val = (val - true_val)/max_response; - ncorrect_responses += std::exp( -val*val ); - } - else - { - int ival = cvRound(val); - //Voting scheme to combine OOB errors of each tree - int* votes = &oobvotes[j*nclasses]; - votes[ival]++; - int best_class = 0; - for( k = 1; k < nclasses; k++ ) - if( votes[best_class] < votes[k] ) - best_class = k; - int diff = best_class != w->cat_responses[w->sidx[j]]; - oobError += sample_weight * diff; - ncorrect_responses += diff == 0; - } - } - - 
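// Average the accumulated, sample-weighted error over the out-of-bag set to
// obtain the running OOB estimate for the ensemble built so far; training
// stops early once this drops below the epsilon of the termination criteria
// (see the break at the end of the tree loop below).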
oobError /= n_oob; - if( rparams.calcVarImportance && n_oob > 1 ) - { - Mat sample_clone; - oobperm.resize(n_oob); - for( i = 0; i < n_oob; i++ ) - oobperm[i] = oobidx[i]; - for (i = n_oob - 1; i > 0; --i) //Randomly shuffle indices so we can permute features - { - int r_i = rng.uniform(0, n_oob); - std::swap(oobperm[i], oobperm[r_i]); - } - - for( vi_ = 0; vi_ < nvars; vi_++ ) - { - vi = vidx ? vidx[vi_] : vi_; //Ensure that only the user specified predictors are used for training - double ncorrect_responses_permuted = 0; - - for( i = 0; i < n_oob; i++ ) - { - j = oobidx[i]; - int vj = oobperm[i]; - sample0 = Mat( nallvars, 1, CV_32F, psamples + sstep0*w->sidx[j], sstep1*sizeof(psamples[0]) ); - sample0.copyTo(sample_clone); //create a copy so we don't mess up the original data - sample_clone.at(vi) = psamples[sstep0*w->sidx[vj] + sstep1*vi]; - - double val = predictTrees(Range(treeidx, treeidx+1), sample_clone, predictFlags); - if( !_isClassifier ) - { - val = (val - w->ord_responses[w->sidx[j]])/max_response; - ncorrect_responses_permuted += exp( -val*val ); - } - else - { - ncorrect_responses_permuted += cvRound(val) == w->cat_responses[w->sidx[j]]; - } - } - varImportance[vi] += (float)(ncorrect_responses - ncorrect_responses_permuted); - } - } - } - if( calcOOBError && oobError < eps ) - break; - } - - if( rparams.calcVarImportance ) - { - for( vi_ = 0; vi_ < nallvars; vi_++ ) - varImportance[vi_] = std::max(varImportance[vi_], 0.f); - normalize(varImportance, varImportance, 1., 0, NORM_L1); - } - endTraining(); - return true; - } - - void writeTrainingParams( FileStorage& fs ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - DTreesImpl::writeTrainingParams(fs); - fs << "nactive_vars" << rparams.nactiveVars; - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - if( roots.empty() ) - CV_Error( CV_StsBadArg, "RTrees have not been trained" ); - - writeFormat(fs); - writeParams(fs); - - fs << "oob_error" << oobError; - if( !varImportance.empty() ) - fs << "var_importance" << varImportance; - - int k, ntrees = (int)roots.size(); - - fs << "ntrees" << ntrees - << "trees" << "["; - - for( k = 0; k < ntrees; k++ ) - { - fs << "{"; - writeTree(fs, roots[k]); - fs << "}"; - } - - fs << "]"; - } - - void readParams( const FileNode& fn ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - DTreesImpl::readParams(fn); - - FileNode tparams_node = fn["training_params"]; - rparams.nactiveVars = (int)tparams_node["nactive_vars"]; - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - clear(); - - //int nclasses = (int)fn["nclasses"]; - //int nsamples = (int)fn["nsamples"]; - oobError = (double)fn["oob_error"]; - int ntrees = (int)fn["ntrees"]; - - readVectorOrMat(fn["var_importance"], varImportance); - - readParams(fn); - - FileNode trees_node = fn["trees"]; - FileNodeIterator it = trees_node.begin(); - CV_Assert( ntrees == (int)trees_node.size() ); - - for( int treeidx = 0; treeidx < ntrees; treeidx++, ++it ) - { - FileNode nfn = (*it)["nodes"]; - readTree(nfn); - } - } - - void getVotes( InputArray input, OutputArray output, int flags ) const - { - CV_TRACE_FUNCTION(); - CV_Assert( !roots.empty() ); - int nclasses = (int)classLabels.size(), ntrees = (int)roots.size(); - Mat samples = input.getMat(), results; - int i, j, nsamples = samples.rows; - - int predictType = flags & PREDICT_MASK; - if( predictType == PREDICT_AUTO ) - { - predictType = !_isClassifier || (classLabels.size() == 2 && (flags & RAW_OUTPUT) != 0) ? 
- PREDICT_SUM : PREDICT_MAX_VOTE; - } - - if( predictType == PREDICT_SUM ) - { - output.create(nsamples, ntrees, CV_32F); - results = output.getMat(); - for( i = 0; i < nsamples; i++ ) - { - for( j = 0; j < ntrees; j++ ) - { - float val = predictTrees( Range(j, j+1), samples.row(i), flags); - results.at (i, j) = val; - } - } - } else - { - vector votes; - output.create(nsamples+1, nclasses, CV_32S); - results = output.getMat(); - - for ( j = 0; j < nclasses; j++) - { - results.at (0, j) = classLabels[j]; - } - - for( i = 0; i < nsamples; i++ ) - { - votes.clear(); - for( j = 0; j < ntrees; j++ ) - { - int val = (int)predictTrees( Range(j, j+1), samples.row(i), flags); - votes.push_back(val); - } - - for ( j = 0; j < nclasses; j++) - { - results.at (i+1, j) = (int)std::count(votes.begin(), votes.end(), classLabels[j]); - } - } - } - } - - double getOOBError() const { - return oobError; - } - - RTreeParams rparams; - double oobError; - vector varImportance; - vector allVars, activeVars; -}; - - -class RTreesImpl CV_FINAL : public RTrees -{ -public: - inline bool getCalculateVarImportance() const CV_OVERRIDE { return impl.rparams.calcVarImportance; } - inline void setCalculateVarImportance(bool val) CV_OVERRIDE { impl.rparams.calcVarImportance = val; } - inline int getActiveVarCount() const CV_OVERRIDE { return impl.rparams.nactiveVars; } - inline void setActiveVarCount(int val) CV_OVERRIDE { impl.rparams.nactiveVars = val; } - inline TermCriteria getTermCriteria() const CV_OVERRIDE { return impl.rparams.termCrit; } - inline void setTermCriteria(const TermCriteria& val) CV_OVERRIDE { impl.rparams.termCrit = val; } - - inline int getMaxCategories() const CV_OVERRIDE { return impl.params.getMaxCategories(); } - inline void setMaxCategories(int val) CV_OVERRIDE { impl.params.setMaxCategories(val); } - inline int getMaxDepth() const CV_OVERRIDE { return impl.params.getMaxDepth(); } - inline void setMaxDepth(int val) CV_OVERRIDE { impl.params.setMaxDepth(val); } - inline int getMinSampleCount() const CV_OVERRIDE { return impl.params.getMinSampleCount(); } - inline void setMinSampleCount(int val) CV_OVERRIDE { impl.params.setMinSampleCount(val); } - inline int getCVFolds() const CV_OVERRIDE { return impl.params.getCVFolds(); } - inline void setCVFolds(int val) CV_OVERRIDE { impl.params.setCVFolds(val); } - inline bool getUseSurrogates() const CV_OVERRIDE { return impl.params.getUseSurrogates(); } - inline void setUseSurrogates(bool val) CV_OVERRIDE { impl.params.setUseSurrogates(val); } - inline bool getUse1SERule() const CV_OVERRIDE { return impl.params.getUse1SERule(); } - inline void setUse1SERule(bool val) CV_OVERRIDE { impl.params.setUse1SERule(val); } - inline bool getTruncatePrunedTree() const CV_OVERRIDE { return impl.params.getTruncatePrunedTree(); } - inline void setTruncatePrunedTree(bool val) CV_OVERRIDE { impl.params.setTruncatePrunedTree(val); } - inline float getRegressionAccuracy() const CV_OVERRIDE { return impl.params.getRegressionAccuracy(); } - inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); } - inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); } - inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); } - inline void getVotes(InputArray input, OutputArray output, int flags) const CV_OVERRIDE {return impl.getVotes(input,output,flags);} - - RTreesImpl() {} - virtual ~RTreesImpl() CV_OVERRIDE {} - - String getDefaultName() const CV_OVERRIDE { return "opencv_ml_rtrees"; } - 
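For reference, a minimal sketch of driving the implementation deleted above through the public cv::ml::RTrees interface; the toy samples, labels and termination values are illustrative only, but the calls themselves are the module's documented API:

#include <cstdio>
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;

int main()
{
    // two well-separated 2-D clusters, four samples per class (toy data)
    float samplesData[] = { 1,1, 1,2, 2,1, 2,2, 8,8, 8,9, 9,8, 9,9 };
    int labelsData[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
    Mat samples(8, 2, CV_32F, samplesData);
    Mat labels(8, 1, CV_32S, labelsData);

    Ptr<ml::RTrees> forest = ml::RTrees::create();
    // COUNT bounds the number of trees, EPS bounds the OOB error (see train() above)
    forest->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 50, 0.01));
    forest->setCalculateVarImportance(true);   // enables the permutation loop above
    forest->train(ml::TrainData::create(samples, ml::ROW_SAMPLE, labels));

    Mat query = (Mat_<float>(1, 2) << 8.5f, 8.5f);
    std::printf("predicted class: %.0f\n", forest->predict(query));

    Mat votes;   // row 0 holds the class labels, following rows the vote counts
    forest->getVotes(query, votes, 0);
    std::cout << votes << std::endl;
    std::cout << forest->getVarImportance() << std::endl;
    return 0;
}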
- bool train( const Ptr& trainData, int flags ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - CV_Assert(!trainData.empty()); - if (impl.getCVFolds() != 0) - CV_Error(Error::StsBadArg, "Cross validation for RTrees is not implemented"); - return impl.train(trainData, flags); - } - - float predict( InputArray samples, OutputArray results, int flags ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - CV_CheckEQ(samples.cols(), getVarCount(), ""); - return impl.predict(samples, results, flags); - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - impl.write(fs); - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - impl.read(fn); - } - - Mat getVarImportance() const CV_OVERRIDE { return Mat_(impl.varImportance, true); } - int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); } - - bool isTrained() const CV_OVERRIDE { return impl.isTrained(); } - bool isClassifier() const CV_OVERRIDE { return impl.isClassifier(); } - - const vector& getRoots() const CV_OVERRIDE { return impl.getRoots(); } - const vector& getNodes() const CV_OVERRIDE { return impl.getNodes(); } - const vector& getSplits() const CV_OVERRIDE { return impl.getSplits(); } - const vector& getSubsets() const CV_OVERRIDE { return impl.getSubsets(); } - double getOOBError() const CV_OVERRIDE { return impl.getOOBError(); } - - - DTreesImplForRTrees impl; -}; - - -Ptr RTrees::create() -{ - CV_TRACE_FUNCTION(); - return makePtr(); -} - -//Function needed for Python and Java wrappers -Ptr RTrees::load(const String& filepath, const String& nodeName) -{ - CV_TRACE_FUNCTION(); - return Algorithm::load(filepath, nodeName); -} - -}} - -// End of file. diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp deleted file mode 100644 index 40c18c03ea..0000000000 --- a/modules/ml/src/svm.cpp +++ /dev/null @@ -1,2357 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -#include -#include - -/****************************************************************************************\ - COPYRIGHT NOTICE - ---------------- - - The code has been derived from libsvm library (version 2.6) - (http://www.csie.ntu.edu.tw/~cjlin/libsvm). - - Here is the original copyright: ------------------------------------------------------------------------------------------- - Copyright (c) 2000-2003 Chih-Chung Chang and Chih-Jen Lin - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither name of copyright holders nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -\****************************************************************************************/ - -namespace cv { namespace ml { - -typedef float Qfloat; -const int QFLOAT_TYPE = DataDepth::value; - -// Param Grid -static void checkParamGrid(const ParamGrid& pg) -{ - if( pg.minVal > pg.maxVal ) - CV_Error( CV_StsBadArg, "Lower bound of the grid must be less then the upper one" ); - if( pg.minVal < DBL_EPSILON ) - CV_Error( CV_StsBadArg, "Lower bound of the grid must be positive" ); - if( pg.logStep < 1. 
+ FLT_EPSILON ) - CV_Error( CV_StsBadArg, "Grid step must greater than 1" ); -} - -// SVM training parameters -struct SvmParams -{ - int svmType; - int kernelType; - double gamma; - double coef0; - double degree; - double C; - double nu; - double p; - Mat classWeights; - TermCriteria termCrit; - - SvmParams() - { - svmType = SVM::C_SVC; - kernelType = SVM::RBF; - degree = 0; - gamma = 1; - coef0 = 0; - C = 1; - nu = 0; - p = 0; - termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON ); - } - - SvmParams( int _svmType, int _kernelType, - double _degree, double _gamma, double _coef0, - double _Con, double _nu, double _p, - const Mat& _classWeights, TermCriteria _termCrit ) - { - svmType = _svmType; - kernelType = _kernelType; - degree = _degree; - gamma = _gamma; - coef0 = _coef0; - C = _Con; - nu = _nu; - p = _p; - classWeights = _classWeights; - termCrit = _termCrit; - } - -}; - -/////////////////////////////////////// SVM kernel /////////////////////////////////////// -class SVMKernelImpl CV_FINAL : public SVM::Kernel -{ -public: - SVMKernelImpl( const SvmParams& _params = SvmParams() ) - { - params = _params; - } - - int getType() const CV_OVERRIDE - { - return params.kernelType; - } - - void calc_non_rbf_base( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results, - double alpha, double beta ) - { - int j, k; - for( j = 0; j < vcount; j++ ) - { - const float* sample = &vecs[j*var_count]; - double s = 0; - for( k = 0; k <= var_count - 4; k += 4 ) - s += sample[k]*another[k] + sample[k+1]*another[k+1] + - sample[k+2]*another[k+2] + sample[k+3]*another[k+3]; - for( ; k < var_count; k++ ) - s += sample[k]*another[k]; - results[j] = (Qfloat)(s*alpha + beta); - } - } - - void calc_linear( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - calc_non_rbf_base( vcount, var_count, vecs, another, results, 1, 0 ); - } - - void calc_poly( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - Mat R( 1, vcount, QFLOAT_TYPE, results ); - calc_non_rbf_base( vcount, var_count, vecs, another, results, params.gamma, params.coef0 ); - if( vcount > 0 ) - pow( R, params.degree, R ); - } - - void calc_sigmoid( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - int j; - calc_non_rbf_base( vcount, var_count, vecs, another, results, - 2*params.gamma, 2*params.coef0 ); - // TODO: speedup this - for( j = 0; j < vcount; j++ ) - { - Qfloat t = results[j]; - Qfloat e = std::exp(std::abs(t)); // Inf value is possible here - Qfloat r = (Qfloat)((e - 1.) 
/ (e + 1.)); // NaN value is possible here (Inf/Inf or similar) - if (cvIsNaN(r)) - r = std::numeric_limits::infinity(); - if (t < 0) - r = -r; - CV_DbgAssert(!cvIsNaN(r)); - results[j] = r; - } - } - - void calc_rbf( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - double gamma = -params.gamma; - int j, k; - - for( j = 0; j < vcount; j++ ) - { - const float* sample = &vecs[j*var_count]; - double s = 0; - - for( k = 0; k <= var_count - 4; k += 4 ) - { - double t0 = sample[k] - another[k]; - double t1 = sample[k+1] - another[k+1]; - - s += t0*t0 + t1*t1; - - t0 = sample[k+2] - another[k+2]; - t1 = sample[k+3] - another[k+3]; - - s += t0*t0 + t1*t1; - } - - for( ; k < var_count; k++ ) - { - double t0 = sample[k] - another[k]; - s += t0*t0; - } - results[j] = (Qfloat)(s*gamma); - } - - if( vcount > 0 ) - { - Mat R( 1, vcount, QFLOAT_TYPE, results ); - exp( R, R ); - } - } - - /// Histogram intersection kernel - void calc_intersec( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - int j, k; - for( j = 0; j < vcount; j++ ) - { - const float* sample = &vecs[j*var_count]; - double s = 0; - for( k = 0; k <= var_count - 4; k += 4 ) - s += std::min(sample[k],another[k]) + std::min(sample[k+1],another[k+1]) + - std::min(sample[k+2],another[k+2]) + std::min(sample[k+3],another[k+3]); - for( ; k < var_count; k++ ) - s += std::min(sample[k],another[k]); - results[j] = (Qfloat)(s); - } - } - - /// Exponential chi2 kernel - void calc_chi2( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) - { - Mat R( 1, vcount, QFLOAT_TYPE, results ); - double gamma = -params.gamma; - int j, k; - for( j = 0; j < vcount; j++ ) - { - const float* sample = &vecs[j*var_count]; - double chi2 = 0; - for(k = 0 ; k < var_count; k++ ) - { - double d = sample[k]-another[k]; - double devisor = sample[k]+another[k]; - /// if devisor == 0, the Chi2 distance would be zero, - // but calculation would rise an error because of dividing by zero - if (devisor != 0) - { - chi2 += d*d/devisor; - } - } - results[j] = (Qfloat) (gamma*chi2); - } - if( vcount > 0 ) - exp( R, R ); - } - - void calc( int vcount, int var_count, const float* vecs, - const float* another, Qfloat* results ) CV_OVERRIDE - { - switch( params.kernelType ) - { - case SVM::LINEAR: - calc_linear(vcount, var_count, vecs, another, results); - break; - case SVM::RBF: - calc_rbf(vcount, var_count, vecs, another, results); - break; - case SVM::POLY: - calc_poly(vcount, var_count, vecs, another, results); - break; - case SVM::SIGMOID: - calc_sigmoid(vcount, var_count, vecs, another, results); - break; - case SVM::CHI2: - calc_chi2(vcount, var_count, vecs, another, results); - break; - case SVM::INTER: - calc_intersec(vcount, var_count, vecs, another, results); - break; - default: - CV_Error(CV_StsBadArg, "Unknown kernel type"); - } - const Qfloat max_val = (Qfloat)(FLT_MAX*1e-3); - for( int j = 0; j < vcount; j++ ) - { - if (!(results[j] <= max_val)) // handle NaNs too - results[j] = max_val; - } - } - - SvmParams params; -}; - - - -///////////////////////////////////////////////////////////////////////// - -static void sortSamplesByClasses( const Mat& _samples, const Mat& _responses, - vector& sidx_all, vector& class_ranges ) -{ - int i, nsamples = _samples.rows; - CV_Assert( _responses.isContinuous() && _responses.checkVector(1, CV_32S) == nsamples ); - - setRangeVector(sidx_all, nsamples); - - const int* rptr = _responses.ptr(); - 
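// Sort the sample indices by class label (cmp_lt_idx compares responses
// through the index array) so that every class ends up in one contiguous
// block; class_ranges, filled below, records where each block starts plus the
// final end offset, e.g. responses {1,0,1,0} give two blocks and ranges {0,2,4}.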
std::sort(sidx_all.begin(), sidx_all.end(), cmp_lt_idx(rptr)); - class_ranges.clear(); - class_ranges.push_back(0); - - for( i = 0; i < nsamples; i++ ) - { - if( i == nsamples-1 || rptr[sidx_all[i]] != rptr[sidx_all[i+1]] ) - class_ranges.push_back(i+1); - } -} - -//////////////////////// SVM implementation ////////////////////////////// - -Ptr SVM::getDefaultGridPtr( int param_id) -{ - ParamGrid grid = getDefaultGrid(param_id); // this is not a nice solution.. - return makePtr(grid.minVal, grid.maxVal, grid.logStep); -} - -ParamGrid SVM::getDefaultGrid( int param_id ) -{ - ParamGrid grid; - if( param_id == SVM::C ) - { - grid.minVal = 0.1; - grid.maxVal = 500; - grid.logStep = 5; // total iterations = 5 - } - else if( param_id == SVM::GAMMA ) - { - grid.minVal = 1e-5; - grid.maxVal = 0.6; - grid.logStep = 15; // total iterations = 4 - } - else if( param_id == SVM::P ) - { - grid.minVal = 0.01; - grid.maxVal = 100; - grid.logStep = 7; // total iterations = 4 - } - else if( param_id == SVM::NU ) - { - grid.minVal = 0.01; - grid.maxVal = 0.2; - grid.logStep = 3; // total iterations = 3 - } - else if( param_id == SVM::COEF ) - { - grid.minVal = 0.1; - grid.maxVal = 300; - grid.logStep = 14; // total iterations = 3 - } - else if( param_id == SVM::DEGREE ) - { - grid.minVal = 0.01; - grid.maxVal = 4; - grid.logStep = 7; // total iterations = 3 - } - else - cvError( CV_StsBadArg, "SVM::getDefaultGrid", "Invalid type of parameter " - "(use one of SVM::C, SVM::GAMMA et al.)", __FILE__, __LINE__ ); - return grid; -} - - -class SVMImpl CV_FINAL : public SVM -{ -public: - struct DecisionFunc - { - DecisionFunc(double _rho, int _ofs) : rho(_rho), ofs(_ofs) {} - DecisionFunc() : rho(0.), ofs(0) {} - double rho; - int ofs; - }; - - // Generalized SMO+SVMlight algorithm - // Solves: - // - // min [0.5(\alpha^T Q \alpha) + b^T \alpha] - // - // y^T \alpha = \delta - // y_i = +1 or -1 - // 0 <= alpha_i <= Cp for y_i = 1 - // 0 <= alpha_i <= Cn for y_i = -1 - // - // Given: - // - // Q, b, y, Cp, Cn, and an initial feasible point \alpha - // l is the size of vectors and matrices - // eps is the stopping criterion - // - // solution will be put in \alpha, objective value will be put in obj - // - class Solver - { - public: - enum { MIN_CACHE_SIZE = (40 << 20) /* 40Mb */, MAX_CACHE_SIZE = (500 << 20) /* 500Mb */ }; - - typedef bool (Solver::*SelectWorkingSet)( int& i, int& j ); - typedef Qfloat* (Solver::*GetRow)( int i, Qfloat* row, Qfloat* dst, bool existed ); - typedef void (Solver::*CalcRho)( double& rho, double& r ); - - struct KernelRow - { - KernelRow() { idx = -1; prev = next = 0; } - KernelRow(int _idx, int _prev, int _next) : idx(_idx), prev(_prev), next(_next) {} - int idx; - int prev; - int next; - }; - - struct SolutionInfo - { - SolutionInfo() { obj = rho = upper_bound_p = upper_bound_n = r = 0; } - double obj; - double rho; - double upper_bound_p; - double upper_bound_n; - double r; // for Solver_NU - }; - - void clear() - { - alpha_vec = 0; - select_working_set_func = 0; - calc_rho_func = 0; - get_row_func = 0; - lru_cache.clear(); - } - - Solver( const Mat& _samples, const vector& _y, - vector& _alpha, const vector& _b, - double _Cp, double _Cn, - const Ptr& _kernel, GetRow _get_row, - SelectWorkingSet _select_working_set, CalcRho _calc_rho, - TermCriteria _termCrit ) - { - clear(); - - samples = _samples; - sample_count = samples.rows; - var_count = samples.cols; - - y_vec = _y; - alpha_vec = &_alpha; - alpha_count = (int)alpha_vec->size(); - b_vec = _b; - kernel = _kernel; - - C[0] = _Cn; - 
C[1] = _Cp; - eps = _termCrit.epsilon; - max_iter = _termCrit.maxCount; - - G_vec.resize(alpha_count); - alpha_status_vec.resize(alpha_count); - buf[0].resize(sample_count*2); - buf[1].resize(sample_count*2); - - select_working_set_func = _select_working_set; - CV_Assert(select_working_set_func != 0); - - calc_rho_func = _calc_rho; - CV_Assert(calc_rho_func != 0); - - get_row_func = _get_row; - CV_Assert(get_row_func != 0); - - // assume that for large training sets ~25% of Q matrix is used - int64 csize = (int64)sample_count*sample_count/4; - csize = std::max(csize, (int64)(MIN_CACHE_SIZE/sizeof(Qfloat)) ); - csize = std::min(csize, (int64)(MAX_CACHE_SIZE/sizeof(Qfloat)) ); - max_cache_size = (int)((csize + sample_count-1)/sample_count); - max_cache_size = std::min(std::max(max_cache_size, 1), sample_count); - cache_size = 0; - - lru_cache.clear(); - lru_cache.resize(sample_count+1, KernelRow(-1, 0, 0)); - lru_first = lru_last = 0; - lru_cache_data.create(max_cache_size, sample_count, QFLOAT_TYPE); - } - - Qfloat* get_row_base( int i, bool* _existed ) - { - int i1 = i < sample_count ? i : i - sample_count; - KernelRow& kr = lru_cache[i1+1]; - if( _existed ) - *_existed = kr.idx >= 0; - if( kr.idx < 0 ) - { - if( cache_size < max_cache_size ) - { - kr.idx = cache_size; - cache_size++; - if (!lru_last) - lru_last = i1+1; - } - else - { - KernelRow& last = lru_cache[lru_last]; - kr.idx = last.idx; - last.idx = -1; - lru_cache[last.prev].next = 0; - lru_last = last.prev; - last.prev = 0; - last.next = 0; - } - kernel->calc( sample_count, var_count, samples.ptr(), - samples.ptr(i1), lru_cache_data.ptr(kr.idx) ); - } - else - { - if( kr.next ) - lru_cache[kr.next].prev = kr.prev; - else - lru_last = kr.prev; - if( kr.prev ) - lru_cache[kr.prev].next = kr.next; - else - lru_first = kr.next; - } - if (lru_first) - lru_cache[lru_first].prev = i1+1; - kr.next = lru_first; - kr.prev = 0; - lru_first = i1+1; - - return lru_cache_data.ptr(kr.idx); - } - - Qfloat* get_row_svc( int i, Qfloat* row, Qfloat*, bool existed ) - { - if( !existed ) - { - const schar* _y = &y_vec[0]; - int j, len = sample_count; - - if( _y[i] > 0 ) - { - for( j = 0; j < len; j++ ) - row[j] = _y[j]*row[j]; - } - else - { - for( j = 0; j < len; j++ ) - row[j] = -_y[j]*row[j]; - } - } - return row; - } - - Qfloat* get_row_one_class( int, Qfloat* row, Qfloat*, bool ) - { - return row; - } - - Qfloat* get_row_svr( int i, Qfloat* row, Qfloat* dst, bool ) - { - int j, len = sample_count; - Qfloat* dst_pos = dst; - Qfloat* dst_neg = dst + len; - if( i >= len ) - std::swap(dst_pos, dst_neg); - - for( j = 0; j < len; j++ ) - { - Qfloat t = row[j]; - dst_pos[j] = t; - dst_neg[j] = -t; - } - return dst; - } - - Qfloat* get_row( int i, float* dst ) - { - bool existed = false; - float* row = get_row_base( i, &existed ); - return (this->*get_row_func)( i, row, dst, existed ); - } - - #undef is_upper_bound - #define is_upper_bound(i) (alpha_status[i] > 0) - - #undef is_lower_bound - #define is_lower_bound(i) (alpha_status[i] < 0) - - #undef get_C - #define get_C(i) (C[y[i]>0]) - - #undef update_alpha_status - #define update_alpha_status(i) \ - alpha_status[i] = (schar)(alpha[i] >= get_C(i) ? 1 : alpha[i] <= 0 ? -1 : 0) - - bool solve_generic( SolutionInfo& si ) - { - const schar* y = &y_vec[0]; - double* alpha = &alpha_vec->at(0); - schar* alpha_status = &alpha_status_vec[0]; - double* G = &G_vec[0]; - double* b = &b_vec[0]; - - int iter = 0; - int i, j, k; - - // 1. 
initialize gradient and alpha status - for( i = 0; i < alpha_count; i++ ) - { - update_alpha_status(i); - G[i] = b[i]; - if( fabs(G[i]) > 1e200 ) - return false; - } - - for( i = 0; i < alpha_count; i++ ) - { - if( !is_lower_bound(i) ) - { - const Qfloat *Q_i = get_row( i, &buf[0][0] ); - double alpha_i = alpha[i]; - - for( j = 0; j < alpha_count; j++ ) - G[j] += alpha_i*Q_i[j]; - } - } - - // 2. optimization loop - for(;;) - { - const Qfloat *Q_i, *Q_j; - double C_i, C_j; - double old_alpha_i, old_alpha_j, alpha_i, alpha_j; - double delta_alpha_i, delta_alpha_j; - - #ifdef _DEBUG - for( i = 0; i < alpha_count; i++ ) - { - if( fabs(G[i]) > 1e+300 ) - return false; - - if( fabs(alpha[i]) > 1e16 ) - return false; - } - #endif - - if( (this->*select_working_set_func)( i, j ) != 0 || iter++ >= max_iter ) - break; - - Q_i = get_row( i, &buf[0][0] ); - Q_j = get_row( j, &buf[1][0] ); - - C_i = get_C(i); - C_j = get_C(j); - - alpha_i = old_alpha_i = alpha[i]; - alpha_j = old_alpha_j = alpha[j]; - - if( y[i] != y[j] ) - { - double denom = Q_i[i]+Q_j[j]+2*Q_i[j]; - double delta = (-G[i]-G[j])/MAX(fabs(denom),FLT_EPSILON); - double diff = alpha_i - alpha_j; - alpha_i += delta; - alpha_j += delta; - - if( diff > 0 && alpha_j < 0 ) - { - alpha_j = 0; - alpha_i = diff; - } - else if( diff <= 0 && alpha_i < 0 ) - { - alpha_i = 0; - alpha_j = -diff; - } - - if( diff > C_i - C_j && alpha_i > C_i ) - { - alpha_i = C_i; - alpha_j = C_i - diff; - } - else if( diff <= C_i - C_j && alpha_j > C_j ) - { - alpha_j = C_j; - alpha_i = C_j + diff; - } - } - else - { - double denom = Q_i[i]+Q_j[j]-2*Q_i[j]; - double delta = (G[i]-G[j])/MAX(fabs(denom),FLT_EPSILON); - double sum = alpha_i + alpha_j; - alpha_i -= delta; - alpha_j += delta; - - if( sum > C_i && alpha_i > C_i ) - { - alpha_i = C_i; - alpha_j = sum - C_i; - } - else if( sum <= C_i && alpha_j < 0) - { - alpha_j = 0; - alpha_i = sum; - } - - if( sum > C_j && alpha_j > C_j ) - { - alpha_j = C_j; - alpha_i = sum - C_j; - } - else if( sum <= C_j && alpha_i < 0 ) - { - alpha_i = 0; - alpha_j = sum; - } - } - - // update alpha - alpha[i] = alpha_i; - alpha[j] = alpha_j; - update_alpha_status(i); - update_alpha_status(j); - - // update G - delta_alpha_i = alpha_i - old_alpha_i; - delta_alpha_j = alpha_j - old_alpha_j; - - for( k = 0; k < alpha_count; k++ ) - G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j; - } - - // calculate rho - (this->*calc_rho_func)( si.rho, si.r ); - - // calculate objective value - for( i = 0, si.obj = 0; i < alpha_count; i++ ) - si.obj += alpha[i] * (G[i] + b[i]); - - si.obj *= 0.5; - - si.upper_bound_p = C[1]; - si.upper_bound_n = C[0]; - - return true; - } - - // return 1 if already optimal, return 0 otherwise - bool select_working_set( int& out_i, int& out_j ) - { - // return i,j which maximize -grad(f)^T d , under constraint - // if alpha_i == C, d != +1 - // if alpha_i == 0, d != -1 - double Gmax1 = -DBL_MAX; // max { -grad(f)_i * d | y_i*d = +1 } - int Gmax1_idx = -1; - - double Gmax2 = -DBL_MAX; // max { -grad(f)_i * d | y_i*d = -1 } - int Gmax2_idx = -1; - - const schar* y = &y_vec[0]; - const schar* alpha_status = &alpha_status_vec[0]; - const double* G = &G_vec[0]; - - for( int i = 0; i < alpha_count; i++ ) - { - double t; - - if( y[i] > 0 ) // y = +1 - { - if( !is_upper_bound(i) && (t = -G[i]) > Gmax1 ) // d = +1 - { - Gmax1 = t; - Gmax1_idx = i; - } - if( !is_lower_bound(i) && (t = G[i]) > Gmax2 ) // d = -1 - { - Gmax2 = t; - Gmax2_idx = i; - } - } - else // y = -1 - { - if( !is_upper_bound(i) && (t = -G[i]) > Gmax2 ) // 
d = +1 - { - Gmax2 = t; - Gmax2_idx = i; - } - if( !is_lower_bound(i) && (t = G[i]) > Gmax1 ) // d = -1 - { - Gmax1 = t; - Gmax1_idx = i; - } - } - } - - out_i = Gmax1_idx; - out_j = Gmax2_idx; - - return Gmax1 + Gmax2 < eps; - } - - void calc_rho( double& rho, double& r ) - { - int nr_free = 0; - double ub = DBL_MAX, lb = -DBL_MAX, sum_free = 0; - const schar* y = &y_vec[0]; - const schar* alpha_status = &alpha_status_vec[0]; - const double* G = &G_vec[0]; - - for( int i = 0; i < alpha_count; i++ ) - { - double yG = y[i]*G[i]; - - if( is_lower_bound(i) ) - { - if( y[i] > 0 ) - ub = MIN(ub,yG); - else - lb = MAX(lb,yG); - } - else if( is_upper_bound(i) ) - { - if( y[i] < 0) - ub = MIN(ub,yG); - else - lb = MAX(lb,yG); - } - else - { - ++nr_free; - sum_free += yG; - } - } - - rho = nr_free > 0 ? sum_free/nr_free : (ub + lb)*0.5; - r = 0; - } - - bool select_working_set_nu_svm( int& out_i, int& out_j ) - { - // return i,j which maximize -grad(f)^T d , under constraint - // if alpha_i == C, d != +1 - // if alpha_i == 0, d != -1 - double Gmax1 = -DBL_MAX; // max { -grad(f)_i * d | y_i = +1, d = +1 } - int Gmax1_idx = -1; - - double Gmax2 = -DBL_MAX; // max { -grad(f)_i * d | y_i = +1, d = -1 } - int Gmax2_idx = -1; - - double Gmax3 = -DBL_MAX; // max { -grad(f)_i * d | y_i = -1, d = +1 } - int Gmax3_idx = -1; - - double Gmax4 = -DBL_MAX; // max { -grad(f)_i * d | y_i = -1, d = -1 } - int Gmax4_idx = -1; - - const schar* y = &y_vec[0]; - const schar* alpha_status = &alpha_status_vec[0]; - const double* G = &G_vec[0]; - - for( int i = 0; i < alpha_count; i++ ) - { - double t; - - if( y[i] > 0 ) // y == +1 - { - if( !is_upper_bound(i) && (t = -G[i]) > Gmax1 ) // d = +1 - { - Gmax1 = t; - Gmax1_idx = i; - } - if( !is_lower_bound(i) && (t = G[i]) > Gmax2 ) // d = -1 - { - Gmax2 = t; - Gmax2_idx = i; - } - } - else // y == -1 - { - if( !is_upper_bound(i) && (t = -G[i]) > Gmax3 ) // d = +1 - { - Gmax3 = t; - Gmax3_idx = i; - } - if( !is_lower_bound(i) && (t = G[i]) > Gmax4 ) // d = -1 - { - Gmax4 = t; - Gmax4_idx = i; - } - } - } - - if( MAX(Gmax1 + Gmax2, Gmax3 + Gmax4) < eps ) - return 1; - - if( Gmax1 + Gmax2 > Gmax3 + Gmax4 ) - { - out_i = Gmax1_idx; - out_j = Gmax2_idx; - } - else - { - out_i = Gmax3_idx; - out_j = Gmax4_idx; - } - return 0; - } - - void calc_rho_nu_svm( double& rho, double& r ) - { - int nr_free1 = 0, nr_free2 = 0; - double ub1 = DBL_MAX, ub2 = DBL_MAX; - double lb1 = -DBL_MAX, lb2 = -DBL_MAX; - double sum_free1 = 0, sum_free2 = 0; - - const schar* y = &y_vec[0]; - const schar* alpha_status = &alpha_status_vec[0]; - const double* G = &G_vec[0]; - - for( int i = 0; i < alpha_count; i++ ) - { - double G_i = G[i]; - if( y[i] > 0 ) - { - if( is_lower_bound(i) ) - ub1 = MIN( ub1, G_i ); - else if( is_upper_bound(i) ) - lb1 = MAX( lb1, G_i ); - else - { - ++nr_free1; - sum_free1 += G_i; - } - } - else - { - if( is_lower_bound(i) ) - ub2 = MIN( ub2, G_i ); - else if( is_upper_bound(i) ) - lb2 = MAX( lb2, G_i ); - else - { - ++nr_free2; - sum_free2 += G_i; - } - } - } - - double r1 = nr_free1 > 0 ? sum_free1/nr_free1 : (ub1 + lb1)*0.5; - double r2 = nr_free2 > 0 ? 
sum_free2/nr_free2 : (ub2 + lb2)*0.5; - - rho = (r1 - r2)*0.5; - r = (r1 + r2)*0.5; - } - - /* - ///////////////////////// construct and solve various formulations /////////////////////// - */ - static bool solve_c_svc( const Mat& _samples, const vector& _y, - double _Cp, double _Cn, const Ptr& _kernel, - vector& _alpha, SolutionInfo& _si, TermCriteria termCrit ) - { - int sample_count = _samples.rows; - - _alpha.assign(sample_count, 0.); - vector _b(sample_count, -1.); - - Solver solver( _samples, _y, _alpha, _b, _Cp, _Cn, _kernel, - &Solver::get_row_svc, - &Solver::select_working_set, - &Solver::calc_rho, - termCrit ); - - if( !solver.solve_generic( _si )) - return false; - - for( int i = 0; i < sample_count; i++ ) - _alpha[i] *= _y[i]; - - return true; - } - - - static bool solve_nu_svc( const Mat& _samples, const vector& _y, - double nu, const Ptr& _kernel, - vector& _alpha, SolutionInfo& _si, - TermCriteria termCrit ) - { - int sample_count = _samples.rows; - - _alpha.resize(sample_count); - vector _b(sample_count, 0.); - - double sum_pos = nu * sample_count * 0.5; - double sum_neg = nu * sample_count * 0.5; - - for( int i = 0; i < sample_count; i++ ) - { - double a; - if( _y[i] > 0 ) - { - a = std::min(1.0, sum_pos); - sum_pos -= a; - } - else - { - a = std::min(1.0, sum_neg); - sum_neg -= a; - } - _alpha[i] = a; - } - - Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, - &Solver::get_row_svc, - &Solver::select_working_set_nu_svm, - &Solver::calc_rho_nu_svm, - termCrit ); - - if( !solver.solve_generic( _si )) - return false; - - double inv_r = 1./_si.r; - - for( int i = 0; i < sample_count; i++ ) - _alpha[i] *= _y[i]*inv_r; - - _si.rho *= inv_r; - _si.obj *= (inv_r*inv_r); - _si.upper_bound_p = inv_r; - _si.upper_bound_n = inv_r; - - return true; - } - - static bool solve_one_class( const Mat& _samples, double nu, - const Ptr& _kernel, - vector& _alpha, SolutionInfo& _si, - TermCriteria termCrit ) - { - int sample_count = _samples.rows; - vector _y(sample_count, 1); - vector _b(sample_count, 0.); - - int i, n = cvRound( nu*sample_count ); - - _alpha.resize(sample_count); - for( i = 0; i < sample_count; i++ ) - _alpha[i] = i < n ? 
1 : 0; - - if( n < sample_count ) - _alpha[n] = nu * sample_count - n; - else - _alpha[n-1] = nu * sample_count - (n-1); - - Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, - &Solver::get_row_one_class, - &Solver::select_working_set, - &Solver::calc_rho, - termCrit ); - - return solver.solve_generic(_si); - } - - static bool solve_eps_svr( const Mat& _samples, const vector& _yf, - double p, double C, const Ptr& _kernel, - vector& _alpha, SolutionInfo& _si, - TermCriteria termCrit ) - { - int sample_count = _samples.rows; - int alpha_count = sample_count*2; - - CV_Assert( (int)_yf.size() == sample_count ); - - _alpha.assign(alpha_count, 0.); - vector _y(alpha_count); - vector _b(alpha_count); - - for( int i = 0; i < sample_count; i++ ) - { - _b[i] = p - _yf[i]; - _y[i] = 1; - - _b[i+sample_count] = p + _yf[i]; - _y[i+sample_count] = -1; - } - - Solver solver( _samples, _y, _alpha, _b, C, C, _kernel, - &Solver::get_row_svr, - &Solver::select_working_set, - &Solver::calc_rho, - termCrit ); - - if( !solver.solve_generic( _si )) - return false; - - for( int i = 0; i < sample_count; i++ ) - _alpha[i] -= _alpha[i+sample_count]; - - return true; - } - - - static bool solve_nu_svr( const Mat& _samples, const vector& _yf, - double nu, double C, const Ptr& _kernel, - vector& _alpha, SolutionInfo& _si, - TermCriteria termCrit ) - { - int sample_count = _samples.rows; - int alpha_count = sample_count*2; - double sum = C * nu * sample_count * 0.5; - - CV_Assert( (int)_yf.size() == sample_count ); - - _alpha.resize(alpha_count); - vector _y(alpha_count); - vector _b(alpha_count); - - for( int i = 0; i < sample_count; i++ ) - { - _alpha[i] = _alpha[i + sample_count] = std::min(sum, C); - sum -= _alpha[i]; - - _b[i] = -_yf[i]; - _y[i] = 1; - - _b[i + sample_count] = _yf[i]; - _y[i + sample_count] = -1; - } - - Solver solver( _samples, _y, _alpha, _b, 1., 1., _kernel, - &Solver::get_row_svr, - &Solver::select_working_set_nu_svm, - &Solver::calc_rho_nu_svm, - termCrit ); - - if( !solver.solve_generic( _si )) - return false; - - for( int i = 0; i < sample_count; i++ ) - _alpha[i] -= _alpha[i+sample_count]; - - return true; - } - - int sample_count; - int var_count; - int cache_size; - int max_cache_size; - Mat samples; - SvmParams params; - vector lru_cache; - int lru_first; - int lru_last; - Mat lru_cache_data; - - int alpha_count; - - vector G_vec; - vector* alpha_vec; - vector y_vec; - // -1 - lower bound, 0 - free, 1 - upper bound - vector alpha_status_vec; - vector b_vec; - - vector buf[2]; - double eps; - int max_iter; - double C[2]; // C[0] == Cn, C[1] == Cp - Ptr kernel; - - SelectWorkingSet select_working_set_func; - CalcRho calc_rho_func; - GetRow get_row_func; - }; - - ////////////////////////////////////////////////////////////////////////////////////////// - SVMImpl() - { - clear(); - checkParams(); - } - - ~SVMImpl() - { - clear(); - } - - void clear() CV_OVERRIDE - { - decision_func.clear(); - df_alpha.clear(); - df_index.clear(); - sv.release(); - uncompressed_sv.release(); - } - - Mat getUncompressedSupportVectors() const CV_OVERRIDE - { - return uncompressed_sv; - } - - Mat getSupportVectors() const CV_OVERRIDE - { - return sv; - } - - inline int getType() const CV_OVERRIDE { return params.svmType; } - inline void setType(int val) CV_OVERRIDE { params.svmType = val; } - inline double getGamma() const CV_OVERRIDE { return params.gamma; } - inline void setGamma(double val) CV_OVERRIDE { params.gamma = val; } - inline double getCoef0() const CV_OVERRIDE { return params.coef0; } - 
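As context for these accessors, a minimal sketch of configuring and training the deleted implementation through the public cv::ml::SVM interface; the toy data and parameter values are illustrative only, and checkParams() further below is what enforces the valid ranges:

#include <cstdio>
#include <cfloat>
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;

int main()
{
    // two separable 2-D clusters (toy data)
    float samplesData[] = { 1,1, 1,2, 2,1, 2,2, 8,8, 8,9, 9,8, 9,9 };
    int labelsData[] = { -1, -1, -1, -1, 1, 1, 1, 1 };
    Mat samples(8, 2, CV_32F, samplesData);
    Mat labels(8, 1, CV_32S, labelsData);

    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::C_SVC);   // C must be positive for C_SVC
    svm->setKernel(ml::SVM::RBF);   // gamma must be positive for non-linear kernels
    svm->setC(1.0);
    svm->setGamma(0.5);
    svm->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 1000, FLT_EPSILON));

    svm->train(samples, ml::ROW_SAMPLE, labels);
    Mat query = (Mat_<float>(1, 2) << 8.5f, 8.5f);
    std::printf("predicted label: %.0f\n", svm->predict(query));
    return 0;
}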
inline void setCoef0(double val) CV_OVERRIDE { params.coef0 = val; } - inline double getDegree() const CV_OVERRIDE { return params.degree; } - inline void setDegree(double val) CV_OVERRIDE { params.degree = val; } - inline double getC() const CV_OVERRIDE { return params.C; } - inline void setC(double val) CV_OVERRIDE { params.C = val; } - inline double getNu() const CV_OVERRIDE { return params.nu; } - inline void setNu(double val) CV_OVERRIDE { params.nu = val; } - inline double getP() const CV_OVERRIDE { return params.p; } - inline void setP(double val) CV_OVERRIDE { params.p = val; } - inline cv::Mat getClassWeights() const CV_OVERRIDE { return params.classWeights; } - inline void setClassWeights(const cv::Mat& val) CV_OVERRIDE { params.classWeights = val; } - inline cv::TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } - inline void setTermCriteria(const cv::TermCriteria& val) CV_OVERRIDE { params.termCrit = val; } - - int getKernelType() const CV_OVERRIDE { return params.kernelType; } - void setKernel(int kernelType) CV_OVERRIDE - { - params.kernelType = kernelType; - if (kernelType != CUSTOM) - kernel = makePtr(params); - } - - void setCustomKernel(const Ptr &_kernel) CV_OVERRIDE - { - params.kernelType = CUSTOM; - kernel = _kernel; - } - - void checkParams() - { - int kernelType = params.kernelType; - if (kernelType != CUSTOM) - { - if( kernelType != LINEAR && kernelType != POLY && - kernelType != SIGMOID && kernelType != RBF && - kernelType != INTER && kernelType != CHI2) - CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" ); - - if( kernelType == LINEAR ) - params.gamma = 1; - else if( params.gamma <= 0 ) - CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" ); - - if( kernelType != SIGMOID && kernelType != POLY ) - params.coef0 = 0; - - if( kernelType != POLY ) - params.degree = 0; - else if( params.degree <= 0 ) - CV_Error( CV_StsOutOfRange, "The kernel parameter must be positive" ); - - kernel = makePtr(params); - } - else - { - if (!kernel) - CV_Error( CV_StsBadArg, "Custom kernel is not set" ); - } - - int svmType = params.svmType; - - if( svmType != C_SVC && svmType != NU_SVC && - svmType != ONE_CLASS && svmType != EPS_SVR && - svmType != NU_SVR ) - CV_Error( CV_StsBadArg, "Unknown/unsupported SVM type" ); - - if( svmType == ONE_CLASS || svmType == NU_SVC ) - params.C = 0; - else if( params.C <= 0 ) - CV_Error( CV_StsOutOfRange, "The parameter C must be positive" ); - - if( svmType == C_SVC || svmType == EPS_SVR ) - params.nu = 0; - else if( params.nu <= 0 || params.nu >= 1 ) - CV_Error( CV_StsOutOfRange, "The parameter nu must be between 0 and 1" ); - - if( svmType != EPS_SVR ) - params.p = 0; - else if( params.p <= 0 ) - CV_Error( CV_StsOutOfRange, "The parameter p must be positive" ); - - if( svmType != C_SVC ) - params.classWeights.release(); - - if( !(params.termCrit.type & TermCriteria::EPS) ) - params.termCrit.epsilon = DBL_EPSILON; - params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON); - if( !(params.termCrit.type & TermCriteria::COUNT) ) - params.termCrit.maxCount = INT_MAX; - params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1); - } - - void setParams( const SvmParams& _params) - { - params = _params; - checkParams(); - } - - int getSVCount(int i) const - { - return (i < (int)(decision_func.size()-1) ? 
decision_func[i+1].ofs : - (int)df_index.size()) - decision_func[i].ofs; - } - - bool do_train( const Mat& _samples, const Mat& _responses ) - { - int svmType = params.svmType; - int i, j, k, sample_count = _samples.rows; - vector _alpha; - Solver::SolutionInfo sinfo; - - CV_Assert( _samples.type() == CV_32F ); - var_count = _samples.cols; - - if( svmType == ONE_CLASS || svmType == EPS_SVR || svmType == NU_SVR ) - { - int sv_count = 0; - decision_func.clear(); - - vector _yf; - if( !_responses.empty() ) - _responses.convertTo(_yf, CV_32F); - - bool ok = - svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) : - svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) : - svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false; - - if( !ok ) - return false; - - for( i = 0; i < sample_count; i++ ) - sv_count += fabs(_alpha[i]) > 0; - - CV_Assert(sv_count != 0); - - sv.create(sv_count, _samples.cols, CV_32F); - df_alpha.resize(sv_count); - df_index.resize(sv_count); - - for( i = k = 0; i < sample_count; i++ ) - { - if( std::abs(_alpha[i]) > 0 ) - { - _samples.row(i).copyTo(sv.row(k)); - df_alpha[k] = _alpha[i]; - df_index[k] = k; - k++; - } - } - - decision_func.push_back(DecisionFunc(sinfo.rho, 0)); - } - else - { - int class_count = (int)class_labels.total(); - vector svidx, sidx, sidx_all, sv_tab(sample_count, 0); - Mat temp_samples, class_weights; - vector class_ranges; - vector temp_y; - double nu = params.nu; - CV_Assert( svmType == C_SVC || svmType == NU_SVC ); - - if( svmType == C_SVC && !params.classWeights.empty() ) - { - const Mat cw = params.classWeights; - - if( (cw.cols != 1 && cw.rows != 1) || - (int)cw.total() != class_count || - (cw.type() != CV_32F && cw.type() != CV_64F) ) - CV_Error( CV_StsBadArg, "params.class_weights must be 1d floating-point vector " - "containing as many elements as the number of classes" ); - - cw.convertTo(class_weights, CV_64F, params.C); - //normalize(cw, class_weights, params.C, 0, NORM_L1, CV_64F); - } - - decision_func.clear(); - df_alpha.clear(); - df_index.clear(); - - sortSamplesByClasses( _samples, _responses, sidx_all, class_ranges ); - - //check that while cross-validation there were the samples from all the classes - if ((int)class_ranges.size() < class_count + 1) - CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have " - "been fell out of the sample. Try to reduce " ); - - if( svmType == NU_SVC ) - { - // check if nu is feasible - for( i = 0; i < class_count; i++ ) - { - int ci = class_ranges[i+1] - class_ranges[i]; - for( j = i+1; j< class_count; j++ ) - { - int cj = class_ranges[j+1] - class_ranges[j]; - if( nu*(ci + cj)*0.5 > std::min( ci, cj ) ) - // TODO: add some diagnostic - return false; - } - } - } - - size_t samplesize = _samples.cols*_samples.elemSize(); - - // train n*(n-1)/2 classifiers - for( i = 0; i < class_count; i++ ) - { - for( j = i+1; j < class_count; j++ ) - { - int si = class_ranges[i], ci = class_ranges[i+1] - si; - int sj = class_ranges[j], cj = class_ranges[j+1] - sj; - double Cp = params.C, Cn = Cp; - - temp_samples.create(ci + cj, _samples.cols, _samples.type()); - sidx.resize(ci + cj); - temp_y.resize(ci + cj); - - // form input for the binary classification problem - for( k = 0; k < ci+cj; k++ ) - { - int idx = k < ci ? 
si+k : sj+k-ci; - memcpy(temp_samples.ptr(k), _samples.ptr(sidx_all[idx]), samplesize); - sidx[k] = sidx_all[idx]; - temp_y[k] = k < ci ? 1 : -1; - } - - if( !class_weights.empty() ) - { - Cp = class_weights.at(i); - Cn = class_weights.at(j); - } - - DecisionFunc df; - bool ok = params.svmType == C_SVC ? - Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn, - kernel, _alpha, sinfo, params.termCrit ) : - params.svmType == NU_SVC ? - Solver::solve_nu_svc( temp_samples, temp_y, params.nu, - kernel, _alpha, sinfo, params.termCrit ) : - false; - if( !ok ) - return false; - df.rho = sinfo.rho; - df.ofs = (int)df_index.size(); - decision_func.push_back(df); - - for( k = 0; k < ci + cj; k++ ) - { - if( std::abs(_alpha[k]) > 0 ) - { - int idx = k < ci ? si+k : sj+k-ci; - sv_tab[sidx_all[idx]] = 1; - df_index.push_back(sidx_all[idx]); - df_alpha.push_back(_alpha[k]); - } - } - } - } - - // allocate support vectors and initialize sv_tab - for( i = 0, k = 0; i < sample_count; i++ ) - { - if( sv_tab[i] ) - sv_tab[i] = ++k; - } - - int sv_total = k; - sv.create(sv_total, _samples.cols, _samples.type()); - - for( i = 0; i < sample_count; i++ ) - { - if( !sv_tab[i] ) - continue; - memcpy(sv.ptr(sv_tab[i]-1), _samples.ptr(i), samplesize); - } - - // set sv pointers - int n = (int)df_index.size(); - for( i = 0; i < n; i++ ) - { - CV_Assert( sv_tab[df_index[i]] > 0 ); - df_index[i] = sv_tab[df_index[i]] - 1; - } - } - - optimize_linear_svm(); - - return true; - } - - void optimize_linear_svm() - { - // we optimize only linear SVM: compress all the support vectors into one. - if( params.kernelType != LINEAR ) - return; - - int i, df_count = (int)decision_func.size(); - - for( i = 0; i < df_count; i++ ) - { - if( getSVCount(i) != 1 ) - break; - } - - // if every decision functions uses a single support vector; - // it's already compressed. skip it then. 
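// For LINEAR kernels the decision function is
//   f(x) = sum_j alpha_j * <sv_j, x> - rho = <w, x> - rho,  with w = sum_j alpha_j * sv_j,
// so the support vectors of each binary classifier can be folded into the
// single precomputed vector w (built in the loop below); the originals remain
// available through uncompressed_sv, every df_alpha becomes 1, and df_index[i]
// simply points at row i of the compressed matrix.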
- if( i == df_count ) - return; - - AutoBuffer vbuf(var_count); - double* v = vbuf.data(); - Mat new_sv(df_count, var_count, CV_32F); - - vector new_df; - - for( i = 0; i < df_count; i++ ) - { - float* dst = new_sv.ptr(i); - memset(v, 0, var_count*sizeof(v[0])); - int j, k, sv_count = getSVCount(i); - const DecisionFunc& df = decision_func[i]; - const int* sv_index = &df_index[df.ofs]; - const double* sv_alpha = &df_alpha[df.ofs]; - for( j = 0; j < sv_count; j++ ) - { - const float* src = sv.ptr(sv_index[j]); - double a = sv_alpha[j]; - for( k = 0; k < var_count; k++ ) - v[k] += src[k]*a; - } - for( k = 0; k < var_count; k++ ) - dst[k] = (float)v[k]; - new_df.push_back(DecisionFunc(df.rho, i)); - } - - setRangeVector(df_index, df_count); - df_alpha.assign(df_count, 1.); - sv.copyTo(uncompressed_sv); - std::swap(sv, new_sv); - std::swap(decision_func, new_df); - } - - bool train( const Ptr& data, int ) CV_OVERRIDE - { - CV_Assert(!data.empty()); - clear(); - - checkParams(); - - int svmType = params.svmType; - Mat samples = data->getTrainSamples(); - Mat responses; - - if( svmType == C_SVC || svmType == NU_SVC ) - { - responses = data->getTrainNormCatResponses(); - if( responses.empty() ) - CV_Error(CV_StsBadArg, "in the case of classification problem the responses must be categorical; " - "either specify varType when creating TrainData, or pass integer responses"); - class_labels = data->getClassLabels(); - } - else - responses = data->getTrainResponses(); - - if( !do_train( samples, responses )) - { - clear(); - return false; - } - - return true; - } - - class TrainAutoBody : public ParallelLoopBody - { - public: - TrainAutoBody(const vector& _parameters, - const cv::Mat& _samples, - const cv::Mat& _responses, - const cv::Mat& _labels, - const vector& _sidx, - bool _is_classification, - int _k_fold, - std::vector& _result) : - parameters(_parameters), samples(_samples), responses(_responses), labels(_labels), - sidx(_sidx), is_classification(_is_classification), k_fold(_k_fold), result(_result) - {} - - void operator()( const cv::Range& range ) const CV_OVERRIDE - { - int sample_count = samples.rows; - int var_count_ = samples.cols; - size_t sample_size = var_count_*samples.elemSize(); - - int test_sample_count = (sample_count + k_fold/2)/k_fold; - int train_sample_count = sample_count - test_sample_count; - - // Use a local instance - cv::Ptr svm = makePtr(); - svm->class_labels = labels; - - int rtype = responses.type(); - - Mat temp_train_samples(train_sample_count, var_count_, CV_32F); - Mat temp_test_samples(test_sample_count, var_count_, CV_32F); - Mat temp_train_responses(train_sample_count, 1, rtype); - Mat temp_test_responses; - - for( int p = range.start; p < range.end; p++ ) - { - svm->setParams(parameters[p]); - - double error = 0; - for( int k = 0; k < k_fold; k++ ) - { - int start = (k*sample_count + k_fold/2)/k_fold; - for( int i = 0; i < train_sample_count; i++ ) - { - int j = sidx[(i+start)%sample_count]; - memcpy(temp_train_samples.ptr(i), samples.ptr(j), sample_size); - if( is_classification ) - temp_train_responses.at(i) = responses.at(j); - else if( !responses.empty() ) - temp_train_responses.at(i) = responses.at(j); - } - - // Train SVM on samples - if( !svm->do_train( temp_train_samples, temp_train_responses )) - continue; - - for( int i = 0; i < test_sample_count; i++ ) - { - int j = sidx[(i+start+train_sample_count) % sample_count]; - memcpy(temp_test_samples.ptr(i), samples.ptr(j), sample_size); - } - - svm->predict(temp_test_samples, temp_test_responses, 0); - 
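The loop that follows accumulates the cross-validation score of parameter set p: 0/1 loss for classification, squared error for regression. The same measure in isolation (a hypothetical helper, for illustration only):

```cpp
#include <vector>

// Fold error over one held-out fold: mismatch count for classification,
// sum of squared residuals for regression.
static double foldError(const std::vector<float>& predicted,
                        const std::vector<float>& truth,
                        bool isClassification)
{
    double err = 0;
    for (size_t i = 0; i < predicted.size(); i++)
    {
        if (isClassification)
            err += predicted[i] != truth[i];   // 0/1 loss
        else
        {
            double d = predicted[i] - truth[i];
            err += d * d;                      // squared loss
        }
    }
    return err;
}
```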
for( int i = 0; i < test_sample_count; i++ ) - { - float val = temp_test_responses.at<float>(i); - int j = sidx[(i+start+train_sample_count) % sample_count]; - if( is_classification ) - error += (float)(val != responses.at<int>(j)); - else - { - val -= responses.at<float>(j); - error += val*val; - } - } - } - - result[p] = error; - } - } - - private: - const vector<SvmParams>& parameters; - const cv::Mat& samples; - const cv::Mat& responses; - const cv::Mat& labels; - const vector<int>& sidx; - bool is_classification; - int k_fold; - std::vector<double>& result; - }; - - bool trainAuto( const Ptr<TrainData>& data, int k_fold, - ParamGrid C_grid, ParamGrid gamma_grid, ParamGrid p_grid, - ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid, - bool balanced ) CV_OVERRIDE - { - CV_Assert(!data.empty()); - checkParams(); - - int svmType = params.svmType; - RNG rng((uint64)-1); - - if( svmType == ONE_CLASS ) - // the current implementation of "auto" svm does not support the 1-class case. - return train( data, 0 ); - - clear(); - - CV_Assert( k_fold >= 2 ); - - // All the parameters except, possibly, <coef0> are positive. - // <coef0> is nonnegative - #define CHECK_GRID(grid, param) \ - if( grid.logStep <= 1 ) \ - { \ - grid.minVal = grid.maxVal = params.param; \ - grid.logStep = 10; \ - } \ - else \ - checkParamGrid(grid) - - CHECK_GRID(C_grid, C); - CHECK_GRID(gamma_grid, gamma); - CHECK_GRID(p_grid, p); - CHECK_GRID(nu_grid, nu); - CHECK_GRID(coef_grid, coef0); - CHECK_GRID(degree_grid, degree); - - // these parameters are not used: - if( params.kernelType != POLY ) - degree_grid.minVal = degree_grid.maxVal = params.degree; - if( params.kernelType == LINEAR ) - gamma_grid.minVal = gamma_grid.maxVal = params.gamma; - if( params.kernelType != POLY && params.kernelType != SIGMOID ) - coef_grid.minVal = coef_grid.maxVal = params.coef0; - if( svmType == NU_SVC || svmType == ONE_CLASS ) - C_grid.minVal = C_grid.maxVal = params.C; - if( svmType == C_SVC || svmType == EPS_SVR ) - nu_grid.minVal = nu_grid.maxVal = params.nu; - if( svmType != EPS_SVR ) - p_grid.minVal = p_grid.maxVal = params.p; - - Mat samples = data->getTrainSamples(); - Mat responses; - bool is_classification = false; - Mat class_labels0; - int class_count = (int)class_labels.total(); - - if( svmType == C_SVC || svmType == NU_SVC ) - { - responses = data->getTrainNormCatResponses(); - class_labels = data->getClassLabels(); - class_count = (int)class_labels.total(); - is_classification = true; - - vector<int> temp_class_labels; - setRangeVector(temp_class_labels, class_count); - - // temporarily replace class labels with 0, 1, ..., NCLASSES-1 - class_labels0 = class_labels; - class_labels = Mat(temp_class_labels).clone(); - } - else - responses = data->getTrainResponses(); - - CV_Assert(samples.type() == CV_32F); - - int sample_count = samples.rows; - var_count = samples.cols; - - vector<int> sidx; - setRangeVector(sidx, sample_count); - - // randomly permute training samples - for( int i = 0; i < sample_count; i++ ) - { - int i1 = rng.uniform(0, sample_count); - int i2 = rng.uniform(0, sample_count); - std::swap(sidx[i1], sidx[i2]); - } - - if( is_classification && class_count == 2 && balanced ) - { - // reshuffle the training set in such a way that - // instances of each class are divided more or less evenly - // between the k_fold parts.
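A simplified sketch of that stratification for the two-class case handled here (hypothetical helper; the deleted code below additionally shuffles the entries within each fold):

```cpp
#include <vector>

// Interleave proportional slices of both classes so that every fold
// contains roughly n0/kFold samples of class 0 and n1/kFold of class 1.
static std::vector<int> stratify2(const std::vector<int>& class0,
                                  const std::vector<int>& class1, int kFold)
{
    std::vector<int> out;
    int n0 = (int)class0.size(), n1 = (int)class1.size(), a0 = 0, a1 = 0;
    for (int k = 1; k <= kFold; k++)
    {
        int b0 = k * n0 / kFold, b1 = k * n1 / kFold; // fold boundaries per class
        out.insert(out.end(), class0.begin() + a0, class0.begin() + b0);
        out.insert(out.end(), class1.begin() + a1, class1.begin() + b1);
        a0 = b0; a1 = b1;
    }
    return out; // no fold ends up one-sided
}
```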
- vector sidx0, sidx1; - - for( int i = 0; i < sample_count; i++ ) - { - if( responses.at(sidx[i]) == 0 ) - sidx0.push_back(sidx[i]); - else - sidx1.push_back(sidx[i]); - } - - int n0 = (int)sidx0.size(), n1 = (int)sidx1.size(); - int a0 = 0, a1 = 0; - sidx.clear(); - for( int k = 0; k < k_fold; k++ ) - { - int b0 = ((k+1)*n0 + k_fold/2)/k_fold, b1 = ((k+1)*n1 + k_fold/2)/k_fold; - int a = (int)sidx.size(), b = a + (b0 - a0) + (b1 - a1); - for( int i = a0; i < b0; i++ ) - sidx.push_back(sidx0[i]); - for( int i = a1; i < b1; i++ ) - sidx.push_back(sidx1[i]); - for( int i = 0; i < (b - a); i++ ) - { - int i1 = rng.uniform(a, b); - int i2 = rng.uniform(a, b); - std::swap(sidx[i1], sidx[i2]); - } - a0 = b0; a1 = b1; - } - } - - // If grid.minVal == grid.maxVal, this will allow one and only one pass through the loop with params.var = grid.minVal. - #define FOR_IN_GRID(var, grid) \ - for( params.var = grid.minVal; params.var == grid.minVal || params.var < grid.maxVal; params.var = (grid.minVal == grid.maxVal) ? grid.maxVal + 1 : params.var * grid.logStep ) - - // Create the list of parameters to test - std::vector parameters; - FOR_IN_GRID(C, C_grid) - FOR_IN_GRID(gamma, gamma_grid) - FOR_IN_GRID(p, p_grid) - FOR_IN_GRID(nu, nu_grid) - FOR_IN_GRID(coef0, coef_grid) - FOR_IN_GRID(degree, degree_grid) - { - parameters.push_back(params); - } - - std::vector result(parameters.size()); - TrainAutoBody invoker(parameters, samples, responses, class_labels, sidx, - is_classification, k_fold, result); - parallel_for_(cv::Range(0,(int)parameters.size()), invoker); - - // Extract the best parameters - SvmParams best_params = params; - double min_error = FLT_MAX; - for( int i = 0; i < (int)result.size(); i++ ) - { - if( result[i] < min_error ) - { - min_error = result[i]; - best_params = parameters[i]; - } - } - - class_labels = class_labels0; - setParams(best_params); - return do_train( samples, responses ); - } - - struct PredictBody : ParallelLoopBody - { - PredictBody( const SVMImpl* _svm, const Mat& _samples, Mat& _results, bool _returnDFVal ) - { - svm = _svm; - results = &_results; - samples = &_samples; - returnDFVal = _returnDFVal; - } - - void operator()(const Range& range) const CV_OVERRIDE - { - int svmType = svm->params.svmType; - int sv_total = svm->sv.rows; - int class_count = !svm->class_labels.empty() ? (int)svm->class_labels.total() : svmType == ONE_CLASS ? 1 : 0; - - AutoBuffer _buffer(sv_total + (class_count+1)*2); - float* buffer = _buffer.data(); - - int i, j, dfi, k, si; - - if( svmType == EPS_SVR || svmType == NU_SVR || svmType == ONE_CLASS ) - { - for( si = range.start; si < range.end; si++ ) - { - const float* row_sample = samples->ptr(si); - svm->kernel->calc( sv_total, svm->var_count, svm->sv.ptr(), row_sample, buffer ); - - const SVMImpl::DecisionFunc* df = &svm->decision_func[0]; - double sum = -df->rho; - for( i = 0; i < sv_total; i++ ) - sum += buffer[i]*svm->df_alpha[i]; - float result = svm->params.svmType == ONE_CLASS && !returnDFVal ? 
(float)(sum > 0) : (float)sum; - results->at(si) = result; - } - } - else if( svmType == C_SVC || svmType == NU_SVC ) - { - int* vote = (int*)(buffer + sv_total); - - for( si = range.start; si < range.end; si++ ) - { - svm->kernel->calc( sv_total, svm->var_count, svm->sv.ptr(), - samples->ptr(si), buffer ); - double sum = 0.; - - memset( vote, 0, class_count*sizeof(vote[0])); - - for( i = dfi = 0; i < class_count; i++ ) - { - for( j = i+1; j < class_count; j++, dfi++ ) - { - const DecisionFunc& df = svm->decision_func[dfi]; - sum = -df.rho; - int sv_count = svm->getSVCount(dfi); - CV_DbgAssert(sv_count > 0); - const double* alpha = &svm->df_alpha[df.ofs]; - const int* sv_index = &svm->df_index[df.ofs]; - for( k = 0; k < sv_count; k++ ) - sum += alpha[k]*buffer[sv_index[k]]; - - vote[sum > 0 ? i : j]++; - } - } - - for( i = 1, k = 0; i < class_count; i++ ) - { - if( vote[i] > vote[k] ) - k = i; - } - float result = returnDFVal && class_count == 2 ? - (float)sum : (float)(svm->class_labels.at(k)); - results->at(si) = result; - } - } - else - CV_Error( CV_StsBadArg, "INTERNAL ERROR: Unknown SVM type, " - "the SVM structure is probably corrupted" ); - } - - const SVMImpl* svm; - const Mat* samples; - Mat* results; - bool returnDFVal; - }; - - bool trainAuto(InputArray samples, int layout, - InputArray responses, int kfold, Ptr Cgrid, - Ptr gammaGrid, Ptr pGrid, Ptr nuGrid, - Ptr coeffGrid, Ptr degreeGrid, bool balanced) CV_OVERRIDE - { - Ptr data = TrainData::create(samples, layout, responses); - return this->trainAuto( - data, kfold, - *Cgrid.get(), - *gammaGrid.get(), - *pGrid.get(), - *nuGrid.get(), - *coeffGrid.get(), - *degreeGrid.get(), - balanced); - } - - - float predict( InputArray _samples, OutputArray _results, int flags ) const CV_OVERRIDE - { - float result = 0; - Mat samples = _samples.getMat(), results; - int nsamples = samples.rows; - bool returnDFVal = (flags & RAW_OUTPUT) != 0; - - CV_Assert( samples.cols == var_count && samples.type() == CV_32F ); - - if( _results.needed() ) - { - _results.create( nsamples, 1, samples.type() ); - results = _results.getMat(); - } - else - { - CV_Assert( nsamples == 1 ); - results = Mat(1, 1, CV_32F, &result); - } - - PredictBody invoker(this, samples, results, returnDFVal); - if( nsamples < 10 ) - invoker(Range(0, nsamples)); - else - parallel_for_(Range(0, nsamples), invoker); - return result; - } - - double getDecisionFunction(int i, OutputArray _alpha, OutputArray _svidx ) const CV_OVERRIDE - { - CV_Assert( 0 <= i && i < (int)decision_func.size()); - const DecisionFunc& df = decision_func[i]; - int count = getSVCount(i); - Mat(1, count, CV_64F, (double*)&df_alpha[df.ofs]).copyTo(_alpha); - Mat(1, count, CV_32S, (int*)&df_index[df.ofs]).copyTo(_svidx); - return df.rho; - } - - void write_params( FileStorage& fs ) const - { - int svmType = params.svmType; - int kernelType = params.kernelType; - - String svm_type_str = - svmType == C_SVC ? "C_SVC" : - svmType == NU_SVC ? "NU_SVC" : - svmType == ONE_CLASS ? "ONE_CLASS" : - svmType == EPS_SVR ? "EPS_SVR" : - svmType == NU_SVR ? "NU_SVR" : format("Unknown_%d", svmType); - String kernel_type_str = - kernelType == LINEAR ? "LINEAR" : - kernelType == POLY ? "POLY" : - kernelType == RBF ? "RBF" : - kernelType == SIGMOID ? "SIGMOID" : - kernelType == CHI2 ? "CHI2" : - kernelType == INTER ? 
"INTER" : format("Unknown_%d", kernelType); - - fs << "svmType" << svm_type_str; - - // save kernel - fs << "kernel" << "{" << "type" << kernel_type_str; - - if( kernelType == POLY ) - fs << "degree" << params.degree; - - if( kernelType != LINEAR ) - fs << "gamma" << params.gamma; - - if( kernelType == POLY || kernelType == SIGMOID ) - fs << "coef0" << params.coef0; - - fs << "}"; - - if( svmType == C_SVC || svmType == EPS_SVR || svmType == NU_SVR ) - fs << "C" << params.C; - - if( svmType == NU_SVC || svmType == ONE_CLASS || svmType == NU_SVR ) - fs << "nu" << params.nu; - - if( svmType == EPS_SVR ) - fs << "p" << params.p; - - fs << "term_criteria" << "{:"; - if( params.termCrit.type & TermCriteria::EPS ) - fs << "epsilon" << params.termCrit.epsilon; - if( params.termCrit.type & TermCriteria::COUNT ) - fs << "iterations" << params.termCrit.maxCount; - fs << "}"; - } - - bool isTrained() const CV_OVERRIDE - { - return !sv.empty(); - } - - bool isClassifier() const CV_OVERRIDE - { - return params.svmType == C_SVC || params.svmType == NU_SVC || params.svmType == ONE_CLASS; - } - - int getVarCount() const CV_OVERRIDE - { - return var_count; - } - - String getDefaultName() const CV_OVERRIDE - { - return "opencv_ml_svm"; - } - - void write( FileStorage& fs ) const CV_OVERRIDE - { - int class_count = !class_labels.empty() ? (int)class_labels.total() : - params.svmType == ONE_CLASS ? 1 : 0; - if( !isTrained() ) - CV_Error( CV_StsParseError, "SVM model data is invalid, check sv_count, var_* and class_count tags" ); - - writeFormat(fs); - write_params( fs ); - - fs << "var_count" << var_count; - - if( class_count > 0 ) - { - fs << "class_count" << class_count; - - if( !class_labels.empty() ) - fs << "class_labels" << class_labels; - - if( !params.classWeights.empty() ) - fs << "class_weights" << params.classWeights; - } - - // write the joint collection of support vectors - int i, sv_total = sv.rows; - fs << "sv_total" << sv_total; - fs << "support_vectors" << "["; - for( i = 0; i < sv_total; i++ ) - { - fs << "[:"; - fs.writeRaw("f", sv.ptr(i), sv.cols*sv.elemSize()); - fs << "]"; - } - fs << "]"; - - if ( !uncompressed_sv.empty() ) - { - // write the joint collection of uncompressed support vectors - int uncompressed_sv_total = uncompressed_sv.rows; - fs << "uncompressed_sv_total" << uncompressed_sv_total; - fs << "uncompressed_support_vectors" << "["; - for( i = 0; i < uncompressed_sv_total; i++ ) - { - fs << "[:"; - fs.writeRaw("f", uncompressed_sv.ptr(i), uncompressed_sv.cols*uncompressed_sv.elemSize()); - fs << "]"; - } - fs << "]"; - } - - // write decision functions - int df_count = (int)decision_func.size(); - - fs << "decision_functions" << "["; - for( i = 0; i < df_count; i++ ) - { - const DecisionFunc& df = decision_func[i]; - int sv_count = getSVCount(i); - fs << "{" << "sv_count" << sv_count - << "rho" << df.rho - << "alpha" << "[:"; - fs.writeRaw("d", (const uchar*)&df_alpha[df.ofs], sv_count*sizeof(df_alpha[0])); - fs << "]"; - if( class_count >= 2 ) - { - fs << "index" << "[:"; - fs.writeRaw("i", (const uchar*)&df_index[df.ofs], sv_count*sizeof(df_index[0])); - fs << "]"; - } - else - CV_Assert( sv_count == sv_total ); - fs << "}"; - } - fs << "]"; - } - - void read_params( const FileNode& fn ) - { - SvmParams _params; - - // check for old naming - String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]); - int svmType = - svm_type_str == "C_SVC" ? C_SVC : - svm_type_str == "NU_SVC" ? NU_SVC : - svm_type_str == "ONE_CLASS" ? 
ONE_CLASS : - svm_type_str == "EPS_SVR" ? EPS_SVR : - svm_type_str == "NU_SVR" ? NU_SVR : -1; - - if( svmType < 0 ) - CV_Error( CV_StsParseError, "Missing or invalid SVM type" ); - - FileNode kernel_node = fn["kernel"]; - if( kernel_node.empty() ) - CV_Error( CV_StsParseError, "SVM kernel tag is not found" ); - - String kernel_type_str = (String)kernel_node["type"]; - int kernelType = - kernel_type_str == "LINEAR" ? LINEAR : - kernel_type_str == "POLY" ? POLY : - kernel_type_str == "RBF" ? RBF : - kernel_type_str == "SIGMOID" ? SIGMOID : - kernel_type_str == "CHI2" ? CHI2 : - kernel_type_str == "INTER" ? INTER : CUSTOM; - - if( kernelType == CUSTOM ) - CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" ); - - _params.svmType = svmType; - _params.kernelType = kernelType; - _params.degree = (double)kernel_node["degree"]; - _params.gamma = (double)kernel_node["gamma"]; - _params.coef0 = (double)kernel_node["coef0"]; - - _params.C = (double)fn["C"]; - _params.nu = (double)fn["nu"]; - _params.p = (double)fn["p"]; - _params.classWeights = Mat(); - - FileNode tcnode = fn["term_criteria"]; - if( !tcnode.empty() ) - { - _params.termCrit.epsilon = (double)tcnode["epsilon"]; - _params.termCrit.maxCount = (int)tcnode["iterations"]; - _params.termCrit.type = (_params.termCrit.epsilon > 0 ? TermCriteria::EPS : 0) + - (_params.termCrit.maxCount > 0 ? TermCriteria::COUNT : 0); - } - else - _params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON ); - - setParams( _params ); - } - - void read( const FileNode& fn ) CV_OVERRIDE - { - clear(); - - // read SVM parameters - read_params( fn ); - - // and top-level data - int i, sv_total = (int)fn["sv_total"]; - var_count = (int)fn["var_count"]; - int class_count = (int)fn["class_count"]; - - if( sv_total <= 0 || var_count <= 0 ) - CV_Error( CV_StsParseError, "SVM model data is invalid, check sv_count, var_* and class_count tags" ); - - FileNode m = fn["class_labels"]; - if( !m.empty() ) - m >> class_labels; - m = fn["class_weights"]; - if( !m.empty() ) - m >> params.classWeights; - - if( class_count > 1 && (class_labels.empty() || (int)class_labels.total() != class_count)) - CV_Error( CV_StsParseError, "Array of class labels is missing or invalid" ); - - // read support vectors - FileNode sv_node = fn["support_vectors"]; - - CV_Assert((int)sv_node.size() == sv_total); - - sv.create(sv_total, var_count, CV_32F); - FileNodeIterator sv_it = sv_node.begin(); - for( i = 0; i < sv_total; i++, ++sv_it ) - { - (*sv_it).readRaw("f", sv.ptr(i), var_count*sv.elemSize()); - } - - int uncompressed_sv_total = (int)fn["uncompressed_sv_total"]; - - if( uncompressed_sv_total > 0 ) - { - // read uncompressed support vectors - FileNode uncompressed_sv_node = fn["uncompressed_support_vectors"]; - - CV_Assert((int)uncompressed_sv_node.size() == uncompressed_sv_total); - uncompressed_sv.create(uncompressed_sv_total, var_count, CV_32F); - - FileNodeIterator uncompressed_sv_it = uncompressed_sv_node.begin(); - for( i = 0; i < uncompressed_sv_total; i++, ++uncompressed_sv_it ) - { - (*uncompressed_sv_it).readRaw("f", uncompressed_sv.ptr(i), var_count*uncompressed_sv.elemSize()); - } - } - - // read decision functions - int df_count = class_count > 1 ? 
class_count*(class_count-1)/2 : 1; - FileNode df_node = fn["decision_functions"]; - - CV_Assert((int)df_node.size() == df_count); - - FileNodeIterator df_it = df_node.begin(); - for( i = 0; i < df_count; i++, ++df_it ) - { - FileNode dfi = *df_it; - DecisionFunc df; - int sv_count = (int)dfi["sv_count"]; - int ofs = (int)df_index.size(); - df.rho = (double)dfi["rho"]; - df.ofs = ofs; - df_index.resize(ofs + sv_count); - df_alpha.resize(ofs + sv_count); - dfi["alpha"].readRaw("d", (uchar*)&df_alpha[ofs], sv_count*sizeof(df_alpha[0])); - if( class_count >= 2 ) - dfi["index"].readRaw("i", (uchar*)&df_index[ofs], sv_count*sizeof(df_index[0])); - decision_func.push_back(df); - } - if( class_count < 2 ) - setRangeVector(df_index, sv_total); - if( (int)fn["optimize_linear"] != 0 ) - optimize_linear_svm(); - } - - SvmParams params; - Mat class_labels; - int var_count; - Mat sv, uncompressed_sv; - vector decision_func; - vector df_alpha; - vector df_index; - - Ptr kernel; -}; - - -Ptr SVM::create() -{ - return makePtr(); -} - -Ptr SVM::load(const String& filepath) -{ - FileStorage fs; - fs.open(filepath, FileStorage::READ); - - Ptr svm = makePtr(); - - ((SVMImpl*)svm.get())->read(fs.getFirstTopLevelNode()); - return svm; -} - - -} -} - -/* End of file. */ diff --git a/modules/ml/src/svmsgd.cpp b/modules/ml/src/svmsgd.cpp deleted file mode 100644 index 266c7cf300..0000000000 --- a/modules/ml/src/svmsgd.cpp +++ /dev/null @@ -1,524 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2016, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -#include "limits" - -#include - -using std::cout; -using std::endl; - -/****************************************************************************************\ -* Stochastic Gradient Descent SVM Classifier * -\****************************************************************************************/ - -namespace cv -{ -namespace ml -{ - -class SVMSGDImpl CV_FINAL : public SVMSGD -{ - -public: - SVMSGDImpl(); - - virtual ~SVMSGDImpl() {} - - virtual bool train(const Ptr& data, int) CV_OVERRIDE; - - virtual float predict( InputArray samples, OutputArray results=noArray(), int flags = 0 ) const CV_OVERRIDE; - - virtual bool isClassifier() const CV_OVERRIDE; - - virtual bool isTrained() const CV_OVERRIDE; - - virtual void clear() CV_OVERRIDE; - - virtual void write(FileStorage &fs) const CV_OVERRIDE; - - virtual void read(const FileNode &fn) CV_OVERRIDE; - - virtual Mat getWeights() CV_OVERRIDE { return weights_; } - - virtual float getShift() CV_OVERRIDE { return shift_; } - - virtual int getVarCount() const CV_OVERRIDE { return weights_.cols; } - - virtual String getDefaultName() const CV_OVERRIDE {return "opencv_ml_svmsgd";} - - virtual void setOptimalParameters(int svmsgdType = ASGD, int marginType = SOFT_MARGIN) CV_OVERRIDE; - - inline int getSvmsgdType() const CV_OVERRIDE { return params.svmsgdType; } - inline void setSvmsgdType(int val) CV_OVERRIDE { params.svmsgdType = val; } - inline int getMarginType() const CV_OVERRIDE { return params.marginType; } - inline void setMarginType(int val) CV_OVERRIDE { params.marginType = val; } - inline float getMarginRegularization() const CV_OVERRIDE { return params.marginRegularization; } - inline void setMarginRegularization(float val) CV_OVERRIDE { params.marginRegularization = val; } - inline float getInitialStepSize() const CV_OVERRIDE { return params.initialStepSize; } - inline void setInitialStepSize(float val) CV_OVERRIDE { params.initialStepSize = val; } - inline float getStepDecreasingPower() const CV_OVERRIDE { return params.stepDecreasingPower; } - inline void setStepDecreasingPower(float val) CV_OVERRIDE { params.stepDecreasingPower = val; } - inline cv::TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; } - inline void setTermCriteria(const cv::TermCriteria& val) CV_OVERRIDE { params.termCrit = val; } - -private: - void updateWeights(InputArray sample, bool positive, float stepSize, Mat &weights); - - void writeParams( FileStorage &fs ) const; - - void readParams( const FileNode &fn ); - - static inline bool isPositive(float val) { return val > 0; } - - static void normalizeSamples(Mat &matrix, Mat &average, float &multiplier); - - float calcShift(InputArray _samples, InputArray _responses) const; - - static void makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier); - - // Vector with SVM weights - Mat weights_; - float shift_; - - // Parameters for learning - struct SVMSGDParams - { - 
float marginRegularization; - float initialStepSize; - float stepDecreasingPower; - TermCriteria termCrit; - int svmsgdType; - int marginType; - }; - - SVMSGDParams params; -}; - -Ptr SVMSGD::create() -{ - return makePtr(); -} - -Ptr SVMSGD::load(const String& filepath, const String& nodeName) -{ - return Algorithm::load(filepath, nodeName); -} - - -void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier) -{ - int featuresCount = samples.cols; - int samplesCount = samples.rows; - - average = Mat(1, featuresCount, samples.type()); - CV_Assert(average.type() == CV_32FC1); - for (int featureIndex = 0; featureIndex < featuresCount; featureIndex++) - { - average.at(featureIndex) = static_cast(mean(samples.col(featureIndex))[0]); - } - - for (int sampleIndex = 0; sampleIndex < samplesCount; sampleIndex++) - { - samples.row(sampleIndex) -= average; - } - - double normValue = norm(samples); - - multiplier = static_cast(sqrt(static_cast(samples.total())) / normValue); - - samples *= multiplier; -} - -void SVMSGDImpl::makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier) -{ - Mat normalizedTrainSamples = trainSamples.clone(); - int samplesCount = normalizedTrainSamples.rows; - - normalizeSamples(normalizedTrainSamples, average, multiplier); - - Mat onesCol = Mat::ones(samplesCount, 1, CV_32F); - cv::hconcat(normalizedTrainSamples, onesCol, extendedTrainSamples); -} - -void SVMSGDImpl::updateWeights(InputArray _sample, bool positive, float stepSize, Mat& weights) -{ - Mat sample = _sample.getMat(); - - int response = positive ? 1 : -1; // ensure that trainResponses are -1 or 1 - - if ( sample.dot(weights) * response > 1) - { - // Not a support vector, only apply weight decay - weights *= (1.f - stepSize * params.marginRegularization); - } - else - { - // It's a support vector, add it to the weights - weights -= (stepSize * params.marginRegularization) * weights - (stepSize * response) * sample; - } -} - -float SVMSGDImpl::calcShift(InputArray _samples, InputArray _responses) const -{ - float margin[2] = { std::numeric_limits::max(), std::numeric_limits::max() }; - - Mat trainSamples = _samples.getMat(); - int trainSamplesCount = trainSamples.rows; - - Mat trainResponses = _responses.getMat(); - - CV_Assert(trainResponses.type() == CV_32FC1); - for (int samplesIndex = 0; samplesIndex < trainSamplesCount; samplesIndex++) - { - Mat currentSample = trainSamples.row(samplesIndex); - float dotProduct = static_cast(currentSample.dot(weights_)); - - bool positive = isPositive(trainResponses.at(samplesIndex)); - int index = positive ? 0 : 1; - float signToMul = positive ? 1.f : -1.f; - float curMargin = dotProduct * signToMul; - - if (curMargin < margin[index]) - { - margin[index] = curMargin; - } - } - - return -(margin[0] - margin[1]) / 2.f; -} - -bool SVMSGDImpl::train(const Ptr& data, int) -{ - CV_Assert(!data.empty()); - clear(); - CV_Assert( isClassifier() ); //toDo: consider - - Mat trainSamples = data->getTrainSamples(); - - int featureCount = trainSamples.cols; - Mat trainResponses = data->getTrainResponses(); // (trainSamplesCount x 1) matrix - - CV_Assert(trainResponses.rows == trainSamples.rows); - - if (trainResponses.empty()) - { - return false; - } - - int positiveCount = countNonZero(trainResponses >= 0); - int negativeCount = countNonZero(trainResponses < 0); - - if ( positiveCount <= 0 || negativeCount <= 0 ) - { - weights_ = Mat::zeros(1, featureCount, CV_32F); - shift_ = (positiveCount > 0) ? 
1.f : -1.f; - return true; - } - - Mat extendedTrainSamples; - Mat average; - float multiplier = 0; - makeExtendedTrainSamples(trainSamples, extendedTrainSamples, average, multiplier); - - int extendedTrainSamplesCount = extendedTrainSamples.rows; - int extendedFeatureCount = extendedTrainSamples.cols; - - Mat extendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); - Mat previousWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); - Mat averageExtendedWeights; - if (params.svmsgdType == ASGD) - { - averageExtendedWeights = Mat::zeros(1, extendedFeatureCount, CV_32F); - } - - RNG rng(0); - - CV_Assert (params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS); - int maxCount = (params.termCrit.type & TermCriteria::COUNT) ? params.termCrit.maxCount : INT_MAX; - double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0; - - double err = DBL_MAX; - CV_Assert (trainResponses.type() == CV_32FC1); - // Stochastic gradient descent SVM - for (int iter = 0; (iter < maxCount) && (err > epsilon); iter++) - { - int randomNumber = rng.uniform(0, extendedTrainSamplesCount); //generate sample number - - Mat currentSample = extendedTrainSamples.row(randomNumber); - - float stepSize = params.initialStepSize * std::pow((1 + params.marginRegularization * params.initialStepSize * (float)iter), (-params.stepDecreasingPower)); //update stepSize - - updateWeights( currentSample, isPositive(trainResponses.at(randomNumber)), stepSize, extendedWeights ); - - //average weights (only for ASGD model) - if (params.svmsgdType == ASGD) - { - averageExtendedWeights = ((float)iter/ (1 + (float)iter)) * averageExtendedWeights + extendedWeights / (1 + (float) iter); - err = norm(averageExtendedWeights - previousWeights); - averageExtendedWeights.copyTo(previousWeights); - } - else - { - err = norm(extendedWeights - previousWeights); - extendedWeights.copyTo(previousWeights); - } - } - - if (params.svmsgdType == ASGD) - { - extendedWeights = averageExtendedWeights; - } - - Rect roi(0, 0, featureCount, 1); - weights_ = extendedWeights(roi); - weights_ *= multiplier; - - CV_Assert((params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN) && (extendedWeights.type() == CV_32FC1)); - - if (params.marginType == SOFT_MARGIN) - { - shift_ = extendedWeights.at(featureCount) - static_cast(weights_.dot(average)); - } - else - { - shift_ = calcShift(trainSamples, trainResponses); - } - - return true; -} - -float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const -{ - float result = 0; - cv::Mat samples = _samples.getMat(); - int nSamples = samples.rows; - cv::Mat results; - - CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1); - - if( _results.needed() ) - { - _results.create( nSamples, 1, samples.type() ); - results = _results.getMat(); - } - else - { - CV_Assert( nSamples == 1 ); - results = Mat(1, 1, CV_32FC1, &result); - } - - for (int sampleIndex = 0; sampleIndex < nSamples; sampleIndex++) - { - Mat currentSample = samples.row(sampleIndex); - float criterion = static_cast(currentSample.dot(weights_)) + shift_; - results.at(sampleIndex) = (criterion >= 0) ? 
1.f : -1.f; - } - - return result; -} - -bool SVMSGDImpl::isClassifier() const -{ - return (params.svmsgdType == SGD || params.svmsgdType == ASGD) - && - (params.marginType == SOFT_MARGIN || params.marginType == HARD_MARGIN) - && - (params.marginRegularization > 0) && (params.initialStepSize > 0) && (params.stepDecreasingPower >= 0); -} - -bool SVMSGDImpl::isTrained() const -{ - return !weights_.empty(); -} - -void SVMSGDImpl::write(FileStorage& fs) const -{ - if( !isTrained() ) - CV_Error( CV_StsParseError, "SVMSGD model data is invalid, it hasn't been trained" ); - - writeFormat(fs); - writeParams( fs ); - - fs << "weights" << weights_; - fs << "shift" << shift_; -} - -void SVMSGDImpl::writeParams( FileStorage& fs ) const -{ - String SvmsgdTypeStr; - - switch (params.svmsgdType) - { - case SGD: - SvmsgdTypeStr = "SGD"; - break; - case ASGD: - SvmsgdTypeStr = "ASGD"; - break; - default: - SvmsgdTypeStr = format("Unknown_%d", params.svmsgdType); - } - - fs << "svmsgdType" << SvmsgdTypeStr; - - String marginTypeStr; - - switch (params.marginType) - { - case SOFT_MARGIN: - marginTypeStr = "SOFT_MARGIN"; - break; - case HARD_MARGIN: - marginTypeStr = "HARD_MARGIN"; - break; - default: - marginTypeStr = format("Unknown_%d", params.marginType); - } - - fs << "marginType" << marginTypeStr; - - fs << "marginRegularization" << params.marginRegularization; - fs << "initialStepSize" << params.initialStepSize; - fs << "stepDecreasingPower" << params.stepDecreasingPower; - - fs << "term_criteria" << "{:"; - if( params.termCrit.type & TermCriteria::EPS ) - fs << "epsilon" << params.termCrit.epsilon; - if( params.termCrit.type & TermCriteria::COUNT ) - fs << "iterations" << params.termCrit.maxCount; - fs << "}"; -} -void SVMSGDImpl::readParams( const FileNode& fn ) -{ - String svmsgdTypeStr = (String)fn["svmsgdType"]; - int svmsgdType = - svmsgdTypeStr == "SGD" ? SGD : - svmsgdTypeStr == "ASGD" ? ASGD : -1; - - if( svmsgdType < 0 ) - CV_Error( CV_StsParseError, "Missing or invalid SVMSGD type" ); - - params.svmsgdType = svmsgdType; - - String marginTypeStr = (String)fn["marginType"]; - int marginType = - marginTypeStr == "SOFT_MARGIN" ? SOFT_MARGIN : - marginTypeStr == "HARD_MARGIN" ? HARD_MARGIN : -1; - - if( marginType < 0 ) - CV_Error( CV_StsParseError, "Missing or invalid margin type" ); - - params.marginType = marginType; - - CV_Assert ( fn["marginRegularization"].isReal() ); - params.marginRegularization = (float)fn["marginRegularization"]; - - CV_Assert ( fn["initialStepSize"].isReal() ); - params.initialStepSize = (float)fn["initialStepSize"]; - - CV_Assert ( fn["stepDecreasingPower"].isReal() ); - params.stepDecreasingPower = (float)fn["stepDecreasingPower"]; - - FileNode tcnode = fn["term_criteria"]; - CV_Assert(!tcnode.empty()); - params.termCrit.epsilon = (double)tcnode["epsilon"]; - params.termCrit.maxCount = (int)tcnode["iterations"]; - params.termCrit.type = (params.termCrit.epsilon > 0 ? TermCriteria::EPS : 0) + - (params.termCrit.maxCount > 0 ? 
TermCriteria::COUNT : 0); - CV_Assert ((params.termCrit.type & TermCriteria::COUNT || params.termCrit.type & TermCriteria::EPS)); -} - -void SVMSGDImpl::read(const FileNode& fn) -{ - clear(); - - readParams(fn); - - fn["weights"] >> weights_; - fn["shift"] >> shift_; -} - -void SVMSGDImpl::clear() -{ - weights_.release(); - shift_ = 0; -} - - -SVMSGDImpl::SVMSGDImpl() -{ - clear(); - setOptimalParameters(); -} - -void SVMSGDImpl::setOptimalParameters(int svmsgdType, int marginType) -{ - switch (svmsgdType) - { - case SGD: - params.svmsgdType = SGD; - params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN : - (marginType == HARD_MARGIN) ? HARD_MARGIN : -1; - params.marginRegularization = 0.0001f; - params.initialStepSize = 0.05f; - params.stepDecreasingPower = 1.f; - params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001); - break; - - case ASGD: - params.svmsgdType = ASGD; - params.marginType = (marginType == SOFT_MARGIN) ? SOFT_MARGIN : - (marginType == HARD_MARGIN) ? HARD_MARGIN : -1; - params.marginRegularization = 0.00001f; - params.initialStepSize = 0.05f; - params.stepDecreasingPower = 0.75f; - params.termCrit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100000, 0.00001); - break; - - default: - CV_Error( CV_StsParseError, "SVMSGD model data is invalid" ); - } -} -} //ml -} //cv diff --git a/modules/ml/src/testset.cpp b/modules/ml/src/testset.cpp deleted file mode 100644 index 48cd134154..0000000000 --- a/modules/ml/src/testset.cpp +++ /dev/null @@ -1,113 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" - -namespace cv { namespace ml { - -struct PairDI -{ - double d; - int i; -}; - -struct CmpPairDI -{ - bool operator ()(const PairDI& e1, const PairDI& e2) const - { - return (e1.d < e2.d) || (e1.d == e2.d && e1.i < e2.i); - } -}; - -void createConcentricSpheresTestSet( int num_samples, int num_features, int num_classes, - OutputArray _samples, OutputArray _responses) -{ - if( num_samples < 1 ) - CV_Error( CV_StsBadArg, "num_samples parameter must be positive" ); - - if( num_features < 1 ) - CV_Error( CV_StsBadArg, "num_features parameter must be positive" ); - - if( num_classes < 1 ) - CV_Error( CV_StsBadArg, "num_classes parameter must be positive" ); - - int i, cur_class; - - _samples.create( num_samples, num_features, CV_32F ); - _responses.create( 1, num_samples, CV_32S ); - - Mat responses = _responses.getMat(); - - Mat mean = Mat::zeros(1, num_features, CV_32F); - Mat cov = Mat::eye(num_features, num_features, CV_32F); - - // fill the feature values matrix with random numbers drawn from a standard normal distribution - randMVNormal( mean, cov, num_samples, _samples ); - Mat samples = _samples.getMat(); - - // calculate the distances from the origin to the samples and put them - // into the sequence along with their indices - std::vector<PairDI> dis(samples.rows); - - for( i = 0; i < samples.rows; i++ ) - { - PairDI& elem = dis[i]; - elem.i = i; - elem.d = norm(samples.row(i), NORM_L2); - } - - std::sort(dis.begin(), dis.end(), CmpPairDI()); - - // assign class labels - num_classes = std::min( num_samples, num_classes ); - for( i = 0, cur_class = 0; i < num_samples; ++cur_class ) - { - int last_idx = num_samples * (cur_class + 1) / num_classes - 1; - double max_dst = dis[last_idx].d; - max_dst = std::max( max_dst, dis[i].d ); - - for( ; i < num_samples && dis[i].d <= max_dst; ++i ) - responses.at<int>(dis[i].i) = cur_class; - } -} - -}} - -/* End of file. */ diff --git a/modules/ml/src/tree.cpp b/modules/ml/src/tree.cpp deleted file mode 100644 index b69ddaece2..0000000000 --- a/modules/ml/src/tree.cpp +++ /dev/null @@ -1,1990 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission.
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -#include - -#include - -namespace cv { -namespace ml { - -using std::vector; - -TreeParams::TreeParams() -{ - maxDepth = INT_MAX; - minSampleCount = 10; - regressionAccuracy = 0.01f; - useSurrogates = false; - maxCategories = 10; - CVFolds = 10; - use1SERule = true; - truncatePrunedTree = true; - priors = Mat(); -} - -TreeParams::TreeParams(int _maxDepth, int _minSampleCount, - double _regressionAccuracy, bool _useSurrogates, - int _maxCategories, int _CVFolds, - bool _use1SERule, bool _truncatePrunedTree, - const Mat& _priors) -{ - maxDepth = _maxDepth; - minSampleCount = _minSampleCount; - regressionAccuracy = (float)_regressionAccuracy; - useSurrogates = _useSurrogates; - maxCategories = _maxCategories; - CVFolds = _CVFolds; - use1SERule = _use1SERule; - truncatePrunedTree = _truncatePrunedTree; - priors = _priors; -} - -DTrees::Node::Node() -{ - classIdx = 0; - value = 0; - parent = left = right = split = defaultDir = -1; -} - -DTrees::Split::Split() -{ - varIdx = 0; - inversed = false; - quality = 0.f; - next = -1; - c = 0.f; - subsetOfs = 0; -} - - -DTreesImpl::WorkData::WorkData(const Ptr& _data) -{ - CV_Assert(!_data.empty()); - data = _data; - vector subsampleIdx; - Mat sidx0 = _data->getTrainSampleIdx(); - if( !sidx0.empty() ) - { - sidx0.copyTo(sidx); - std::sort(sidx.begin(), sidx.end()); - } - else - { - int n = _data->getNSamples(); - setRangeVector(sidx, n); - } - - maxSubsetSize = 0; -} - -DTreesImpl::DTreesImpl() : _isClassifier(false) {} -DTreesImpl::~DTreesImpl() {} -void DTreesImpl::clear() -{ - varIdx.clear(); - compVarIdx.clear(); - varType.clear(); - catOfs.clear(); - catMap.clear(); - roots.clear(); - nodes.clear(); - splits.clear(); - subsets.clear(); - classLabels.clear(); - - w.release(); - _isClassifier = false; -} - -void DTreesImpl::startTraining( const Ptr& data, int ) -{ - CV_Assert(!data.empty()); - clear(); - w = makePtr(data); - - Mat vtype = data->getVarType(); - vtype.copyTo(varType); - - data->getCatOfs().copyTo(catOfs); - data->getCatMap().copyTo(catMap); - data->getDefaultSubstValues().copyTo(missingSubst); - - int nallvars = data->getNAllVars(); - - Mat vidx0 = data->getVarIdx(); - if( !vidx0.empty() ) - vidx0.copyTo(varIdx); - else - setRangeVector(varIdx, nallvars); - - initCompVarIdx(); - - w->maxSubsetSize = 0; - - int i, nvars = (int)varIdx.size(); - for( i = 0; i < nvars; i++ ) - w->maxSubsetSize = std::max(w->maxSubsetSize, getCatCount(varIdx[i])); - - w->maxSubsetSize = std::max((w->maxSubsetSize + 31)/32, 1); - - data->getSampleWeights().copyTo(w->sample_weights); - - _isClassifier = data->getResponseType() == VAR_CATEGORICAL; - - if( _isClassifier ) - { - data->getNormCatResponses().copyTo(w->cat_responses); - 
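The statements that follow fetch the class labels and fold the optional user-supplied priors into the per-sample weights, so that rare classes can count more during split selection. That weighting step in isolation (hypothetical helper):

```cpp
#include <vector>

// Scale each sample's weight by the prior of its (normalized) class index.
static void applyClassPriors(std::vector<double>& sampleWeights,
                             const std::vector<int>& classIdx,  // values in 0..nclasses-1
                             const std::vector<double>& priors) // one entry per class
{
    for (size_t i = 0; i < sampleWeights.size(); i++)
        sampleWeights[i] *= priors[classIdx[i]];
}
```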
data->getClassLabels().copyTo(classLabels); - int nclasses = (int)classLabels.size(); - - Mat class_weights = params.priors; - if( !class_weights.empty() ) - { - if( class_weights.type() != CV_64F || !class_weights.isContinuous() ) - { - Mat temp; - class_weights.convertTo(temp, CV_64F); - class_weights = temp; - } - CV_Assert( class_weights.checkVector(1, CV_64F) == nclasses ); - - int nsamples = (int)w->cat_responses.size(); - const double* cw = class_weights.ptr(); - CV_Assert( (int)w->sample_weights.size() == nsamples ); - - for( i = 0; i < nsamples; i++ ) - { - int ci = w->cat_responses[i]; - CV_Assert( 0 <= ci && ci < nclasses ); - w->sample_weights[i] *= cw[ci]; - } - } - } - else - data->getResponses().copyTo(w->ord_responses); -} - - -void DTreesImpl::initCompVarIdx() -{ - int nallvars = (int)varType.size(); - compVarIdx.assign(nallvars, -1); - int i, nvars = (int)varIdx.size(), prevIdx = -1; - for( i = 0; i < nvars; i++ ) - { - int vi = varIdx[i]; - CV_Assert( 0 <= vi && vi < nallvars && vi > prevIdx ); - prevIdx = vi; - compVarIdx[vi] = i; - } -} - -void DTreesImpl::endTraining() -{ - w.release(); -} - -bool DTreesImpl::train( const Ptr& trainData, int flags ) -{ - CV_Assert(!trainData.empty()); - startTraining(trainData, flags); - bool ok = addTree( w->sidx ) >= 0; - w.release(); - endTraining(); - return ok; -} - -const vector& DTreesImpl::getActiveVars() -{ - return varIdx; -} - -int DTreesImpl::addTree(const vector& sidx ) -{ - size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size(); - - w->wnodes.reserve(n); - w->wsplits.reserve(n); - w->wsubsets.reserve(n*w->maxSubsetSize); - w->wnodes.clear(); - w->wsplits.clear(); - w->wsubsets.clear(); - - int cv_n = params.getCVFolds(); - - if( cv_n > 0 ) - { - w->cv_Tn.resize(n*cv_n); - w->cv_node_error.resize(n*cv_n); - w->cv_node_risk.resize(n*cv_n); - } - - // build the tree recursively - int w_root = addNodeAndTrySplit(-1, sidx); - int maxdepth = INT_MAX;//pruneCV(root); - - int w_nidx = w_root, pidx = -1, depth = 0; - int root = (int)nodes.size(); - - for(;;) - { - const WNode& wnode = w->wnodes[w_nidx]; - Node node; - node.parent = pidx; - node.classIdx = wnode.class_idx; - node.value = wnode.value; - node.defaultDir = wnode.defaultDir; - - int wsplit_idx = wnode.split; - if( wsplit_idx >= 0 ) - { - const WSplit& wsplit = w->wsplits[wsplit_idx]; - Split split; - split.c = wsplit.c; - split.quality = wsplit.quality; - split.inversed = wsplit.inversed; - split.varIdx = wsplit.varIdx; - split.subsetOfs = -1; - if( wsplit.subsetOfs >= 0 ) - { - int ssize = getSubsetSize(split.varIdx); - split.subsetOfs = (int)subsets.size(); - subsets.resize(split.subsetOfs + ssize); - // This check verifies that subsets index is in the correct range - // as in case ssize == 0 no real resize performed. - // Thus memory kept safe. 
- // Also this skips useless memcpy call when size parameter is zero - if(ssize > 0) - { - memcpy(&subsets[split.subsetOfs], &w->wsubsets[wsplit.subsetOfs], ssize*sizeof(int)); - } - } - node.split = (int)splits.size(); - splits.push_back(split); - } - int nidx = (int)nodes.size(); - nodes.push_back(node); - if( pidx >= 0 ) - { - int w_pidx = w->wnodes[w_nidx].parent; - if( w->wnodes[w_pidx].left == w_nidx ) - { - nodes[pidx].left = nidx; - } - else - { - CV_Assert(w->wnodes[w_pidx].right == w_nidx); - nodes[pidx].right = nidx; - } - } - - if( wnode.left >= 0 && depth+1 < maxdepth ) - { - w_nidx = wnode.left; - pidx = nidx; - depth++; - } - else - { - int w_pidx = wnode.parent; - while( w_pidx >= 0 && w->wnodes[w_pidx].right == w_nidx ) - { - w_nidx = w_pidx; - w_pidx = w->wnodes[w_pidx].parent; - nidx = pidx; - pidx = nodes[pidx].parent; - depth--; - } - - if( w_pidx < 0 ) - break; - - w_nidx = w->wnodes[w_pidx].right; - CV_Assert( w_nidx >= 0 ); - } - } - roots.push_back(root); - return root; -} - -void DTreesImpl::setDParams(const TreeParams& _params) -{ - params = _params; -} - -int DTreesImpl::addNodeAndTrySplit( int parent, const vector& sidx ) -{ - w->wnodes.push_back(WNode()); - int nidx = (int)(w->wnodes.size() - 1); - WNode& node = w->wnodes.back(); - - node.parent = parent; - node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0; - int nfolds = params.getCVFolds(); - - if( nfolds > 0 ) - { - w->cv_Tn.resize((nidx+1)*nfolds); - w->cv_node_error.resize((nidx+1)*nfolds); - w->cv_node_risk.resize((nidx+1)*nfolds); - } - - int i, n = node.sample_count = (int)sidx.size(); - bool can_split = true; - vector sleft, sright; - - calcValue( nidx, sidx ); - - if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() ) - can_split = false; - else if( _isClassifier ) - { - const int* responses = &w->cat_responses[0]; - const int* s = &sidx[0]; - int first = responses[s[0]]; - for( i = 1; i < n; i++ ) - if( responses[s[i]] != first ) - break; - if( i == n ) - can_split = false; - } - else - { - if( sqrt(node.node_risk) < params.getRegressionAccuracy() ) - can_split = false; - } - - if( can_split ) - node.split = findBestSplit( sidx ); - - //printf("depth=%d, nidx=%d, parent=%d, n=%d, %s, value=%.1f, risk=%.1f\n", node.depth, nidx, node.parent, n, (node.split < 0 ? "leaf" : varType[w->wsplits[node.split].varIdx] == VAR_CATEGORICAL ? 
"cat" : "ord"), node.value, node.node_risk); - - if( node.split >= 0 ) - { - node.defaultDir = calcDir( node.split, sidx, sleft, sright ); - if( params.useSurrogates ) - CV_Error( CV_StsNotImplemented, "surrogate splits are not implemented yet"); - - int left = addNodeAndTrySplit( nidx, sleft ); - int right = addNodeAndTrySplit( nidx, sright ); - w->wnodes[nidx].left = left; - w->wnodes[nidx].right = right; - CV_Assert( w->wnodes[nidx].left > 0 && w->wnodes[nidx].right > 0 ); - } - - return nidx; -} - -int DTreesImpl::findBestSplit( const vector& _sidx ) -{ - const vector& activeVars = getActiveVars(); - int splitidx = -1; - int vi_, nv = (int)activeVars.size(); - AutoBuffer buf(w->maxSubsetSize*2); - int *subset = buf.data(), *best_subset = subset + w->maxSubsetSize; - WSplit split, best_split; - best_split.quality = 0.; - - for( vi_ = 0; vi_ < nv; vi_++ ) - { - int vi = activeVars[vi_]; - if( varType[vi] == VAR_CATEGORICAL ) - { - if( _isClassifier ) - split = findSplitCatClass(vi, _sidx, 0, subset); - else - split = findSplitCatReg(vi, _sidx, 0, subset); - } - else - { - if( _isClassifier ) - split = findSplitOrdClass(vi, _sidx, 0); - else - split = findSplitOrdReg(vi, _sidx, 0); - } - if( split.quality > best_split.quality ) - { - best_split = split; - std::swap(subset, best_subset); - } - } - - if( best_split.quality > 0 ) - { - int best_vi = best_split.varIdx; - CV_Assert( compVarIdx[best_split.varIdx] >= 0 && best_vi >= 0 ); - int i, prevsz = (int)w->wsubsets.size(), ssize = getSubsetSize(best_vi); - w->wsubsets.resize(prevsz + ssize); - for( i = 0; i < ssize; i++ ) - w->wsubsets[prevsz + i] = best_subset[i]; - best_split.subsetOfs = prevsz; - w->wsplits.push_back(best_split); - splitidx = (int)(w->wsplits.size()-1); - } - - return splitidx; -} - -void DTreesImpl::calcValue( int nidx, const vector& _sidx ) -{ - WNode* node = &w->wnodes[nidx]; - int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds(); - int m = (int)classLabels.size(); - - cv::AutoBuffer buf(std::max(m, 3)*(cv_n+1)); - - if( cv_n > 0 ) - { - size_t sz = w->cv_Tn.size(); - w->cv_Tn.resize(sz + cv_n); - w->cv_node_risk.resize(sz + cv_n); - w->cv_node_error.resize(sz + cv_n); - } - - if( _isClassifier ) - { - // in case of classification tree: - // * node value is the label of the class that has the largest weight in the node. - // * node risk is the weighted number of misclassified samples, - // * j-th cross-validation fold value and risk are calculated as above, - // but using the samples with cv_labels(*)!=j. - // * j-th cross-validation fold error is calculated as the weighted number of - // misclassified samples with cv_labels(*)==j. 
- - // compute the number of instances of each class - double* cls_count = buf.data(); - double* cv_cls_count = cls_count + m; - - double max_val = -1, total_weight = 0; - int max_k = -1; - - for( k = 0; k < m; k++ ) - cls_count[k] = 0; - - if( cv_n == 0 ) - { - for( i = 0; i < n; i++ ) - { - int si = _sidx[i]; - cls_count[w->cat_responses[si]] += w->sample_weights[si]; - } - } - else - { - for( j = 0; j < cv_n; j++ ) - for( k = 0; k < m; k++ ) - cv_cls_count[j*m + k] = 0; - - for( i = 0; i < n; i++ ) - { - int si = _sidx[i]; - j = w->cv_labels[si]; k = w->cat_responses[si]; - cv_cls_count[j*m + k] += w->sample_weights[si]; - } - - for( j = 0; j < cv_n; j++ ) - for( k = 0; k < m; k++ ) - cls_count[k] += cv_cls_count[j*m + k]; - } - - for( k = 0; k < m; k++ ) - { - double val = cls_count[k]; - total_weight += val; - if( max_val < val ) - { - max_val = val; - max_k = k; - } - } - - node->class_idx = max_k; - node->value = classLabels[max_k]; - node->node_risk = total_weight - max_val; - - for( j = 0; j < cv_n; j++ ) - { - double sum_k = 0, sum = 0, max_val_k = 0; - max_val = -1; max_k = -1; - - for( k = 0; k < m; k++ ) - { - double val_k = cv_cls_count[j*m + k]; - double val = cls_count[k] - val_k; - sum_k += val_k; - sum += val; - if( max_val < val ) - { - max_val = val; - max_val_k = val_k; - max_k = k; - } - } - - w->cv_Tn[nidx*cv_n + j] = INT_MAX; - w->cv_node_risk[nidx*cv_n + j] = sum - max_val; - w->cv_node_error[nidx*cv_n + j] = sum_k - max_val_k; - } - } - else - { - // in case of regression tree: - // * node value is 1/n*sum_i(Y_i), where Y_i is i-th response, - // n is the number of samples in the node. - // * node risk is the sum of squared errors: sum_i((Y_i - )^2) - // * j-th cross-validation fold value and risk are calculated as above, - // but using the samples with cv_labels(*)!=j. - // * j-th cross-validation fold error is calculated - // using samples with cv_labels(*)==j as the test subset: - // error_j = sum_(i,cv_labels(i)==j)((Y_i - )^2), - // where node_value_j is the node value calculated - // as described in the previous bullet, and summation is done - // over the samples with cv_labels(*)==j. 
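Those formulas in isolation (a sketch assuming a positive total weight, matching the node_risk normalization in the code below):

```cpp
#include <vector>

// With weighted sums s = sum(w*y), s2 = sum(w*y*y) and sw = sum(w),
// the node value is s/sw and the risk is the weighted SSE around the
// mean, normalized by the total weight: (s2 - s*s/sw) / sw.
static void regressionNode(const std::vector<double>& y,
                           const std::vector<double>& w,
                           double& value, double& risk)
{
    double s = 0, s2 = 0, sw = 0;
    for (size_t i = 0; i < y.size(); i++)
    {
        s  += y[i] * w[i];
        s2 += y[i] * y[i] * w[i];
        sw += w[i];
    }
    value = s / sw;                 // weighted mean response (sw > 0 assumed)
    risk  = (s2 - s * s / sw) / sw;
}
```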
- double sum = 0, sum2 = 0, sumw = 0; - - if( cv_n == 0 ) - { - for( i = 0; i < n; i++ ) - { - int si = _sidx[i]; - double wval = w->sample_weights[si]; - double t = w->ord_responses[si]; - sum += t*wval; - sum2 += t*t*wval; - sumw += wval; - } - } - else - { - double *cv_sum = buf.data(), *cv_sum2 = cv_sum + cv_n; - double* cv_count = (double*)(cv_sum2 + cv_n); - - for( j = 0; j < cv_n; j++ ) - { - cv_sum[j] = cv_sum2[j] = 0.; - cv_count[j] = 0; - } - - for( i = 0; i < n; i++ ) - { - int si = _sidx[i]; - j = w->cv_labels[si]; - double wval = w->sample_weights[si]; - double t = w->ord_responses[si]; - cv_sum[j] += t*wval; - cv_sum2[j] += t*t*wval; - cv_count[j] += wval; - } - - for( j = 0; j < cv_n; j++ ) - { - sum += cv_sum[j]; - sum2 += cv_sum2[j]; - sumw += cv_count[j]; - } - - for( j = 0; j < cv_n; j++ ) - { - double s = sum - cv_sum[j], si = sum - s; - double s2 = sum2 - cv_sum2[j], s2i = sum2 - s2; - double c = cv_count[j], ci = sumw - c; - double r = si/std::max(ci, DBL_EPSILON); - w->cv_node_risk[nidx*cv_n + j] = s2i - r*r*ci; - w->cv_node_error[nidx*cv_n + j] = s2 - 2*r*s + c*r*r; - w->cv_Tn[nidx*cv_n + j] = INT_MAX; - } - } - CV_Assert(fabs(sumw) > 0); - node->node_risk = sum2 - (sum/sumw)*sum; - node->node_risk /= sumw; - node->value = sum/sumw; - } -} - -DTreesImpl::WSplit DTreesImpl::findSplitOrdClass( int vi, const vector& _sidx, double initQuality ) -{ - int n = (int)_sidx.size(); - int m = (int)classLabels.size(); - - cv::AutoBuffer buf(n*(sizeof(float) + sizeof(int)) + m*2*sizeof(double)); - const int* sidx = &_sidx[0]; - const int* responses = &w->cat_responses[0]; - const double* weights = &w->sample_weights[0]; - double* lcw = (double*)buf.data(); - double* rcw = lcw + m; - float* values = (float*)(rcw + m); - int* sorted_idx = (int*)(values + n); - int i, best_i = -1; - double best_val = initQuality; - - for( i = 0; i < m; i++ ) - lcw[i] = rcw[i] = 0.; - - w->data->getValues( vi, _sidx, values ); - - for( i = 0; i < n; i++ ) - { - sorted_idx[i] = i; - int si = sidx[i]; - rcw[responses[si]] += weights[si]; - } - - std::sort(sorted_idx, sorted_idx + n, cmp_lt_idx(values)); - - double L = 0, R = 0, lsum2 = 0, rsum2 = 0; - for( i = 0; i < m; i++ ) - { - double wval = rcw[i]; - R += wval; - rsum2 += wval*wval; - } - - for( i = 0; i < n - 1; i++ ) - { - int curr = sorted_idx[i]; - int next = sorted_idx[i+1]; - int si = sidx[curr]; - double wval = weights[si], w2 = wval*wval; - L += wval; R -= wval; - int idx = responses[si]; - double lv = lcw[idx], rv = rcw[idx]; - lsum2 += 2*lv*wval + w2; - rsum2 -= 2*rv*wval - w2; - lcw[idx] = lv + wval; rcw[idx] = rv - wval; - - float value_between = (values[next] + values[curr]) * 0.5f; - if( value_between > values[curr] && value_between < values[next] ) - { - double val = (lsum2*R + rsum2*L)/(L*R); - if( best_val < val ) - { - best_val = val; - best_i = i; - } - } - } - - WSplit split; - if( best_i >= 0 ) - { - split.varIdx = vi; - split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f; - split.inversed = false; - split.quality = (float)best_val; - } - return split; -} - -// simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector. 
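Concretely, the modification is that both a category's class-count vector and each cluster sum are rescaled by their L1 norms before the Euclidean comparison, so categories cluster by the shape of their class distribution rather than by how often they occur. A sketch of the resulting assignment step (hypothetical free function mirroring the inner loop of the method below):

```cpp
#include <cfloat>

// Find the cluster whose L1-normalized sum is closest to the
// L1-normalized vector v; vWeight = 1/|v|_1, cWeights[i] = 1/|csum_i|_1.
static int nearestCluster(const double* v, const double* csums,
                          int k, int m, double vWeight,
                          const double* cWeights)
{
    int best = -1;
    double bestDist = DBL_MAX;
    for (int idx = 0; idx < k; idx++)
    {
        const double* s = csums + idx * m;
        double d2 = 0;
        for (int j = 0; j < m; j++)
        {
            double t = v[j] * vWeight - s[j] * cWeights[idx];
            d2 += t * t;
        }
        if (d2 < bestDist) { bestDist = d2; best = idx; }
    }
    return best;
}
```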
-void DTreesImpl::clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels ) -{ - int iters = 0, max_iters = 100; - int i, j, idx; - cv::AutoBuffer buf(n + k); - double *v_weights = buf.data(), *c_weights = buf.data() + n; - bool modified = true; - RNG r((uint64)-1); - - // assign labels randomly - for( i = 0; i < n; i++ ) - { - double sum = 0; - const double* v = vectors + i*m; - labels[i] = i < k ? i : r.uniform(0, k); - - // compute weight of each vector - for( j = 0; j < m; j++ ) - sum += v[j]; - v_weights[i] = sum ? 1./sum : 0.; - } - - for( i = 0; i < n; i++ ) - { - int i1 = r.uniform(0, n); - int i2 = r.uniform(0, n); - std::swap( labels[i1], labels[i2] ); - } - - for( iters = 0; iters <= max_iters; iters++ ) - { - // calculate csums - for( i = 0; i < k; i++ ) - { - for( j = 0; j < m; j++ ) - csums[i*m + j] = 0; - } - - for( i = 0; i < n; i++ ) - { - const double* v = vectors + i*m; - double* s = csums + labels[i]*m; - for( j = 0; j < m; j++ ) - s[j] += v[j]; - } - - // exit the loop here, when we have up-to-date csums - if( iters == max_iters || !modified ) - break; - - modified = false; - - // calculate weight of each cluster - for( i = 0; i < k; i++ ) - { - const double* s = csums + i*m; - double sum = 0; - for( j = 0; j < m; j++ ) - sum += s[j]; - c_weights[i] = sum ? 1./sum : 0; - } - - // now for each vector determine the closest cluster - for( i = 0; i < n; i++ ) - { - const double* v = vectors + i*m; - double alpha = v_weights[i]; - double min_dist2 = DBL_MAX; - int min_idx = -1; - - for( idx = 0; idx < k; idx++ ) - { - const double* s = csums + idx*m; - double dist2 = 0., beta = c_weights[idx]; - for( j = 0; j < m; j++ ) - { - double t = v[j]*alpha - s[j]*beta; - dist2 += t*t; - } - if( min_dist2 > dist2 ) - { - min_dist2 = dist2; - min_idx = idx; - } - } - - if( min_idx != labels[i] ) - modified = true; - labels[i] = min_idx; - } - } -} - -DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector& _sidx, - double initQuality, int* subset ) -{ - int _mi = getCatCount(vi), mi = _mi; - int n = (int)_sidx.size(); - int m = (int)classLabels.size(); - - int base_size = m*(3 + mi) + mi + 1; - if( m > 2 && mi > params.getMaxCategories() ) - base_size += m*std::min(params.getMaxCategories(), n) + mi; - else - base_size += mi; - AutoBuffer buf(base_size + n); - - double* lc = buf.data(); - double* rc = lc + m; - double* _cjk = rc + m*2, *cjk = _cjk; - double* c_weights = cjk + m*mi; - - int* labels = (int*)(buf.data() + base_size); - w->data->getNormCatValues(vi, _sidx, labels); - const int* responses = &w->cat_responses[0]; - const double* weights = &w->sample_weights[0]; - - int* cluster_labels = 0; - double** dbl_ptr = 0; - int i, j, k, si, idx; - double L = 0, R = 0; - double best_val = initQuality; - int prevcode = 0, best_subset = -1, subset_i, subset_n, subtract = 0; - - // init array of counters: - // c_{jk} - number of samples that have vi-th input variable = j and response = k. 
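Two details of the subset search below are worth spelling out. First, the c_{jk} table initialized next is a weighted contingency table: entry (j, k) accumulates the total sample weight with category j and class k. Second, for m > 2 classes the loop walks all candidate subsets in Gray-code order, so exactly one category changes side per step; the patch recovers the index of the changed bit from a float's exponent, which a portable sketch (illustration only, not the patch's version) can do with a plain loop:

```cpp
// Sketch: index of the single bit that differs between the Gray codes of
// subset_i and subset_i - 1 (valid for subset_i >= 1, as in the loop below).
static int grayChangedBit(int subset_i)
{
    const int gray = (subset_i >> 1) ^ subset_i;
    const int prev = ((subset_i - 1) >> 1) ^ (subset_i - 1);
    int diff = gray ^ prev;   // exactly one bit is set here
    int idx = 0;
    while (!(diff & 1)) { diff >>= 1; ++idx; }
    return idx;
}
```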
- for( j = -1; j < mi; j++ ) - for( k = 0; k < m; k++ ) - cjk[j*m + k] = 0; - - for( i = 0; i < n; i++ ) - { - si = _sidx[i]; - j = labels[i]; - k = responses[si]; - cjk[j*m + k] += weights[si]; - } - - if( m > 2 ) - { - if( mi > params.getMaxCategories() ) - { - mi = std::min(params.getMaxCategories(), n); - cjk = c_weights + _mi; - cluster_labels = (int*)(cjk + m*mi); - clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels ); - } - subset_i = 1; - subset_n = 1 << mi; - } - else - { - CV_Assert( m == 2 ); - dbl_ptr = (double**)(c_weights + _mi); - for( j = 0; j < mi; j++ ) - dbl_ptr[j] = cjk + j*2 + 1; - std::sort(dbl_ptr, dbl_ptr + mi, cmp_lt_ptr()); - subset_i = 0; - subset_n = mi; - } - - for( k = 0; k < m; k++ ) - { - double sum = 0; - for( j = 0; j < mi; j++ ) - sum += cjk[j*m + k]; - CV_Assert(sum > 0); - rc[k] = sum; - lc[k] = 0; - } - - for( j = 0; j < mi; j++ ) - { - double sum = 0; - for( k = 0; k < m; k++ ) - sum += cjk[j*m + k]; - c_weights[j] = sum; - R += c_weights[j]; - } - - for( ; subset_i < subset_n; subset_i++ ) - { - double lsum2 = 0, rsum2 = 0; - - if( m == 2 ) - idx = (int)(dbl_ptr[subset_i] - cjk)/2; - else - { - int graycode = (subset_i>>1)^subset_i; - int diff = graycode ^ prevcode; - - // determine index of the changed bit. - Cv32suf u; - idx = diff >= (1 << 16) ? 16 : 0; - u.f = (float)(((diff >> 16) | diff) & 65535); - idx += (u.i >> 23) - 127; - subtract = graycode < prevcode; - prevcode = graycode; - } - - double* crow = cjk + idx*m; - double weight = c_weights[idx]; - if( weight < FLT_EPSILON ) - continue; - - if( !subtract ) - { - for( k = 0; k < m; k++ ) - { - double t = crow[k]; - double lval = lc[k] + t; - double rval = rc[k] - t; - lsum2 += lval*lval; - rsum2 += rval*rval; - lc[k] = lval; rc[k] = rval; - } - L += weight; - R -= weight; - } - else - { - for( k = 0; k < m; k++ ) - { - double t = crow[k]; - double lval = lc[k] - t; - double rval = rc[k] + t; - lsum2 += lval*lval; - rsum2 += rval*rval; - lc[k] = lval; rc[k] = rval; - } - L -= weight; - R += weight; - } - - if( L > FLT_EPSILON && R > FLT_EPSILON ) - { - double val = (lsum2*R + rsum2*L)/(L*R); - if( best_val < val ) - { - best_val = val; - best_subset = subset_i; - } - } - } - - WSplit split; - if( best_subset >= 0 ) - { - split.varIdx = vi; - split.quality = (float)best_val; - memset( subset, 0, getSubsetSize(vi) * sizeof(int) ); - if( m == 2 ) - { - for( i = 0; i <= best_subset; i++ ) - { - idx = (int)(dbl_ptr[i] - cjk) >> 1; - subset[idx >> 5] |= 1 << (idx & 31); - } - } - else - { - for( i = 0; i < _mi; i++ ) - { - idx = cluster_labels ? 
cluster_labels[i] : i; - if( best_subset & (1 << idx) ) - subset[i >> 5] |= 1 << (i & 31); - } - } - } - return split; -} - -DTreesImpl::WSplit DTreesImpl::findSplitOrdReg( int vi, const vector& _sidx, double initQuality ) -{ - const double* weights = &w->sample_weights[0]; - int n = (int)_sidx.size(); - - AutoBuffer buf(n*(sizeof(int) + sizeof(float))); - - float* values = (float*)buf.data(); - int* sorted_idx = (int*)(values + n); - w->data->getValues(vi, _sidx, values); - const double* responses = &w->ord_responses[0]; - - int i, si, best_i = -1; - double L = 0, R = 0; - double best_val = initQuality, lsum = 0, rsum = 0; - - for( i = 0; i < n; i++ ) - { - sorted_idx[i] = i; - si = _sidx[i]; - R += weights[si]; - rsum += weights[si]*responses[si]; - } - - std::sort(sorted_idx, sorted_idx + n, cmp_lt_idx(values)); - - // find the optimal split - for( i = 0; i < n - 1; i++ ) - { - int curr = sorted_idx[i]; - int next = sorted_idx[i+1]; - si = _sidx[curr]; - double wval = weights[si]; - double t = responses[si]*wval; - L += wval; R -= wval; - lsum += t; rsum -= t; - - float value_between = (values[next] + values[curr]) * 0.5f; - if( value_between > values[curr] && value_between < values[next] ) - { - double val = (lsum*lsum*R + rsum*rsum*L)/(L*R); - if( best_val < val ) - { - best_val = val; - best_i = i; - } - } - } - - WSplit split; - if( best_i >= 0 ) - { - split.varIdx = vi; - split.c = (values[sorted_idx[best_i]] + values[sorted_idx[best_i+1]])*0.5f; - split.inversed = false; - split.quality = (float)best_val; - } - return split; -} - -DTreesImpl::WSplit DTreesImpl::findSplitCatReg( int vi, const vector& _sidx, - double initQuality, int* subset ) -{ - const double* weights = &w->sample_weights[0]; - const double* responses = &w->ord_responses[0]; - int n = (int)_sidx.size(); - int mi = getCatCount(vi); - - AutoBuffer buf(3*mi + 3 + n); - double* sum = buf.data() + 1; - double* counts = sum + mi + 1; - double** sum_ptr = (double**)(counts + mi); - int* cat_labels = (int*)(sum_ptr + mi); - - w->data->getNormCatValues(vi, _sidx, cat_labels); - - double L = 0, R = 0, best_val = initQuality, lsum = 0, rsum = 0; - int i, si, best_subset = -1, subset_i; - - for( i = -1; i < mi; i++ ) - sum[i] = counts[i] = 0; - - // calculate sum response and weight of each category of the input var - for( i = 0; i < n; i++ ) - { - int idx = cat_labels[i]; - si = _sidx[i]; - double wval = weights[si]; - sum[idx] += responses[si]*wval; - counts[idx] += wval; - } - - // calculate average response in each category - for( i = 0; i < mi; i++ ) - { - R += counts[i]; - rsum += sum[i]; - sum[i] = fabs(counts[i]) > DBL_EPSILON ? 
sum[i]/counts[i] : 0; - sum_ptr[i] = sum + i; - } - - std::sort(sum_ptr, sum_ptr + mi, cmp_lt_ptr()); - - // revert back to unnormalized sums - // (there should be a very little loss in accuracy) - for( i = 0; i < mi; i++ ) - sum[i] *= counts[i]; - - for( subset_i = 0; subset_i < mi-1; subset_i++ ) - { - int idx = (int)(sum_ptr[subset_i] - sum); - double ni = counts[idx]; - - if( ni > FLT_EPSILON ) - { - double s = sum[idx]; - lsum += s; L += ni; - rsum -= s; R -= ni; - - if( L > FLT_EPSILON && R > FLT_EPSILON ) - { - double val = (lsum*lsum*R + rsum*rsum*L)/(L*R); - if( best_val < val ) - { - best_val = val; - best_subset = subset_i; - } - } - } - } - - WSplit split; - if( best_subset >= 0 ) - { - split.varIdx = vi; - split.quality = (float)best_val; - memset( subset, 0, getSubsetSize(vi) * sizeof(int)); - for( i = 0; i <= best_subset; i++ ) - { - int idx = (int)(sum_ptr[i] - sum); - subset[idx >> 5] |= 1 << (idx & 31); - } - } - return split; -} - -int DTreesImpl::calcDir( int splitidx, const vector& _sidx, - vector& _sleft, vector& _sright ) -{ - WSplit split = w->wsplits[splitidx]; - int i, si, n = (int)_sidx.size(), vi = split.varIdx; - _sleft.reserve(n); - _sright.reserve(n); - _sleft.clear(); - _sright.clear(); - - AutoBuffer buf(n); - int mi = getCatCount(vi); - double wleft = 0, wright = 0; - const double* weights = &w->sample_weights[0]; - - if( mi <= 0 ) // split on an ordered variable - { - float c = split.c; - float* values = buf.data(); - w->data->getValues(vi, _sidx, values); - - for( i = 0; i < n; i++ ) - { - si = _sidx[i]; - if( values[i] <= c ) - { - _sleft.push_back(si); - wleft += weights[si]; - } - else - { - _sright.push_back(si); - wright += weights[si]; - } - } - } - else - { - const int* subset = &w->wsubsets[split.subsetOfs]; - int* cat_labels = (int*)buf.data(); - w->data->getNormCatValues(vi, _sidx, cat_labels); - - for( i = 0; i < n; i++ ) - { - si = _sidx[i]; - unsigned u = cat_labels[i]; - if( CV_DTREE_CAT_DIR(u, subset) < 0 ) - { - _sleft.push_back(si); - wleft += weights[si]; - } - else - { - _sright.push_back(si); - wright += weights[si]; - } - } - } - CV_Assert( (int)_sleft.size() < n && (int)_sright.size() < n ); - return wleft > wright ? -1 : 1; -} - -int DTreesImpl::pruneCV( int root ) -{ - vector ab; - - // 1. build tree sequence for each cv fold, calculate error_{Tj,beta_k}. - // 2. choose the best tree index (if need, apply 1SE rule). - // 3. store the best index and cut the branches. 
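Steps 1-3 above are standard cost-complexity ("weakest link") pruning. As a hedged reference for the code below (illustration only), the quantity that updateTreeRNC() assigns to parent->alpha is the complexity weight at which cutting a subtree becomes profitable:

```cpp
// Sketch: for an internal node t with subtree T_t,
//     alpha(t) = (R(t) - R(T_t)) / (|leaves(T_t)| - 1)
// where R() is the node/subtree risk (the per-fold risk during
// cross-validation) and |leaves(T_t)| is what the code calls 'complexity'.
// Each pruning pass removes the branches with the smallest alpha first.
static double weakestLinkAlpha(double node_risk, double subtree_risk,
                               int subtree_leaves)
{
    return (node_risk - subtree_risk) / (subtree_leaves - 1);
}
```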
- - int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count; - // currently, 1SE for regression is not implemented - bool use_1se = params.use1SERule != 0 && _isClassifier; - double min_err = 0, min_err_se = 0; - int min_idx = -1; - - // build the main tree sequence, calculate alpha's - for(;;tree_count++) - { - double min_alpha = updateTreeRNC(root, tree_count, -1); - if( cutTree(root, tree_count, -1, min_alpha) ) - break; - - ab.push_back(min_alpha); - } - - if( tree_count > 0 ) - { - ab[0] = 0.; - - for( ti = 1; ti < tree_count-1; ti++ ) - ab[ti] = std::sqrt(ab[ti]*ab[ti+1]); - ab[tree_count-1] = DBL_MAX*0.5; - - Mat err_jk(cv_n, tree_count, CV_64F); - - for( j = 0; j < cv_n; j++ ) - { - int tj = 0, tk = 0; - for( ; tj < tree_count; tj++ ) - { - double min_alpha = updateTreeRNC(root, tj, j); - if( cutTree(root, tj, j, min_alpha) ) - min_alpha = DBL_MAX; - - for( ; tk < tree_count; tk++ ) - { - if( ab[tk] > min_alpha ) - break; - err_jk.at(j, tk) = w->wnodes[root].tree_error; - } - } - } - - for( ti = 0; ti < tree_count; ti++ ) - { - double sum_err = 0; - for( j = 0; j < cv_n; j++ ) - sum_err += err_jk.at(j, ti); - if( ti == 0 || sum_err < min_err ) - { - min_err = sum_err; - min_idx = ti; - if( use_1se ) - min_err_se = sqrt( sum_err*(n - sum_err) ); - } - else if( sum_err < min_err + min_err_se ) - min_idx = ti; - } - } - - return min_idx; -} - -double DTreesImpl::updateTreeRNC( int root, double T, int fold ) -{ - int nidx = root, pidx = -1, cv_n = params.getCVFolds(); - double min_alpha = DBL_MAX; - - for(;;) - { - WNode *node = 0, *parent = 0; - - for(;;) - { - node = &w->wnodes[nidx]; - double t = fold >= 0 ? w->cv_Tn[nidx*cv_n + fold] : node->Tn; - if( t <= T || node->left < 0 ) - { - node->complexity = 1; - node->tree_risk = node->node_risk; - node->tree_error = 0.; - if( fold >= 0 ) - { - node->tree_risk = w->cv_node_risk[nidx*cv_n + fold]; - node->tree_error = w->cv_node_error[nidx*cv_n + fold]; - } - break; - } - nidx = node->left; - } - - for( pidx = node->parent; pidx >= 0 && w->wnodes[pidx].right == nidx; - nidx = pidx, pidx = w->wnodes[pidx].parent ) - { - node = &w->wnodes[nidx]; - parent = &w->wnodes[pidx]; - parent->complexity += node->complexity; - parent->tree_risk += node->tree_risk; - parent->tree_error += node->tree_error; - - parent->alpha = ((fold >= 0 ? w->cv_node_risk[pidx*cv_n + fold] : parent->node_risk) - - parent->tree_risk)/(parent->complexity - 1); - min_alpha = std::min( min_alpha, parent->alpha ); - } - - if( pidx < 0 ) - break; - - node = &w->wnodes[nidx]; - parent = &w->wnodes[pidx]; - parent->complexity = node->complexity; - parent->tree_risk = node->tree_risk; - parent->tree_error = node->tree_error; - nidx = parent->right; - } - - return min_alpha; -} - -bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha ) -{ - int cv_n = params.getCVFolds(), nidx = root, pidx = -1; - WNode* node = &w->wnodes[root]; - if( node->left < 0 ) - return true; - - for(;;) - { - for(;;) - { - node = &w->wnodes[nidx]; - double t = fold >= 0 ? 
w->cv_Tn[nidx*cv_n + fold] : node->Tn; - if( t <= T || node->left < 0 ) - break; - if( node->alpha <= min_alpha + FLT_EPSILON ) - { - if( fold >= 0 ) - w->cv_Tn[nidx*cv_n + fold] = T; - else - node->Tn = T; - if( nidx == root ) - return true; - break; - } - nidx = node->left; - } - - for( pidx = node->parent; pidx >= 0 && w->wnodes[pidx].right == nidx; - nidx = pidx, pidx = w->wnodes[pidx].parent ) - ; - - if( pidx < 0 ) - break; - - nidx = w->wnodes[pidx].right; - } - - return false; -} - -float DTreesImpl::predictTrees( const Range& range, const Mat& sample, int flags ) const -{ - CV_Assert( sample.type() == CV_32F ); - - int predictType = flags & PREDICT_MASK; - int nvars = (int)varIdx.size(); - if( nvars == 0 ) - nvars = (int)varType.size(); - int i, ncats = (int)catOfs.size(), nclasses = (int)classLabels.size(); - int catbufsize = ncats > 0 ? nvars : 0; - AutoBuffer buf(nclasses + catbufsize + 1); - int* votes = buf.data(); - int* catbuf = votes + nclasses; - const int* cvidx = (flags & (COMPRESSED_INPUT|PREPROCESSED_INPUT)) == 0 && !varIdx.empty() ? &compVarIdx[0] : 0; - const uchar* vtype = &varType[0]; - const Vec2i* cofs = !catOfs.empty() ? &catOfs[0] : 0; - const int* cmap = !catMap.empty() ? &catMap[0] : 0; - const float* psample = sample.ptr(); - const float* missingSubstPtr = !missingSubst.empty() ? &missingSubst[0] : 0; - size_t sstep = sample.isContinuous() ? 1 : sample.step/sizeof(float); - double sum = 0.; - int lastClassIdx = -1; - const float MISSED_VAL = TrainData::missingValue(); - - for( i = 0; i < catbufsize; i++ ) - catbuf[i] = -1; - - if( predictType == PREDICT_AUTO ) - { - predictType = !_isClassifier || (classLabels.size() == 2 && (flags & RAW_OUTPUT) != 0) ? - PREDICT_SUM : PREDICT_MAX_VOTE; - } - - if( predictType == PREDICT_MAX_VOTE ) - { - for( i = 0; i < nclasses; i++ ) - votes[i] = 0; - } - - for( int ridx = range.start; ridx < range.end; ridx++ ) - { - int nidx = roots[ridx], prev = nidx, c = 0; - - for(;;) - { - prev = nidx; - const Node& node = nodes[nidx]; - if( node.split < 0 ) - break; - const Split& split = splits[node.split]; - int vi = split.varIdx; - int ci = cvidx ? cvidx[vi] : vi; - float val = psample[ci*sstep]; - if( val == MISSED_VAL ) - { - if( !missingSubstPtr ) - { - nidx = node.defaultDir < 0 ? node.left : node.right; - continue; - } - val = missingSubstPtr[vi]; - } - - if( vtype[vi] == VAR_ORDERED ) - nidx = val <= split.c ? node.left : node.right; - else - { - if( flags & PREPROCESSED_INPUT ) - c = cvRound(val); - else - { - c = catbuf[ci]; - if( c < 0 ) - { - int a = c = cofs[vi][0]; - int b = cofs[vi][1]; - - int ival = cvRound(val); - if( ival != val ) - CV_Error( CV_StsBadArg, - "one of input categorical variable is not an integer" ); - - CV_Assert(cmap != NULL); - while( a < b ) - { - c = (a + b) >> 1; - if( ival < cmap[c] ) - b = c; - else if( ival > cmap[c] ) - a = c+1; - else - break; - } - - CV_Assert( c >= 0 && ival == cmap[c] ); - - c -= cofs[vi][0]; - catbuf[ci] = c; - } - const int* subset = &subsets[split.subsetOfs]; - unsigned u = c; - nidx = CV_DTREE_CAT_DIR(u, subset) < 0 ? node.left : node.right; - } - } - } - - if( predictType == PREDICT_SUM ) - sum += nodes[prev].value; - else - { - lastClassIdx = nodes[prev].classIdx; - votes[lastClassIdx]++; - } - } - - if( predictType == PREDICT_MAX_VOTE ) - { - int best_idx = lastClassIdx; - if( range.end - range.start > 1 ) - { - best_idx = 0; - for( i = 1; i < nclasses; i++ ) - if( votes[best_idx] < votes[i] ) - best_idx = i; - } - sum = (flags & RAW_OUTPUT) ? 
(float)best_idx : classLabels[best_idx]; - } - - return (float)sum; -} - - -float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags ) const -{ - CV_Assert( !roots.empty() ); - Mat samples = _samples.getMat(), results; - int i, nsamples = samples.rows; - int rtype = CV_32F; - bool needresults = _results.needed(); - float retval = 0.f; - bool iscls = isClassifier(); - float scale = !iscls ? 1.f/(int)roots.size() : 1.f; - - if( iscls && (flags & PREDICT_MASK) == PREDICT_MAX_VOTE ) - rtype = CV_32S; - - if( needresults ) - { - _results.create(nsamples, 1, rtype); - results = _results.getMat(); - } - else - nsamples = std::min(nsamples, 1); - - for( i = 0; i < nsamples; i++ ) - { - float val = predictTrees( Range(0, (int)roots.size()), samples.row(i), flags )*scale; - if( needresults ) - { - if( rtype == CV_32F ) - results.at(i) = val; - else - results.at(i) = cvRound(val); - } - if( i == 0 ) - retval = val; - } - return retval; -} - -void DTreesImpl::writeTrainingParams(FileStorage& fs) const -{ - fs << "use_surrogates" << (params.useSurrogates ? 1 : 0); - fs << "max_categories" << params.getMaxCategories(); - fs << "regression_accuracy" << params.getRegressionAccuracy(); - - fs << "max_depth" << params.getMaxDepth(); - fs << "min_sample_count" << params.getMinSampleCount(); - fs << "cross_validation_folds" << params.getCVFolds(); - - if( params.getCVFolds() > 1 ) - fs << "use_1se_rule" << (params.use1SERule ? 1 : 0); - - if( !params.priors.empty() ) - fs << "priors" << params.priors; -} - -void DTreesImpl::writeParams(FileStorage& fs) const -{ - fs << "is_classifier" << isClassifier(); - fs << "var_all" << (int)varType.size(); - fs << "var_count" << getVarCount(); - - int ord_var_count = 0, cat_var_count = 0; - int i, n = (int)varType.size(); - for( i = 0; i < n; i++ ) - if( varType[i] == VAR_ORDERED ) - ord_var_count++; - else - cat_var_count++; - fs << "ord_var_count" << ord_var_count; - fs << "cat_var_count" << cat_var_count; - - fs << "training_params" << "{"; - writeTrainingParams(fs); - - fs << "}"; - - if( !varIdx.empty() ) - { - fs << "global_var_idx" << 1; - fs << "var_idx" << varIdx; - } - - fs << "var_type" << varType; - - if( !catOfs.empty() ) - fs << "cat_ofs" << catOfs; - if( !catMap.empty() ) - fs << "cat_map" << catMap; - if( !classLabels.empty() ) - fs << "class_labels" << classLabels; - if( !missingSubst.empty() ) - fs << "missing_subst" << missingSubst; -} - -void DTreesImpl::writeSplit( FileStorage& fs, int splitidx ) const -{ - const Split& split = splits[splitidx]; - - fs << "{:"; - - int vi = split.varIdx; - fs << "var" << vi; - fs << "quality" << split.quality; - - if( varType[vi] == VAR_CATEGORICAL ) // split on a categorical var - { - int i, n = getCatCount(vi), to_right = 0; - const int* subset = &subsets[split.subsetOfs]; - for( i = 0; i < n; i++ ) - to_right += CV_DTREE_CAT_DIR(i, subset) > 0; - - // ad-hoc rule when to use inverse categorical split notation - // to achieve more compact and clear representation - int default_dir = to_right <= 1 || to_right <= std::min(3, n/2) || to_right <= n/3 ? -1 : 1; - - fs << (default_dir*(split.inversed ? -1 : 1) > 0 ? "in" : "not_in") << "[:"; - - for( i = 0; i < n; i++ ) - { - int dir = CV_DTREE_CAT_DIR(i, subset); - if( dir*default_dir < 0 ) - fs << i; - } - - fs << "]"; - } - else - fs << (!split.inversed ? 
"le" : "gt") << split.c; - - fs << "}"; -} - -void DTreesImpl::writeNode( FileStorage& fs, int nidx, int depth ) const -{ - const Node& node = nodes[nidx]; - fs << "{"; - fs << "depth" << depth; - fs << "value" << node.value; - - if( _isClassifier ) - fs << "norm_class_idx" << node.classIdx; - - if( node.split >= 0 ) - { - fs << "splits" << "["; - - for( int splitidx = node.split; splitidx >= 0; splitidx = splits[splitidx].next ) - writeSplit( fs, splitidx ); - - fs << "]"; - } - - fs << "}"; -} - -void DTreesImpl::writeTree( FileStorage& fs, int root ) const -{ - fs << "nodes" << "["; - - int nidx = root, pidx = 0, depth = 0; - const Node *node = 0; - - // traverse the tree and save all the nodes in depth-first order - for(;;) - { - for(;;) - { - writeNode( fs, nidx, depth ); - node = &nodes[nidx]; - if( node->left < 0 ) - break; - nidx = node->left; - depth++; - } - - for( pidx = node->parent; pidx >= 0 && nodes[pidx].right == nidx; - nidx = pidx, pidx = nodes[pidx].parent ) - depth--; - - if( pidx < 0 ) - break; - - nidx = nodes[pidx].right; - } - - fs << "]"; -} - -void DTreesImpl::write( FileStorage& fs ) const -{ - writeFormat(fs); - writeParams(fs); - writeTree(fs, roots[0]); -} - -void DTreesImpl::readParams( const FileNode& fn ) -{ - _isClassifier = (int)fn["is_classifier"] != 0; - int varAll = (int)fn["var_all"]; - int varCount = (int)fn["var_count"]; - /*int cat_var_count = (int)fn["cat_var_count"]; - int ord_var_count = (int)fn["ord_var_count"];*/ - - if (varAll <= 0) - CV_Error(Error::StsParseError, "The field \"var_all\" of DTree classifier is missing or non-positive"); - - FileNode tparams_node = fn["training_params"]; - - TreeParams params0 = TreeParams(); - - if( !tparams_node.empty() ) // training parameters are not necessary - { - params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0; - params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"])); - params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]); - params0.setMaxDepth((int)tparams_node["max_depth"]); - params0.setMinSampleCount((int)tparams_node["min_sample_count"]); - params0.setCVFolds((int)tparams_node["cross_validation_folds"]); - - if( params0.getCVFolds() > 1 ) - { - params.use1SERule = (int)tparams_node["use_1se_rule"] != 0; - } - - tparams_node["priors"] >> params0.priors; - } - - readVectorOrMat(fn["var_idx"], varIdx); - fn["var_type"] >> varType; - - bool isLegacy = false; - if (fn["format"].empty()) // Export bug until OpenCV 3.2: https://github.com/opencv/opencv/pull/6314 - { - if (!fn["cat_ofs"].empty()) - isLegacy = false; // 2.4 doesn't store "cat_ofs" - else if (!fn["missing_subst"].empty()) - isLegacy = false; // 2.4 doesn't store "missing_subst" - else if (!fn["class_labels"].empty()) - isLegacy = false; // 2.4 doesn't store "class_labels" - else if ((int)varType.size() != varAll) - isLegacy = true; // 3.0+: https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1576 - else if (/*(int)varType.size() == varAll &&*/ varCount == varAll) - isLegacy = true; - else - { - // 3.0+: - // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/tree.cpp#L1552-L1553 - // - https://github.com/opencv/opencv/blame/3.0.0/modules/ml/src/precomp.hpp#L296 - isLegacy = !(varCount + 1 == varAll); - } - CV_LOG_INFO(NULL, "ML/DTrees: possible missing 'format' field due to bug of OpenCV export implementation. " - "Details: https://github.com/opencv/opencv/issues/5412. Consider re-exporting of saved ML model. 
" - "isLegacy = " << isLegacy); - } - else - { - int format = 0; - fn["format"] >> format; - CV_CheckGT(format, 0, ""); - isLegacy = format < 3; - } - - if (isLegacy && (int)varType.size() <= varAll) - { - std::vector extendedTypes(varAll + 1, 0); - - int i = 0, n; - if (!varIdx.empty()) - { - n = (int)varIdx.size(); - for (; i < n; ++i) - { - int var = varIdx[i]; - extendedTypes[var] = varType[i]; - } - } - else - { - n = (int)varType.size(); - for (; i < n; ++i) - { - extendedTypes[i] = varType[i]; - } - } - extendedTypes[varAll] = (uchar)(_isClassifier ? VAR_CATEGORICAL : VAR_ORDERED); - extendedTypes.swap(varType); - } - - readVectorOrMat(fn["cat_map"], catMap); - - if (isLegacy) - { - // generating "catOfs" from "cat_count" - catOfs.clear(); - classLabels.clear(); - std::vector counts; - readVectorOrMat(fn["cat_count"], counts); - unsigned int i = 0, j = 0, curShift = 0, size = (int)varType.size() - 1; - for (; i < size; ++i) - { - Vec2i newOffsets(0, 0); - if (varType[i] == VAR_CATEGORICAL) // only categorical vars are represented in catMap - { - newOffsets[0] = curShift; - curShift += counts[j]; - newOffsets[1] = curShift; - ++j; - } - catOfs.push_back(newOffsets); - } - // other elements in "catMap" are "classLabels" - if (curShift < catMap.size()) - { - classLabels.insert(classLabels.end(), catMap.begin() + curShift, catMap.end()); - catMap.erase(catMap.begin() + curShift, catMap.end()); - } - } - else - { - fn["cat_ofs"] >> catOfs; - fn["missing_subst"] >> missingSubst; - fn["class_labels"] >> classLabels; - } - - // init var mapping for node reading (var indexes or varIdx indexes) - bool globalVarIdx = false; - fn["global_var_idx"] >> globalVarIdx; - if (globalVarIdx || varIdx.empty()) - setRangeVector(varMapping, (int)varType.size()); - else - varMapping = varIdx; - - initCompVarIdx(); - setDParams(params0); -} - -int DTreesImpl::readSplit( const FileNode& fn ) -{ - Split split; - - int vi = (int)fn["var"]; - CV_Assert( 0 <= vi && vi <= (int)varType.size() ); - vi = varMapping[vi]; // convert to varIdx if needed - split.varIdx = vi; - - if( varType[vi] == VAR_CATEGORICAL ) // split on categorical var - { - int i, val, ssize = getSubsetSize(vi); - split.subsetOfs = (int)subsets.size(); - for( i = 0; i < ssize; i++ ) - subsets.push_back(0); - int* subset = &subsets[split.subsetOfs]; - FileNode fns = fn["in"]; - if( fns.empty() ) - { - fns = fn["not_in"]; - split.inversed = true; - } - - if( fns.isInt() ) - { - val = (int)fns; - subset[val >> 5] |= 1 << (val & 31); - } - else - { - FileNodeIterator it = fns.begin(); - int n = (int)fns.size(); - for( i = 0; i < n; i++, ++it ) - { - val = (int)*it; - subset[val >> 5] |= 1 << (val & 31); - } - } - - // for categorical splits we do not use inversed splits, - // instead we inverse the variable set in the split - if( split.inversed ) - { - for( i = 0; i < ssize; i++ ) - subset[i] ^= -1; - split.inversed = false; - } - } - else - { - FileNode cmpNode = fn["le"]; - if( cmpNode.empty() ) - { - cmpNode = fn["gt"]; - split.inversed = true; - } - split.c = (float)cmpNode; - } - - split.quality = (float)fn["quality"]; - splits.push_back(split); - - return (int)(splits.size() - 1); -} - -int DTreesImpl::readNode( const FileNode& fn ) -{ - Node node; - node.value = (double)fn["value"]; - - if( _isClassifier ) - node.classIdx = (int)fn["norm_class_idx"]; - - FileNode sfn = fn["splits"]; - if( !sfn.empty() ) - { - int i, n = (int)sfn.size(), prevsplit = -1; - FileNodeIterator it = sfn.begin(); - - for( i = 0; i < n; i++, ++it ) - { - int splitidx 
= readSplit(*it); - if( splitidx < 0 ) - break; - if( prevsplit < 0 ) - node.split = splitidx; - else - splits[prevsplit].next = splitidx; - prevsplit = splitidx; - } - } - nodes.push_back(node); - return (int)(nodes.size() - 1); -} - -int DTreesImpl::readTree( const FileNode& fn ) -{ - int i, n = (int)fn.size(), root = -1, pidx = -1; - FileNodeIterator it = fn.begin(); - - for( i = 0; i < n; i++, ++it ) - { - int nidx = readNode(*it); - if( nidx < 0 ) - break; - Node& node = nodes[nidx]; - node.parent = pidx; - if( pidx < 0 ) - root = nidx; - else - { - Node& parent = nodes[pidx]; - if( parent.left < 0 ) - parent.left = nidx; - else - parent.right = nidx; - } - if( node.split >= 0 ) - pidx = nidx; - else - { - while( pidx >= 0 && nodes[pidx].right >= 0 ) - pidx = nodes[pidx].parent; - } - } - roots.push_back(root); - return root; -} - -void DTreesImpl::read( const FileNode& fn ) -{ - clear(); - readParams(fn); - - FileNode fnodes = fn["nodes"]; - CV_Assert( !fnodes.empty() ); - readTree(fnodes); -} - -Ptr DTrees::create() -{ - return makePtr(); -} - -Ptr DTrees::load(const String& filepath, const String& nodeName) -{ - return Algorithm::load(filepath, nodeName); -} - - -} -} - -/* End of file. */ diff --git a/modules/ml/test/test_ann.cpp b/modules/ml/test/test_ann.cpp deleted file mode 100644 index a3782d25a8..0000000000 --- a/modules/ml/test/test_ann.cpp +++ /dev/null @@ -1,200 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. - -#include "test_precomp.hpp" - -// #define GENERATE_TESTDATA - -namespace opencv_test { namespace { - -struct Activation -{ - int id; - const char * name; -}; -void PrintTo(const Activation &a, std::ostream *os) { *os << a.name; } - -Activation activation_list[] = -{ - { ml::ANN_MLP::IDENTITY, "identity" }, - { ml::ANN_MLP::SIGMOID_SYM, "sigmoid_sym" }, - { ml::ANN_MLP::GAUSSIAN, "gaussian" }, - { ml::ANN_MLP::RELU, "relu" }, - { ml::ANN_MLP::LEAKYRELU, "leakyrelu" }, -}; - -typedef testing::TestWithParam< Activation > ML_ANN_Params; - -TEST_P(ML_ANN_Params, ActivationFunction) -{ - const Activation &activation = GetParam(); - const string dataname = "waveform"; - const string data_path = findDataFile(dataname + ".data"); - const string model_name = dataname + "_" + activation.name + ".yml"; - - Ptr tdata = TrainData::loadFromCSV(data_path, 0); - ASSERT_FALSE(tdata.empty()); - - // hack? 
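The "hack" flagged above (and repeated in the next test) pins cv::theRNG() to a fixed state so that setTrainTestSplit() shuffles the data identically on every run, then restores the previous state. A hedged sketch of the same idea as an RAII guard (hypothetical helper, not an OpenCV API):

```cpp
#include <opencv2/core.hpp>

// Hypothetical helper: save the global RNG state, force a fixed seed for
// one scope, restore the old state on exit.
struct RngStateGuard
{
    uint64 saved;
    explicit RngStateGuard(uint64 fixedState) : saved(cv::theRNG().state)
    {
        cv::theRNG().state = fixedState;
    }
    ~RngStateGuard() { cv::theRNG().state = saved; }
};

// usage, mirroring the test below:
//   { RngStateGuard guard(1027401484159173092ULL); tdata->setTrainTestSplit(500); }
```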
- const uint64 old_state = theRNG().state; - theRNG().state = 1027401484159173092; - tdata->setTrainTestSplit(500); - theRNG().state = old_state; - - Mat_ layerSizes(1, 4); - layerSizes(0, 0) = tdata->getNVars(); - layerSizes(0, 1) = 100; - layerSizes(0, 2) = 100; - layerSizes(0, 3) = tdata->getResponses().cols; - - Mat testSamples = tdata->getTestSamples(); - Mat rx, ry; - - { - Ptr x = ml::ANN_MLP::create(); - x->setActivationFunction(activation.id); - x->setLayerSizes(layerSizes); - x->setTrainMethod(ml::ANN_MLP::RPROP, 0.01, 0.1); - x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 300, 0.01)); - x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE); - ASSERT_TRUE(x->isTrained()); - x->predict(testSamples, rx); -#ifdef GENERATE_TESTDATA - x->save(cvtest::TS::ptr()->get_data_path() + model_name); -#endif - } - - { - const string model_path = findDataFile(model_name); - Ptr y = Algorithm::load(model_path); - ASSERT_TRUE(y); - y->predict(testSamples, ry); - EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON); - } -} - -INSTANTIATE_TEST_CASE_P(/**/, ML_ANN_Params, testing::ValuesIn(activation_list)); - -//================================================================================================== - -CV_ENUM(ANN_MLP_METHOD, ANN_MLP::RPROP, ANN_MLP::ANNEAL) - -typedef tuple ML_ANN_METHOD_Params; -typedef TestWithParam ML_ANN_METHOD; - -TEST_P(ML_ANN_METHOD, Test) -{ - int methodType = get<0>(GetParam()); - string methodName = get<1>(GetParam()); - int N = get<2>(GetParam()); - - String folder = string(cvtest::TS::ptr()->get_data_path()); - String original_path = findDataFile("waveform.data"); - string dataname = "waveform_" + methodName; - string weight_name = dataname + "_init_weight.yml.gz"; - string model_name = dataname + ".yml.gz"; - string response_name = dataname + "_response.yml.gz"; - - Ptr tdata2 = TrainData::loadFromCSV(original_path, 0); - ASSERT_FALSE(tdata2.empty()); - - Mat samples = tdata2->getSamples()(Range(0, N), Range::all()); - Mat responses(N, 3, CV_32FC1, Scalar(0)); - for (int i = 0; i < N; i++) - responses.at(i, static_cast(tdata2->getResponses().at(i, 0))) = 1; - - Ptr tdata = TrainData::create(samples, ml::ROW_SAMPLE, responses); - ASSERT_FALSE(tdata.empty()); - - // hack? 
- const uint64 old_state = theRNG().state; - theRNG().state = 0; - tdata->setTrainTestSplitRatio(0.8); - theRNG().state = old_state; - - Mat testSamples = tdata->getTestSamples(); - - // train 1st stage - - Ptr xx = ml::ANN_MLP::create(); - Mat_ layerSizes(1, 4); - layerSizes(0, 0) = tdata->getNVars(); - layerSizes(0, 1) = 30; - layerSizes(0, 2) = 30; - layerSizes(0, 3) = tdata->getResponses().cols; - xx->setLayerSizes(layerSizes); - xx->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM); - xx->setTrainMethod(ml::ANN_MLP::RPROP); - xx->setTermCriteria(TermCriteria(TermCriteria::COUNT, 1, 0.01)); - xx->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE); -#ifdef GENERATE_TESTDATA - { - FileStorage fs; - fs.open(cvtest::TS::ptr()->get_data_path() + weight_name, FileStorage::WRITE + FileStorage::BASE64); - xx->write(fs); - } -#endif - - // train 2nd stage - Mat r_gold; - Ptr x = ml::ANN_MLP::create(); - { - const string weight_file = findDataFile(weight_name); - FileStorage fs; - fs.open(weight_file, FileStorage::READ); - x->read(fs.root()); - } - x->setTrainMethod(methodType); - if (methodType == ml::ANN_MLP::ANNEAL) - { - x->setAnnealEnergyRNG(RNG(CV_BIG_INT(0xffffffff))); - x->setAnnealInitialT(12); - x->setAnnealFinalT(0.15); - x->setAnnealCoolingRatio(0.96); - x->setAnnealItePerStep(11); - } - x->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.01)); - x->train(tdata, ml::ANN_MLP::NO_OUTPUT_SCALE + ml::ANN_MLP::NO_INPUT_SCALE + ml::ANN_MLP::UPDATE_WEIGHTS); - ASSERT_TRUE(x->isTrained()); -#ifdef GENERATE_TESTDATA - x->save(cvtest::TS::ptr()->get_data_path() + model_name); - x->predict(testSamples, r_gold); - { - FileStorage fs_response(cvtest::TS::ptr()->get_data_path() + response_name, FileStorage::WRITE + FileStorage::BASE64); - fs_response << "response" << r_gold; - } -#endif - { - const string response_file = findDataFile(response_name); - FileStorage fs_response(response_file, FileStorage::READ); - fs_response["response"] >> r_gold; - } - ASSERT_FALSE(r_gold.empty()); - - // verify - const string model_file = findDataFile(model_name); - Ptr y = Algorithm::load(model_file); - ASSERT_TRUE(y); - Mat rx, ry; - for (int j = 0; j < 4; j++) - { - rx = x->getWeights(j); - ry = y->getWeights(j); - EXPECT_MAT_NEAR(rx, ry, FLT_EPSILON) << "Weights are not equal for layer: " << j; - } - x->predict(testSamples, rx); - y->predict(testSamples, ry); - EXPECT_MAT_NEAR(ry, rx, FLT_EPSILON) << "Predict are not equal to result of the saved model"; - EXPECT_MAT_NEAR(r_gold, rx, FLT_EPSILON) << "Predict are not equal to 'gold' response"; -} - -INSTANTIATE_TEST_CASE_P(/*none*/, ML_ANN_METHOD, - testing::Values( - ML_ANN_METHOD_Params(ml::ANN_MLP::RPROP, "rprop", 5000), - ML_ANN_METHOD_Params(ml::ANN_MLP::ANNEAL, "anneal", 1000) - // ML_ANN_METHOD_Params(ml::ANN_MLP::BACKPROP, "backprop", 500) -----> NO BACKPROP TEST - ) -); - -}} // namespace diff --git a/modules/ml/test/test_bayes.cpp b/modules/ml/test/test_bayes.cpp deleted file mode 100644 index 07ff8b2a36..0000000000 --- a/modules/ml/test/test_bayes.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
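One idiom in the Bayes test below deserves a note: for single-channel cv::Mat, the expression (A == B) produces an 8-bit mask holding 255 where elements match, so comparing sum(mask) against 255*total() asserts element-wise equality. A minimal hedged sketch (illustration only):

```cpp
#include <opencv2/core.hpp>

// Sketch: element-wise equality check for single-channel matrices of equal
// size, using the 0/255 comparison mask as the assertions below do.
static bool matsEqual(const cv::Mat& a, const cv::Mat& b)
{
    cv::Mat mask = (a == b);   // CV_8U, 255 where elements are equal
    return cv::sum(mask)[0] == 255.0 * mask.total();
}
```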
-
-#include "test_precomp.hpp"
-
-namespace opencv_test { namespace {
-
-TEST(ML_NBAYES, regression_5911)
-{
-    int N=12;
-    Ptr<NormalBayesClassifier> nb = cv::ml::NormalBayesClassifier::create();
-
-    // data:
-    float X_data[] = {
-        1,2,3,4, 1,2,3,4, 1,2,3,4, 1,2,3,4,
-        5,5,5,5, 5,5,5,5, 5,5,5,5, 5,5,5,5,
-        4,3,2,1, 4,3,2,1, 4,3,2,1, 4,3,2,1
-    };
-    Mat_<float> X(N, 4, X_data);
-
-    // labels:
-    int Y_data[] = { 0,0,0,0, 1,1,1,1, 2,2,2,2 };
-    Mat_<int> Y(N, 1, Y_data);
-
-    nb->train(X, ml::ROW_SAMPLE, Y);
-
-    // single prediction:
-    Mat R1,P1;
-    for (int i=0; i<N; i++)
-    {
-        Mat r,p;
-        nb->predictProb(X.row(i), r, p);
-        R1.push_back(r);
-        P1.push_back(p);
-    }
-
-    // bulk prediction (continuous memory):
-    Mat R2,P2;
-    nb->predictProb(X, R2, P2);
-
-    EXPECT_EQ(255 * R2.total(), sum(R1 == R2)[0]);
-    EXPECT_EQ(255 * P2.total(), sum(P1 == P2)[0]);
-
-    // bulk prediction, with non-continuous memory storage
-    Mat R3_(N, 1+1, CV_32S),
-        P3_(N, 3+1, CV_32F);
-    nb->predictProb(X, R3_.col(0), P3_.colRange(0,3));
-    Mat R3 = R3_.col(0).clone(),
-        P3 = P3_.colRange(0,3).clone();
-
-    EXPECT_EQ(255 * R3.total(), sum(R1 == R3)[0]);
-    EXPECT_EQ(255 * P3.total(), sum(P1 == P3)[0]);
-}
-
-}} // namespace
diff --git a/modules/ml/test/test_em.cpp b/modules/ml/test/test_em.cpp
deleted file mode 100644
index 373385d406..0000000000
--- a/modules/ml/test/test_em.cpp
+++ /dev/null
@@ -1,186 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-
-#include "test_precomp.hpp"
-
-namespace opencv_test { namespace {
-
-CV_ENUM(EM_START_STEP, EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP)
-CV_ENUM(EM_COV_MAT, EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL)
-
-typedef testing::TestWithParam< tuple<EM_START_STEP, EM_COV_MAT> > ML_EM_Params;
-
-TEST_P(ML_EM_Params, accuracy)
-{
-    const int nclusters = 3;
-    const int sizesArr[] = { 500, 700, 800 };
-    const vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
-    const int pointsCount = sizesArr[0] + sizesArr[1] + sizesArr[2];
-    Mat means;
-    vector<Mat> covs;
-    defaultDistribs( means, covs, CV_64FC1 );
-    Mat trainData(pointsCount, 2, CV_64FC1 );
-    Mat trainLabels;
-    generateData( trainData, trainLabels, sizes, means, covs, CV_64FC1, CV_32SC1 );
-    Mat testData( pointsCount, 2, CV_64FC1 );
-    Mat testLabels;
-    generateData( testData, testLabels, sizes, means, covs, CV_64FC1, CV_32SC1 );
-    Mat probs(trainData.rows, nclusters, CV_64FC1, cv::Scalar(1));
-    Mat weights(1, nclusters, CV_64FC1, cv::Scalar(1));
-    TermCriteria termCrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, FLT_EPSILON);
-    int startStep = get<0>(GetParam());
-    int covMatType = get<1>(GetParam());
-    cv::Mat labels;
-
-    Ptr<EM> em = EM::create();
-    em->setClustersNumber(nclusters);
-    em->setCovarianceMatrixType(covMatType);
-    em->setTermCriteria(termCrit);
-    if( startStep == EM::START_AUTO_STEP )
-        em->trainEM( trainData, noArray(), labels, noArray() );
-    else if( startStep == EM::START_E_STEP )
-        em->trainE( trainData, means, covs, weights, noArray(), labels, noArray() );
-    else if( startStep == EM::START_M_STEP )
-        em->trainM( trainData, probs, noArray(), labels, noArray() );
-
-    {
-        SCOPED_TRACE("Train");
-        float err = 1000;
-        EXPECT_TRUE(calcErr( labels, trainLabels, sizes, err , false, false ));
-        EXPECT_LE(err, 0.008f);
-    }
-
-    {
-        SCOPED_TRACE("Test");
-        float err = 1000;
-        labels.create( testData.rows, 1, CV_32SC1 );
-        for( int i = 0; i < testData.rows; i++ )
-        {
-            Mat sample = testData.row(i);
-            Mat
out_probs; - labels.at(i) = static_cast(em->predict2( sample, out_probs )[1]); - } - EXPECT_TRUE(calcErr( labels, testLabels, sizes, err, false, false )); - EXPECT_LE(err, 0.008f); - } -} - -INSTANTIATE_TEST_CASE_P(/**/, ML_EM_Params, - testing::Combine( - testing::Values(EM::START_AUTO_STEP, EM::START_M_STEP, EM::START_E_STEP), - testing::Values(EM::COV_MAT_GENERIC, EM::COV_MAT_DIAGONAL, EM::COV_MAT_SPHERICAL) - )); - -//================================================================================================== - -TEST(ML_EM, save_load) -{ - const int nclusters = 2; - Mat_ samples(3, 1); - samples << 1., 2., 3.; - - std::vector firstResult; - string filename = cv::tempfile(".xml"); - { - Mat labels; - Ptr em = EM::create(); - em->setClustersNumber(nclusters); - em->trainEM(samples, noArray(), labels, noArray()); - for( int i = 0; i < samples.rows; i++) - { - Vec2d res = em->predict2(samples.row(i), noArray()); - firstResult.push_back(res[1]); - } - { - FileStorage fs = FileStorage(filename, FileStorage::WRITE); - ASSERT_NO_THROW(fs << "em" << "{"); - ASSERT_NO_THROW(em->write(fs)); - ASSERT_NO_THROW(fs << "}"); - } - } - { - Ptr em; - ASSERT_NO_THROW(em = Algorithm::load(filename)); - for( int i = 0; i < samples.rows; i++) - { - SCOPED_TRACE(i); - Vec2d res = em->predict2(samples.row(i), noArray()); - EXPECT_DOUBLE_EQ(firstResult[i], res[1]); - } - } - remove(filename.c_str()); -} - -//================================================================================================== - -TEST(ML_EM, classification) -{ - // This test classifies spam by the following way: - // 1. estimates distributions of "spam" / "not spam" - // 2. predict classID using Bayes classifier for estimated distributions. - string dataFilename = findDataFile("spambase.data"); - Ptr data = TrainData::loadFromCSV(dataFilename, 0); - ASSERT_FALSE(data.empty()); - - Mat samples = data->getSamples(); - ASSERT_EQ(samples.cols, 57); - Mat responses = data->getResponses(); - - vector trainSamplesMask(samples.rows, 0); - const int trainSamplesCount = (int)(0.5f * samples.rows); - const int testSamplesCount = samples.rows - trainSamplesCount; - for(int i = 0; i < trainSamplesCount; i++) - trainSamplesMask[i] = 1; - RNG &rng = cv::theRNG(); - for(size_t i = 0; i < trainSamplesMask.size(); i++) - { - int i1 = rng(static_cast(trainSamplesMask.size())); - int i2 = rng(static_cast(trainSamplesMask.size())); - std::swap(trainSamplesMask[i1], trainSamplesMask[i2]); - } - - Mat samples0, samples1; - for(int i = 0; i < samples.rows; i++) - { - if(trainSamplesMask[i]) - { - Mat sample = samples.row(i); - int resp = (int)responses.at(i); - if(resp == 0) - samples0.push_back(sample); - else - samples1.push_back(sample); - } - } - - Ptr model0 = EM::create(); - model0->setClustersNumber(3); - model0->trainEM(samples0, noArray(), noArray(), noArray()); - - Ptr model1 = EM::create(); - model1->setClustersNumber(3); - model1->trainEM(samples1, noArray(), noArray(), noArray()); - - // confusion matrices - Mat_ trainCM(2, 2, 0); - Mat_ testCM(2, 2, 0); - const double lambda = 1.; - for(int i = 0; i < samples.rows; i++) - { - Mat sample = samples.row(i); - double sampleLogLikelihoods0 = model0->predict2(sample, noArray())[0]; - double sampleLogLikelihoods1 = model1->predict2(sample, noArray())[0]; - int classID = (sampleLogLikelihoods0 >= lambda * sampleLogLikelihoods1) ? 
0 : 1; - int resp = (int)responses.at(i); - EXPECT_TRUE(resp == 0 || resp == 1); - if(trainSamplesMask[i]) - trainCM(resp, classID)++; - else - testCM(resp, classID)++; - } - EXPECT_LE((double)(trainCM(1,0) + trainCM(0,1)) / trainSamplesCount, 0.23); - EXPECT_LE((double)(testCM(1,0) + testCM(0,1)) / testSamplesCount, 0.26); -} - -}} // namespace diff --git a/modules/ml/test/test_kmeans.cpp b/modules/ml/test/test_kmeans.cpp deleted file mode 100644 index 153ed642d3..0000000000 --- a/modules/ml/test/test_kmeans.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. - -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - -TEST(ML_KMeans, accuracy) -{ - const int iters = 100; - int sizesArr[] = { 5000, 7000, 8000 }; - int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2]; - - Mat data( pointsCount, 2, CV_32FC1 ), labels; - vector sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) ); - Mat means; - vector covs; - defaultDistribs( means, covs ); - generateData( data, labels, sizes, means, covs, CV_32FC1, CV_32SC1 ); - TermCriteria termCriteria( TermCriteria::COUNT, iters, 0.0); - - { - SCOPED_TRACE("KMEANS_PP_CENTERS"); - float err = 1000; - Mat bestLabels; - kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_PP_CENTERS, noArray() ); - EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err , false )); - EXPECT_LE(err, 0.01f); - } - { - SCOPED_TRACE("KMEANS_RANDOM_CENTERS"); - float err = 1000; - Mat bestLabels; - kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_RANDOM_CENTERS, noArray() ); - EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false )); - EXPECT_LE(err, 0.01f); - } - { - SCOPED_TRACE("KMEANS_USE_INITIAL_LABELS"); - float err = 1000; - Mat bestLabels; - labels.copyTo( bestLabels ); - RNG &rng = cv::theRNG(); - for( int i = 0; i < 0.5f * pointsCount; i++ ) - bestLabels.at( rng.next() % pointsCount, 0 ) = rng.next() % 3; - kmeans( data, 3, bestLabels, termCriteria, 0, KMEANS_USE_INITIAL_LABELS, noArray() ); - EXPECT_TRUE(calcErr( bestLabels, labels, sizes, err, false )); - EXPECT_LE(err, 0.01f); - } -} - -}} // namespace diff --git a/modules/ml/test/test_knearest.cpp b/modules/ml/test/test_knearest.cpp deleted file mode 100644 index 80baed9626..0000000000 --- a/modules/ml/test/test_knearest.cpp +++ /dev/null @@ -1,112 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
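In the KDTree branch of the KNearest test below, findNearest() returns neighbor *indexes* rather than class labels, so the test rebuilds each prediction by a majority vote over the neighbors' training labels. A hedged standalone sketch of that vote (illustration only):

```cpp
#include <vector>
#include <algorithm>

// Sketch: most frequent class id among the k neighbor labels
// (ties resolve to the smallest id, as with std::max_element).
static int majorityVote(const std::vector<int>& neighborLabels, int nclasses)
{
    std::vector<int> hist(nclasses, 0);
    for (size_t i = 0; i < neighborLabels.size(); i++)
        ++hist[neighborLabels[i]];
    return (int)(std::max_element(hist.begin(), hist.end()) - hist.begin());
}
```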
- -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - -using cv::ml::TrainData; -using cv::ml::EM; -using cv::ml::KNearest; - -TEST(ML_KNearest, accuracy) -{ - int sizesArr[] = { 500, 700, 800 }; - int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2]; - - Mat trainData( pointsCount, 2, CV_32FC1 ), trainLabels; - vector sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) ); - Mat means; - vector covs; - defaultDistribs( means, covs ); - generateData( trainData, trainLabels, sizes, means, covs, CV_32FC1, CV_32FC1 ); - - Mat testData( pointsCount, 2, CV_32FC1 ); - Mat testLabels; - generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32FC1 ); - - { - SCOPED_TRACE("Default"); - Mat bestLabels; - float err = 1000; - Ptr knn = KNearest::create(); - knn->train(trainData, ml::ROW_SAMPLE, trainLabels); - knn->findNearest(testData, 4, bestLabels); - EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true )); - EXPECT_LE(err, 0.01f); - } - { - SCOPED_TRACE("KDTree"); - Mat neighborIndexes; - float err = 1000; - Ptr knn = KNearest::create(); - knn->setAlgorithmType(KNearest::KDTREE); - knn->train(trainData, ml::ROW_SAMPLE, trainLabels); - knn->findNearest(testData, 4, neighborIndexes); - Mat bestLabels; - // The output of the KDTree are the neighbor indexes, not actual class labels - // so we need to do some extra work to get actual predictions - for(int row_num = 0; row_num < neighborIndexes.rows; ++row_num){ - vector labels; - for(int index = 0; index < neighborIndexes.row(row_num).cols; ++index) { - labels.push_back(trainLabels.at(neighborIndexes.row(row_num).at(0, index) , 0)); - } - // computing the mode of the output class predictions to determine overall prediction - std::vector histogram(3,0); - for( int i=0; i<3; ++i ) - ++histogram[ static_cast(labels[i]) ]; - int bestLabel = static_cast(std::max_element( histogram.begin(), histogram.end() ) - histogram.begin()); - bestLabels.push_back(bestLabel); - } - bestLabels.convertTo(bestLabels, testLabels.type()); - EXPECT_TRUE(calcErr( bestLabels, testLabels, sizes, err, true )); - EXPECT_LE(err, 0.01f); - } -} - -TEST(ML_KNearest, regression_12347) -{ - Mat xTrainData = (Mat_(5,2) << 1, 1.1, 1.1, 1, 2, 2, 2.1, 2, 2.1, 2.1); - Mat yTrainLabels = (Mat_(5,1) << 1, 1, 2, 2, 2); - Ptr knn = KNearest::create(); - knn->train(xTrainData, ml::ROW_SAMPLE, yTrainLabels); - - Mat xTestData = (Mat_(2,2) << 1.1, 1.1, 2, 2.2); - Mat zBestLabels, neighbours, dist; - // check output shapes: - int K = 16, Kexp = std::min(K, xTrainData.rows); - knn->findNearest(xTestData, K, zBestLabels, neighbours, dist); - EXPECT_EQ(xTestData.rows, zBestLabels.rows); - EXPECT_EQ(neighbours.cols, Kexp); - EXPECT_EQ(dist.cols, Kexp); - // see if the result is still correct: - K = 2; - knn->findNearest(xTestData, K, zBestLabels, neighbours, dist); - EXPECT_EQ(1, zBestLabels.at(0,0)); - EXPECT_EQ(2, zBestLabels.at(1,0)); -} - -TEST(ML_KNearest, bug_11877) -{ - Mat trainData = (Mat_(5,2) << 3, 3, 3, 3, 4, 4, 4, 4, 4, 4); - Mat trainLabels = (Mat_(5,1) << 0, 0, 1, 1, 1); - - Ptr knnKdt = KNearest::create(); - knnKdt->setAlgorithmType(KNearest::KDTREE); - knnKdt->setIsClassifier(true); - - knnKdt->train(trainData, ml::ROW_SAMPLE, trainLabels); - - Mat testData = (Mat_(2,2) << 3.1, 3.1, 4, 4.1); - Mat testLabels = (Mat_(2,1) << 0, 1); - Mat result; - - knnKdt->findNearest(testData, 1, result); - - EXPECT_EQ(1, int(result.at(0, 0))); - EXPECT_EQ(2, int(result.at(1, 0))); - EXPECT_EQ(0, trainLabels.at(result.at(0, 0), 0)); -} - -}} 
// namespace
diff --git a/modules/ml/test/test_lr.cpp b/modules/ml/test/test_lr.cpp
deleted file mode 100644
index ec77fcbdda..0000000000
--- a/modules/ml/test/test_lr.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
-
-//
-// Test data uses subset of data from the popular Iris Dataset (1936):
-// - http://archive.ics.uci.edu/ml/datasets/Iris
-// - https://en.wikipedia.org/wiki/Iris_flower_data_set
-//
-
-#include "test_precomp.hpp"
-
-namespace opencv_test { namespace {
-
-TEST(ML_LR, accuracy)
-{
-    std::string dataFileName = findDataFile("iris.data");
-    Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
-    ASSERT_FALSE(tdata.empty());
-
-    Ptr<LogisticRegression> p = LogisticRegression::create();
-    p->setLearningRate(1.0);
-    p->setIterations(10001);
-    p->setRegularization(LogisticRegression::REG_L2);
-    p->setTrainMethod(LogisticRegression::BATCH);
-    p->setMiniBatchSize(10);
-    p->train(tdata);
-
-    Mat responses;
-    p->predict(tdata->getSamples(), responses);
-
-    float error = 1000;
-    EXPECT_TRUE(calculateError(responses, tdata->getResponses(), error));
-    EXPECT_LE(error, 0.05f);
-}
-
-//==================================================================================================
-
-TEST(ML_LR, save_load)
-{
-    string dataFileName = findDataFile("iris.data");
-    Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
-    ASSERT_FALSE(tdata.empty());
-    Mat responses1, responses2;
-    Mat learnt_mat1, learnt_mat2;
-    String filename = tempfile(".xml");
-    {
-        Ptr<LogisticRegression> lr1 = LogisticRegression::create();
-        lr1->setLearningRate(1.0);
-        lr1->setIterations(10001);
-        lr1->setRegularization(LogisticRegression::REG_L2);
-        lr1->setTrainMethod(LogisticRegression::BATCH);
-        lr1->setMiniBatchSize(10);
-        ASSERT_NO_THROW(lr1->train(tdata));
-        ASSERT_NO_THROW(lr1->predict(tdata->getSamples(), responses1));
-        ASSERT_NO_THROW(lr1->save(filename));
-        learnt_mat1 = lr1->get_learnt_thetas();
-    }
-    {
-        Ptr<LogisticRegression> lr2;
-        ASSERT_NO_THROW(lr2 = Algorithm::load<LogisticRegression>(filename));
-        ASSERT_NO_THROW(lr2->predict(tdata->getSamples(), responses2));
-        learnt_mat2 = lr2->get_learnt_thetas();
-    }
-    // compare difference in prediction outputs and stored inputs
-    EXPECT_MAT_NEAR(responses1, responses2, 0.f);
-
-    Mat comp_learnt_mats;
-    comp_learnt_mats = (learnt_mat1 == learnt_mat2);
-    comp_learnt_mats = comp_learnt_mats.reshape(1, comp_learnt_mats.rows*comp_learnt_mats.cols);
-    comp_learnt_mats.convertTo(comp_learnt_mats, CV_32S);
-    comp_learnt_mats = comp_learnt_mats/255;
-    // check if there is any difference between computed learnt mat and retrieved mat
-    EXPECT_EQ(comp_learnt_mats.rows, sum(comp_learnt_mats)[0]);
-
-    remove( filename.c_str() );
-}
-
-}} // namespace
diff --git a/modules/ml/test/test_main.cpp b/modules/ml/test/test_main.cpp
deleted file mode 100644
index aab717ee51..0000000000
--- a/modules/ml/test/test_main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
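After the short test_main.cpp stub, the next deleted file, test_mltests.cpp, drives its accuracy and save/load tests from a table of DatasetDesc entries: each names a CSV from the test data, the response column, the train/test split point, the category count, and a per-column variable-type specification. A hedged usage sketch (illustration only) built from the "vehicle" entry, whose columns 0-17 are ordered and column 18 is categorical:

```cpp
#include <opencv2/ml.hpp>

// Sketch: load a dataset the way DatasetDesc::load() below does.
cv::Ptr<cv::ml::TrainData> loadVehicle()
{
    cv::Ptr<cv::ml::TrainData> data = cv::ml::TrainData::loadFromCSV(
        "vehicle.data", 0 /* header lines */,
        18 /* response start */, 19 /* response end */,
        "ord[0-17],cat[18]");
    data->setTrainTestSplit(761);   // first 761 shuffled samples train
    data->shuffleTrainTest();
    return data;
}
```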
-
-#include "test_precomp.hpp"
-
-#if defined(HAVE_HPX)
-    #include <hpx/hpx_main.hpp>
-#endif
-
-CV_TEST_MAIN("ml")
diff --git a/modules/ml/test/test_mltests.cpp b/modules/ml/test/test_mltests.cpp
deleted file mode 100644
index a67f6b0bf2..0000000000
--- a/modules/ml/test/test_mltests.cpp
+++ /dev/null
@@ -1,373 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-
-#include "test_precomp.hpp"
-
-namespace opencv_test { namespace {
-
-struct DatasetDesc
-{
-    string name;
-    int resp_idx;
-    int train_count;
-    int cat_num;
-    string type_desc;
-public:
-    Ptr<TrainData> load()
-    {
-        string filename = findDataFile(name + ".data");
-        Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, resp_idx, resp_idx + 1, type_desc);
-        data->setTrainTestSplit(train_count);
-        data->shuffleTrainTest();
-        return data;
-    }
-};
-
-// see testdata/ml/protocol.txt (?)
-DatasetDesc datasets[] = {
-    { "mushroom", 0, 4000, 16, "cat" },
-    { "adult", 14, 22561, 16, "ord[0,2,4,10-12],cat[1,3,5-9,13,14]" },
-    { "vehicle", 18, 761, 4, "ord[0-17],cat[18]" },
-    { "abalone", 8, 3133, 16, "ord[1-8],cat[0]" },
-    { "ringnorm", 20, 300, 2, "ord[0-19],cat[20]" },
-    { "spambase", 57, 3221, 3, "ord[0-56],cat[57]" },
-    { "waveform", 21, 300, 3, "ord[0-20],cat[21]" },
-    { "elevators", 18, 5000, 0, "ord" },
-    { "letter", 16, 10000, 26, "ord[0-15],cat[16]" },
-    { "twonorm", 20, 300, 3, "ord[0-19],cat[20]" },
-    { "poletelecomm", 48, 2500, 0, "ord" },
-};
-
-static DatasetDesc & getDataset(const string & name)
-{
-    const int sz = sizeof(datasets)/sizeof(datasets[0]);
-    for (int i = 0; i < sz; ++i)
-    {
-        DatasetDesc & desc = datasets[i];
-        if (desc.name == name)
-            return desc;
-    }
-    CV_Error(Error::StsInternal, "");
-}
-
-//==================================================================================================
-
-// interfaces and templates
-
-template <typename T> string modelName() { return "Unknown"; }
-template <typename T> Ptr<T> tuneModel(const DatasetDesc &, Ptr<T> m) { return m; }
-
-struct IModelFactory
-{
-    virtual Ptr<StatModel> createNew(const DatasetDesc &dataset) const = 0;
-    virtual Ptr<StatModel> loadFromFile(const string &filename) const = 0;
-    virtual string name() const = 0;
-    virtual ~IModelFactory() {}
-};
-
-template <typename T>
-struct ModelFactory : public IModelFactory
-{
-    Ptr<StatModel> createNew(const DatasetDesc &dataset) const CV_OVERRIDE
-    {
-        return tuneModel<T>(dataset, T::create());
-    }
-    Ptr<StatModel> loadFromFile(const string & filename) const CV_OVERRIDE
-    {
-        return T::load(filename);
-    }
-    string name() const CV_OVERRIDE { return modelName<T>(); }
-};
-
-// implementation
-
-template <> string modelName<NormalBayesClassifier>() { return "NormalBayesClassifier"; }
-template <> string modelName<DTrees>() { return "DTrees"; }
-template <> string modelName<KNearest>() { return "KNearest"; }
-template <> string modelName<RTrees>() { return "RTrees"; }
-template <> string modelName<SVMSGD>() { return "SVMSGD"; }
-
-template<> Ptr<DTrees> tuneModel<DTrees>(const DatasetDesc &dataset, Ptr<DTrees> m)
-{
-    m->setMaxDepth(10);
-    m->setMinSampleCount(2);
-    m->setRegressionAccuracy(0);
-    m->setUseSurrogates(false);
-    m->setCVFolds(0);
-    m->setUse1SERule(false);
-    m->setTruncatePrunedTree(false);
-    m->setPriors(Mat());
-    m->setMaxCategories(dataset.cat_num);
-    return m;
-}
-
-template<> Ptr<RTrees> tuneModel<RTrees>(const DatasetDesc &dataset, Ptr<RTrees> m)
-{
-    m->setMaxDepth(20);
-    m->setMinSampleCount(2);
-    m->setRegressionAccuracy(0);
-    m->setUseSurrogates(false);
-    m->setPriors(Mat());
-    m->setCalculateVarImportance(true);
-    m->setActiveVarCount(0);
-
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, 100, 0.0)); - m->setMaxCategories(dataset.cat_num); - return m; -} - -template<> Ptr tuneModel(const DatasetDesc &, Ptr m) -{ - m->setSvmsgdType(SVMSGD::ASGD); - m->setMarginType(SVMSGD::SOFT_MARGIN); - m->setMarginRegularization(0.00001f); - m->setInitialStepSize(0.1f); - m->setStepDecreasingPower(0.75); - m->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10000, 0.00001)); - return m; -} - -template <> -struct ModelFactory : public IModelFactory -{ - ModelFactory(int boostType_) : boostType(boostType_) {} - Ptr createNew(const DatasetDesc &) const CV_OVERRIDE - { - Ptr m = Boost::create(); - m->setBoostType(boostType); - m->setWeakCount(20); - m->setWeightTrimRate(0.95); - m->setMaxDepth(4); - m->setUseSurrogates(false); - m->setPriors(Mat()); - return m; - } - Ptr loadFromFile(const string &filename) const { return Boost::load(filename); } - string name() const CV_OVERRIDE { return "Boost"; } - int boostType; -}; - -template <> -struct ModelFactory : public IModelFactory -{ - ModelFactory(int svmType_, int kernelType_, double gamma_, double c_, double nu_) - : svmType(svmType_), kernelType(kernelType_), gamma(gamma_), c(c_), nu(nu_) {} - Ptr createNew(const DatasetDesc &) const CV_OVERRIDE - { - Ptr m = SVM::create(); - m->setType(svmType); - m->setKernel(kernelType); - m->setDegree(0); - m->setGamma(gamma); - m->setCoef0(0); - m->setC(c); - m->setNu(nu); - m->setP(0); - return m; - } - Ptr loadFromFile(const string &filename) const { return SVM::load(filename); } - string name() const CV_OVERRIDE { return "SVM"; } - int svmType; - int kernelType; - double gamma; - double c; - double nu; -}; - -//================================================================================================== - -struct ML_Params_t -{ - Ptr factory; - string dataset; - float mean; - float sigma; -}; - -void PrintTo(const ML_Params_t & param, std::ostream *os) -{ - *os << param.factory->name() << "_" << param.dataset; -} - -ML_Params_t ML_Params_List[] = { - { makePtr< ModelFactory >(), "mushroom", 0.027401f, 0.036236f }, - { makePtr< ModelFactory >(), "adult", 14.279000f, 0.354323f }, - { makePtr< ModelFactory >(), "vehicle", 29.761162f, 4.823927f }, - { makePtr< ModelFactory >(), "abalone", 7.297540f, 0.510058f }, - { makePtr< ModelFactory >(Boost::REAL), "adult", 13.894001f, 0.337763f }, - { makePtr< ModelFactory >(Boost::DISCRETE), "mushroom", 0.007274f, 0.029400f }, - { makePtr< ModelFactory >(Boost::LOGIT), "ringnorm", 9.993943f, 0.860256f }, - { makePtr< ModelFactory >(Boost::GENTLE), "spambase", 5.404347f, 0.581716f }, - { makePtr< ModelFactory >(), "waveform", 17.100641f, 0.630052f }, - { makePtr< ModelFactory >(), "mushroom", 0.006547f, 0.028248f }, - { makePtr< ModelFactory >(), "adult", 13.5129f, 0.266065f }, - { makePtr< ModelFactory >(), "abalone", 4.745199f, 0.282112f }, - { makePtr< ModelFactory >(), "vehicle", 24.964712f, 4.469287f }, - { makePtr< ModelFactory >(), "letter", 5.334999f, 0.261142f }, - { makePtr< ModelFactory >(), "ringnorm", 6.248733f, 0.904713f }, - { makePtr< ModelFactory >(), "twonorm", 4.506479f, 0.449739f }, - { makePtr< ModelFactory >(), "spambase", 5.243477f, 0.54232f }, -}; - -typedef testing::TestWithParam ML_Params; - -TEST_P(ML_Params, accuracy) -{ - const ML_Params_t & param = GetParam(); - DatasetDesc &dataset = getDataset(param.dataset); - Ptr data = dataset.load(); - ASSERT_TRUE(data); - ASSERT_TRUE(data->getNSamples() > 0); - - Ptr m = param.factory->createNew(dataset); - 
ASSERT_TRUE(m); - ASSERT_TRUE(m->train(data, 0)); - - float err = m->calcError(data, true, noArray()); - EXPECT_NEAR(err, param.mean, 4 * param.sigma); -} - -INSTANTIATE_TEST_CASE_P(/**/, ML_Params, testing::ValuesIn(ML_Params_List)); - - -//================================================================================================== - -struct ML_SL_Params_t -{ - Ptr factory; - string dataset; -}; - -void PrintTo(const ML_SL_Params_t & param, std::ostream *os) -{ - *os << param.factory->name() << "_" << param.dataset; -} - -ML_SL_Params_t ML_SL_Params_List[] = { - { makePtr< ModelFactory >(), "waveform" }, - { makePtr< ModelFactory >(), "waveform" }, - { makePtr< ModelFactory >(), "abalone" }, - { makePtr< ModelFactory >(SVM::C_SVC, SVM::LINEAR, 1, 0.5, 0), "waveform" }, - { makePtr< ModelFactory >(SVM::NU_SVR, SVM::RBF, 0.00225, 62.5, 0.03), "poletelecomm" }, - { makePtr< ModelFactory >(), "mushroom" }, - { makePtr< ModelFactory >(), "abalone" }, - { makePtr< ModelFactory >(Boost::REAL), "adult" }, - { makePtr< ModelFactory >(), "waveform" }, - { makePtr< ModelFactory >(), "abalone" }, - { makePtr< ModelFactory >(), "waveform" }, -}; - -typedef testing::TestWithParam ML_SL_Params; - -TEST_P(ML_SL_Params, save_load) -{ - const ML_SL_Params_t & param = GetParam(); - - DatasetDesc &dataset = getDataset(param.dataset); - Ptr data = dataset.load(); - ASSERT_TRUE(data); - ASSERT_TRUE(data->getNSamples() > 0); - - Mat responses1, responses2; - string file1 = tempfile(".json.gz"); - string file2 = tempfile(".json.gz"); - { - Ptr m = param.factory->createNew(dataset); - ASSERT_TRUE(m); - ASSERT_TRUE(m->train(data, 0)); - m->calcError(data, true, responses1); - m->save(file1 + "?base64"); - } - { - Ptr m = param.factory->loadFromFile(file1); - ASSERT_TRUE(m); - m->calcError(data, true, responses2); - m->save(file2 + "?base64"); - } - EXPECT_MAT_NEAR(responses1, responses2, 0.0); - { - ifstream f1(file1.c_str(), std::ios_base::binary); - ifstream f2(file2.c_str(), std::ios_base::binary); - ASSERT_TRUE(f1.is_open() && f2.is_open()); - const size_t BUFSZ = 10000; - vector buf1(BUFSZ, 0); - vector buf2(BUFSZ, 0); - while (true) - { - f1.read(&buf1[0], BUFSZ); - f2.read(&buf2[0], BUFSZ); - EXPECT_EQ(f1.gcount(), f2.gcount()); - EXPECT_EQ(f1.eof(), f2.eof()); - if (!f1.good() || !f2.good() || f1.gcount() != f2.gcount()) - break; - ASSERT_EQ(buf1, buf2); - } - } - remove(file1.c_str()); - remove(file2.c_str()); -} - -INSTANTIATE_TEST_CASE_P(/**/, ML_SL_Params, testing::ValuesIn(ML_SL_Params_List)); - -//================================================================================================== - -TEST(TrainDataGet, layout_ROW_SAMPLE) // Details: #12236 -{ - cv::Mat test = cv::Mat::ones(150, 30, CV_32FC1) * 2; - test.col(3) += Scalar::all(3); - cv::Mat labels = cv::Mat::ones(150, 3, CV_32SC1) * 5; - labels.col(1) += 1; - cv::Ptr train_data = cv::ml::TrainData::create(test, cv::ml::ROW_SAMPLE, labels); - train_data->setTrainTestSplitRatio(0.9); - - Mat tidx = train_data->getTestSampleIdx(); - EXPECT_EQ((size_t)15, tidx.total()); - - Mat tresp = train_data->getTestResponses(); - EXPECT_EQ(15, tresp.rows); - EXPECT_EQ(labels.cols, tresp.cols); - EXPECT_EQ(5, tresp.at(0, 0)) << tresp; - EXPECT_EQ(6, tresp.at(0, 1)) << tresp; - EXPECT_EQ(6, tresp.at(14, 1)) << tresp; - EXPECT_EQ(5, tresp.at(14, 2)) << tresp; - - Mat tsamples = train_data->getTestSamples(); - EXPECT_EQ(15, tsamples.rows); - EXPECT_EQ(test.cols, tsamples.cols); - EXPECT_EQ(2, tsamples.at(0, 0)) << tsamples; - EXPECT_EQ(5, 
tsamples.at(0, 3)) << tsamples; - EXPECT_EQ(2, tsamples.at(14, test.cols - 1)) << tsamples; - EXPECT_EQ(5, tsamples.at(14, 3)) << tsamples; -} - -TEST(TrainDataGet, layout_COL_SAMPLE) // Details: #12236 -{ - cv::Mat test = cv::Mat::ones(30, 150, CV_32FC1) * 3; - test.row(3) += Scalar::all(3); - cv::Mat labels = cv::Mat::ones(3, 150, CV_32SC1) * 5; - labels.row(1) += 1; - cv::Ptr train_data = cv::ml::TrainData::create(test, cv::ml::COL_SAMPLE, labels); - train_data->setTrainTestSplitRatio(0.9); - - Mat tidx = train_data->getTestSampleIdx(); - EXPECT_EQ((size_t)15, tidx.total()); - - Mat tresp = train_data->getTestResponses(); // always row-based, transposed - EXPECT_EQ(15, tresp.rows); - EXPECT_EQ(labels.rows, tresp.cols); - EXPECT_EQ(5, tresp.at(0, 0)) << tresp; - EXPECT_EQ(6, tresp.at(0, 1)) << tresp; - EXPECT_EQ(6, tresp.at(14, 1)) << tresp; - EXPECT_EQ(5, tresp.at(14, 2)) << tresp; - - - Mat tsamples = train_data->getTestSamples(); - EXPECT_EQ(15, tsamples.cols); - EXPECT_EQ(test.rows, tsamples.rows); - EXPECT_EQ(3, tsamples.at(0, 0)) << tsamples; - EXPECT_EQ(6, tsamples.at(3, 0)) << tsamples; - EXPECT_EQ(6, tsamples.at(3, 14)) << tsamples; - EXPECT_EQ(3, tsamples.at(test.rows - 1, 14)) << tsamples; -} - -}} // namespace diff --git a/modules/ml/test/test_precomp.hpp b/modules/ml/test/test_precomp.hpp deleted file mode 100644 index 380e612616..0000000000 --- a/modules/ml/test/test_precomp.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef __OPENCV_TEST_PRECOMP_HPP__ -#define __OPENCV_TEST_PRECOMP_HPP__ - -#include "opencv2/ts.hpp" -#include // EXPECT_MAT_NEAR -#include "opencv2/ml.hpp" - -#include -using std::ifstream; - -namespace opencv_test { - -using namespace cv::ml; - -#define CV_NBAYES "nbayes" -#define CV_KNEAREST "knearest" -#define CV_SVM "svm" -#define CV_EM "em" -#define CV_ANN "ann" -#define CV_DTREE "dtree" -#define CV_BOOST "boost" -#define CV_RTREES "rtrees" -#define CV_ERTREES "ertrees" -#define CV_SVMSGD "svmsgd" - -using cv::Ptr; -using cv::ml::StatModel; -using cv::ml::TrainData; -using cv::ml::NormalBayesClassifier; -using cv::ml::SVM; -using cv::ml::KNearest; -using cv::ml::ParamGrid; -using cv::ml::ANN_MLP; -using cv::ml::DTrees; -using cv::ml::Boost; -using cv::ml::RTrees; -using cv::ml::SVMSGD; - -void defaultDistribs( Mat& means, vector& covs, int type=CV_32FC1 ); -void generateData( Mat& data, Mat& labels, const vector& sizes, const Mat& _means, const vector& covs, int dataType, int labelType ); -int maxIdx( const vector& count ); -bool getLabelsMap( const Mat& labels, const vector& sizes, vector& labelsMap, bool checkClusterUniq=true ); -bool calcErr( const Mat& labels, const Mat& origLabels, const vector& sizes, float& err, bool labelsEquivalent = true, bool checkClusterUniq=true ); - -// used in LR test -bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error); - -} // namespace - -#endif diff --git a/modules/ml/test/test_rtrees.cpp b/modules/ml/test/test_rtrees.cpp deleted file mode 100644 index 5a4fb34e74..0000000000 --- a/modules/ml/test/test_rtrees.cpp +++ /dev/null @@ -1,119 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
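// Editorial sketch: the two TrainDataGet tests above pin down that a 0.9
// train/test split of 150 samples leaves 15 test samples, and that with
// COL_SAMPLE layout the accessors still return row-based matrices (one row
// per sample, i.e. transposed relative to the input). Standalone version:
#include <opencv2/ml.hpp>
#include <iostream>

static void sketch_col_sample_split()
{
    using namespace cv;
    using namespace cv::ml;
    Mat samples = Mat::ones(30, 150, CV_32F);  // 30 features x 150 samples
    Mat labels  = Mat::ones(1, 150, CV_32S);   // one response per sample
    Ptr<TrainData> td = TrainData::create(samples, COL_SAMPLE, labels);
    td->setTrainTestSplitRatio(0.9);
    std::cout << td->getTestSampleIdx().total() << std::endl;  // 15
    Mat tresp = td->getTestResponses();
    std::cout << tresp.rows << "x" << tresp.cols << std::endl; // 15x1, row-based
}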
- -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - -TEST(ML_RTrees, getVotes) -{ - int n = 12; - int count, i; - int label_size = 3; - int predicted_class = 0; - int max_votes = -1; - int val; - // RTrees for classification - Ptr rt = cv::ml::RTrees::create(); - - //data - Mat data(n, 4, CV_32F); - randu(data, 0, 10); - - //labels - Mat labels = (Mat_(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2); - - rt->train(data, ml::ROW_SAMPLE, labels); - - //run function - Mat test(1, 4, CV_32F); - Mat result; - randu(test, 0, 10); - rt->getVotes(test, result, 0); - - //count vote amount and find highest vote - count = 0; - const int* result_row = result.ptr(1); - for( i = 0; i < label_size; i++ ) - { - val = result_row[i]; - //predicted_class = max_votes < val? i; - if( max_votes < val ) - { - max_votes = val; - predicted_class = i; - } - count += val; - } - - EXPECT_EQ(count, (int)rt->getRoots().size()); - EXPECT_EQ(result.at(0, predicted_class), rt->predict(test)); -} - -TEST(ML_RTrees, 11142_sample_weights_regression) -{ - int n = 3; - // RTrees for regression - Ptr rt = cv::ml::RTrees::create(); - //simple regression problem of x -> 2x - Mat data = (Mat_(n,1) << 1, 2, 3); - Mat values = (Mat_(n,1) << 2, 4, 6); - Mat weights = (Mat_(n, 1) << 10, 10, 10); - - Ptr trainData = TrainData::create(data, ml::ROW_SAMPLE, values); - rt->train(trainData); - double error_without_weights = round(rt->getOOBError()); - rt->clear(); - Ptr trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, values, Mat(), Mat(), weights ); - rt->train(trainDataWithWeights); - double error_with_weights = round(rt->getOOBError()); - // error with weights should be larger than error without weights - EXPECT_GE(error_with_weights, error_without_weights); -} - -TEST(ML_RTrees, 11142_sample_weights_classification) -{ - int n = 12; - // RTrees for classification - Ptr rt = cv::ml::RTrees::create(); - - Mat data(n, 4, CV_32F); - randu(data, 0, 10); - Mat labels = (Mat_(n,1) << 0,0,0,0, 1,1,1,1, 2,2,2,2); - Mat weights = (Mat_(n, 1) << 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10); - - rt->train(data, ml::ROW_SAMPLE, labels); - rt->clear(); - double error_without_weights = round(rt->getOOBError()); - Ptr trainDataWithWeights = TrainData::create(data, ml::ROW_SAMPLE, labels, Mat(), Mat(), weights ); - rt->train(data, ml::ROW_SAMPLE, labels); - double error_with_weights = round(rt->getOOBError()); - std::cout << error_without_weights << std::endl; - std::cout << error_with_weights << std::endl; - // error with weights should be larger than error without weights - EXPECT_GE(error_with_weights, error_without_weights); -} - -TEST(ML_RTrees, bug_12974_throw_exception_when_predict_different_feature_count) -{ - int numFeatures = 5; - // create a 5 feature dataset and train the model - cv::Ptr model = RTrees::create(); - Mat samples(10, numFeatures, CV_32F); - randu(samples, 0, 10); - Mat labels = (Mat_(10,1) << 0,0,0,0,0,1,1,1,1,1); - cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, labels); - model->train(trainData); - // try to predict on data which have fewer features - this should throw an exception - for(int i = 1; i < numFeatures - 1; ++i) { - Mat test(1, i, CV_32FC1); - ASSERT_THROW(model->predict(test), Exception); - } - // try to predict on data which have more features - this should also throw an exception - Mat test(1, numFeatures + 1, CV_32FC1); - ASSERT_THROW(model->predict(test), Exception); -} - - -}} // namespace diff --git a/modules/ml/test/test_save_load.cpp 
b/modules/ml/test/test_save_load.cpp deleted file mode 100644 index 201e6303f5..0000000000 --- a/modules/ml/test/test_save_load.cpp +++ /dev/null @@ -1,107 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. - -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - - -void randomFillCategories(const string & filename, Mat & input) -{ - Mat catMap; - Mat catCount; - std::vector varTypes; - - FileStorage fs(filename, FileStorage::READ); - FileNode root = fs.getFirstTopLevelNode(); - root["cat_map"] >> catMap; - root["cat_count"] >> catCount; - root["var_type"] >> varTypes; - - int offset = 0; - int countOffset = 0; - uint var = 0, varCount = (uint)varTypes.size(); - for (; var < varCount; ++var) - { - if (varTypes[var] == ml::VAR_CATEGORICAL) - { - int size = catCount.at(0, countOffset); - for (int row = 0; row < input.rows; ++row) - { - int randomChosenIndex = offset + ((uint)cv::theRNG()) % size; - int value = catMap.at(0, randomChosenIndex); - input.at(row, var) = (float)value; - } - offset += size; - ++countOffset; - } - } -} - -//================================================================================================== - -typedef tuple ML_Legacy_Param; -typedef testing::TestWithParam< ML_Legacy_Param > ML_Legacy_Params; - -TEST_P(ML_Legacy_Params, legacy_load) -{ - const string modelName = get<0>(GetParam()); - const string dataName = get<1>(GetParam()); - const string filename = findDataFile("legacy/" + modelName + "_" + dataName + ".xml"); - const bool isTree = modelName == CV_BOOST || modelName == CV_DTREE || modelName == CV_RTREES; - - Ptr model; - if (modelName == CV_BOOST) - model = Algorithm::load(filename); - else if (modelName == CV_ANN) - model = Algorithm::load(filename); - else if (modelName == CV_DTREE) - model = Algorithm::load(filename); - else if (modelName == CV_NBAYES) - model = Algorithm::load(filename); - else if (modelName == CV_SVM) - model = Algorithm::load(filename); - else if (modelName == CV_RTREES) - model = Algorithm::load(filename); - else if (modelName == CV_SVMSGD) - model = Algorithm::load(filename); - ASSERT_TRUE(model); - - Mat input = Mat(isTree ? 10 : 1, model->getVarCount(), CV_32F); - cv::theRNG().fill(input, RNG::UNIFORM, 0, 40); - - if (isTree) - randomFillCategories(filename, input); - - Mat output; - EXPECT_NO_THROW(model->predict(input, output, StatModel::RAW_OUTPUT | (isTree ? 
DTrees::PREDICT_SUM : 0))); - // just check if no internal assertions or errors thrown -} - -ML_Legacy_Param param_list[] = { - ML_Legacy_Param(CV_ANN, "waveform"), - ML_Legacy_Param(CV_BOOST, "adult"), - ML_Legacy_Param(CV_BOOST, "1"), - ML_Legacy_Param(CV_BOOST, "2"), - ML_Legacy_Param(CV_BOOST, "3"), - ML_Legacy_Param(CV_DTREE, "abalone"), - ML_Legacy_Param(CV_DTREE, "mushroom"), - ML_Legacy_Param(CV_NBAYES, "waveform"), - ML_Legacy_Param(CV_SVM, "poletelecomm"), - ML_Legacy_Param(CV_SVM, "waveform"), - ML_Legacy_Param(CV_RTREES, "waveform"), - ML_Legacy_Param(CV_SVMSGD, "waveform"), -}; - -INSTANTIATE_TEST_CASE_P(/**/, ML_Legacy_Params, testing::ValuesIn(param_list)); - -/*TEST(ML_SVM, throw_exception_when_save_untrained_model) -{ - Ptr svm; - string filename = tempfile("svm.xml"); - ASSERT_THROW(svm.save(filename.c_str()), Exception); - remove(filename.c_str()); -}*/ - -}} // namespace diff --git a/modules/ml/test/test_svmsgd.cpp b/modules/ml/test/test_svmsgd.cpp deleted file mode 100644 index 038fca0d40..0000000000 --- a/modules/ml/test/test_svmsgd.cpp +++ /dev/null @@ -1,156 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. - -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - -static const int TEST_VALUE_LIMIT = 500; -enum -{ - UNIFORM_SAME_SCALE, - UNIFORM_DIFFERENT_SCALES -}; - -CV_ENUM(SVMSGD_TYPE, UNIFORM_SAME_SCALE, UNIFORM_DIFFERENT_SCALES) - -typedef std::vector< std::pair > BorderList; - -static void makeData(RNG &rng, int samplesCount, const Mat &weights, float shift, const BorderList & borders, Mat &samples, Mat & responses) -{ - int featureCount = weights.cols; - samples.create(samplesCount, featureCount, CV_32FC1); - for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) - rng.fill(samples.col(featureIndex), RNG::UNIFORM, borders[featureIndex].first, borders[featureIndex].second); - responses.create(samplesCount, 1, CV_32FC1); - for (int i = 0 ; i < samplesCount; i++) - { - double res = samples.row(i).dot(weights) + shift; - responses.at(i) = res > 0 ? 
1.f : -1.f; - } -} - -//================================================================================================== - -typedef tuple ML_SVMSGD_Param; -typedef testing::TestWithParam ML_SVMSGD_Params; - -TEST_P(ML_SVMSGD_Params, scale_and_features) -{ - const int type = get<0>(GetParam()); - const int featureCount = get<1>(GetParam()); - const double precision = get<2>(GetParam()); - - RNG &rng = cv::theRNG(); - - Mat_ weights(1, featureCount); - rng.fill(weights, RNG::UNIFORM, -1, 1); - const float shift = static_cast(rng.uniform(-featureCount, featureCount)); - - BorderList borders; - float lowerLimit = -TEST_VALUE_LIMIT; - float upperLimit = TEST_VALUE_LIMIT; - if (type == UNIFORM_SAME_SCALE) - { - for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) - borders.push_back(std::pair(lowerLimit, upperLimit)); - } - else if (type == UNIFORM_DIFFERENT_SCALES) - { - for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) - { - int crit = rng.uniform(0, 2); - if (crit > 0) - borders.push_back(std::pair(lowerLimit, upperLimit)); - else - borders.push_back(std::pair(lowerLimit/1000, upperLimit/1000)); - } - } - ASSERT_FALSE(borders.empty()); - - Mat trainSamples; - Mat trainResponses; - int trainSamplesCount = 10000; - makeData(rng, trainSamplesCount, weights, shift, borders, trainSamples, trainResponses); - ASSERT_EQ(trainResponses.type(), CV_32FC1); - - Mat testSamples; - Mat testResponses; - int testSamplesCount = 100000; - makeData(rng, testSamplesCount, weights, shift, borders, testSamples, testResponses); - ASSERT_EQ(testResponses.type(), CV_32FC1); - - Ptr data = TrainData::create(trainSamples, cv::ml::ROW_SAMPLE, trainResponses); - ASSERT_TRUE(data); - - cv::Ptr svmsgd = SVMSGD::create(); - ASSERT_TRUE(svmsgd); - - svmsgd->train(data); - - Mat responses; - svmsgd->predict(testSamples, responses); - ASSERT_EQ(responses.type(), CV_32FC1); - ASSERT_EQ(responses.rows, testSamplesCount); - - int errCount = 0; - for (int i = 0; i < testSamplesCount; i++) - if (responses.at(i) * testResponses.at(i) < 0) - errCount++; - float err = (float)errCount / testSamplesCount; - EXPECT_LE(err, precision); -} - -ML_SVMSGD_Param params_list[] = { - ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 2, 0.01), - ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 5, 0.01), - ML_SVMSGD_Param(UNIFORM_SAME_SCALE, 100, 0.02), - ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 2, 0.01), - ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 5, 0.01), - ML_SVMSGD_Param(UNIFORM_DIFFERENT_SCALES, 100, 0.01), -}; - -INSTANTIATE_TEST_CASE_P(/**/, ML_SVMSGD_Params, testing::ValuesIn(params_list)); - -//================================================================================================== - -TEST(ML_SVMSGD, twoPoints) -{ - Mat samples(2, 2, CV_32FC1); - samples.at(0,0) = 0; - samples.at(0,1) = 0; - samples.at(1,0) = 1000; - samples.at(1,1) = 1; - - Mat responses(2, 1, CV_32FC1); - responses.at(0) = -1; - responses.at(1) = 1; - - cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses); - - Mat realWeights(1, 2, CV_32FC1); - realWeights.at(0) = 1000; - realWeights.at(1) = 1; - - float realShift = -500000.5; - - float normRealWeights = static_cast(cv::norm(realWeights)); // TODO cvtest - realWeights /= normRealWeights; - realShift /= normRealWeights; - - cv::Ptr svmsgd = SVMSGD::create(); - svmsgd->setOptimalParameters(); - svmsgd->train( trainData ); - - Mat foundWeights = svmsgd->getWeights(); - float foundShift = svmsgd->getShift(); - - float normFoundWeights = 
static_cast(cv::norm(foundWeights)); // TODO cvtest - foundWeights /= normFoundWeights; - foundShift /= normFoundWeights; - EXPECT_LE(cv::norm(Mat(foundWeights - realWeights)), 0.001); // TODO cvtest - EXPECT_LE(std::abs((foundShift - realShift) / realShift), 0.05); -} - -}} // namespace diff --git a/modules/ml/test/test_svmtrainauto.cpp b/modules/ml/test/test_svmtrainauto.cpp deleted file mode 100644 index 9d78762c4c..0000000000 --- a/modules/ml/test/test_svmtrainauto.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. - -#include "test_precomp.hpp" - -namespace opencv_test { namespace { - -using cv::ml::SVM; -using cv::ml::TrainData; - -static Ptr makeRandomData(int datasize) -{ - cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); - cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); - RNG &rng = cv::theRNG(); - for (int i = 0; i < datasize; ++i) - { - int response = rng.uniform(0, 2); // Random from {0, 1}. - samples.at( i, 0 ) = rng.uniform(0.f, 0.5f) + response * 0.5f; - samples.at( i, 1 ) = rng.uniform(0.f, 0.5f) + response * 0.5f; - responses.at( i, 0 ) = response; - } - return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); -} - -static Ptr makeCircleData(int datasize, float scale_factor, float radius) -{ - // Populate samples with data that can be split into two concentric circles - cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); - cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); - for (int i = 0; i < datasize; i+=2) - { - const float pi = 3.14159f; - const float angle_rads = (i/datasize) * pi; - const float x = radius * cos(angle_rads); - const float y = radius * cos(angle_rads); - - // Larger circle - samples.at( i, 0 ) = x; - samples.at( i, 1 ) = y; - responses.at( i, 0 ) = 0; - - // Smaller circle - samples.at( i + 1, 0 ) = x * scale_factor; - samples.at( i + 1, 1 ) = y * scale_factor; - responses.at( i + 1, 0 ) = 1; - } - return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); -} - -static Ptr makeRandomData2(int datasize) -{ - cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 ); - cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S ); - RNG &rng = cv::theRNG(); - for (int i = 0; i < datasize; ++i) - { - int response = rng.uniform(0, 2); // Random from {0, 1}. - samples.at( i, 0 ) = 0; - samples.at( i, 1 ) = (0.5f - response) * rng.uniform(0.f, 1.2f) + response; - responses.at( i, 0 ) = response; - } - return TrainData::create( samples, cv::ml::ROW_SAMPLE, responses ); -} - -//================================================================================================== - -TEST(ML_SVM, trainauto) -{ - const int datasize = 100; - cv::Ptr data = makeRandomData(datasize); - ASSERT_TRUE(data); - cv::Ptr svm = SVM::create(); - ASSERT_TRUE(svm); - svm->trainAuto( data, 10 ); // 2-fold cross validation. 
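    // Editorial note: trainAuto() cross-validates over built-in logarithmic
    // grids for the active kernel's hyper-parameters (C, gamma, ...) and keeps
    // the best combination; its second argument is the fold count, so the call
    // above is 10-fold, not 2-fold as its stale comment says. Custom grids can
    // be supplied, e.g. svm->trainAuto(data, 10, SVM::getDefaultGrid(SVM::C),
    // SVM::getDefaultGrid(SVM::GAMMA)). The selected values can be read back:
    std::cout << "selected C=" << svm->getC()
              << " gamma=" << svm->getGamma() << std::endl;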
- - float test_data0[2] = {0.25f, 0.25f}; - cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); - float result0 = svm->predict( test_point0 ); - float test_data1[2] = {0.75f, 0.75f}; - cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); - float result1 = svm->predict( test_point1 ); - - EXPECT_NEAR(result0, 0, 0.001); - EXPECT_NEAR(result1, 1, 0.001); -} - -TEST(ML_SVM, trainauto_sigmoid) -{ - const int datasize = 100; - const float scale_factor = 0.5; - const float radius = 2.0; - cv::Ptr data = makeCircleData(datasize, scale_factor, radius); - ASSERT_TRUE(data); - - cv::Ptr svm = SVM::create(); - ASSERT_TRUE(svm); - svm->setKernel(SVM::SIGMOID); - svm->setGamma(10.0); - svm->setCoef0(-10.0); - svm->trainAuto( data, 10 ); // 2-fold cross validation. - - float test_data0[2] = {radius, radius}; - cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); - EXPECT_FLOAT_EQ(svm->predict( test_point0 ), 0); - - float test_data1[2] = {scale_factor * radius, scale_factor * radius}; - cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); - EXPECT_FLOAT_EQ(svm->predict( test_point1 ), 1); -} - -TEST(ML_SVM, trainAuto_regression_5369) -{ - const int datasize = 100; - Ptr data = makeRandomData2(datasize); - cv::Ptr svm = SVM::create(); - svm->trainAuto( data, 10 ); // 2-fold cross validation. - - float test_data0[2] = {0.25f, 0.25f}; - cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 ); - float result0 = svm->predict( test_point0 ); - float test_data1[2] = {0.75f, 0.75f}; - cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 ); - float result1 = svm->predict( test_point1 ); - - EXPECT_EQ(0., result0); - EXPECT_EQ(1., result1); -} - -TEST(ML_SVM, getSupportVectors) -{ - // Set up training data - int labels[4] = {1, -1, -1, -1}; - float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} }; - Mat trainingDataMat(4, 2, CV_32FC1, trainingData); - Mat labelsMat(4, 1, CV_32SC1, labels); - - Ptr svm = SVM::create(); - ASSERT_TRUE(svm); - svm->setType(SVM::C_SVC); - svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)); - - // Test retrieval of SVs and compressed SVs on linear SVM - svm->setKernel(SVM::LINEAR); - svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat); - - Mat sv = svm->getSupportVectors(); - EXPECT_EQ(1, sv.rows); // by default compressed SV returned - sv = svm->getUncompressedSupportVectors(); - EXPECT_EQ(3, sv.rows); - - // Test retrieval of SVs and compressed SVs on non-linear SVM - svm->setKernel(SVM::POLY); - svm->setDegree(2); - svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat); - - sv = svm->getSupportVectors(); - EXPECT_EQ(3, sv.rows); - sv = svm->getUncompressedSupportVectors(); - EXPECT_EQ(0, sv.rows); // inapplicable for non-linear SVMs -} - -}} // namespace diff --git a/modules/ml/test/test_utils.cpp b/modules/ml/test/test_utils.cpp deleted file mode 100644 index 8717d9f301..0000000000 --- a/modules/ml/test/test_utils.cpp +++ /dev/null @@ -1,189 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
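// Editorial sketch re the getSupportVectors() expectations above: with a
// LINEAR kernel OpenCV compresses all support vectors into a single
// hyperplane normal, so getSupportVectors() yields one row, while
// getUncompressedSupportVectors() returns the original vectors; for
// non-linear kernels only getSupportVectors() is populated. Minimal version:
#include <opencv2/ml.hpp>

static int sketch_count_linear_svs()
{
    using namespace cv;
    using namespace cv::ml;
    float X[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
    int   y[4]    = { 1, -1, -1, -1 };
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::LINEAR);
    svm->train(Mat(4, 2, CV_32F, X), ROW_SAMPLE, Mat(4, 1, CV_32S, y));
    return svm->getSupportVectors().rows;  // 1: the compressed weight vector
}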
-#include "test_precomp.hpp" - -namespace opencv_test { - -void defaultDistribs( Mat& means, vector& covs, int type) -{ - float mp0[] = {0.0f, 0.0f}, cp0[] = {0.67f, 0.0f, 0.0f, 0.67f}; - float mp1[] = {5.0f, 0.0f}, cp1[] = {1.0f, 0.0f, 0.0f, 1.0f}; - float mp2[] = {1.0f, 5.0f}, cp2[] = {1.0f, 0.0f, 0.0f, 1.0f}; - means.create(3, 2, type); - Mat m0( 1, 2, CV_32FC1, mp0 ), c0( 2, 2, CV_32FC1, cp0 ); - Mat m1( 1, 2, CV_32FC1, mp1 ), c1( 2, 2, CV_32FC1, cp1 ); - Mat m2( 1, 2, CV_32FC1, mp2 ), c2( 2, 2, CV_32FC1, cp2 ); - means.resize(3), covs.resize(3); - - Mat mr0 = means.row(0); - m0.convertTo(mr0, type); - c0.convertTo(covs[0], type); - - Mat mr1 = means.row(1); - m1.convertTo(mr1, type); - c1.convertTo(covs[1], type); - - Mat mr2 = means.row(2); - m2.convertTo(mr2, type); - c2.convertTo(covs[2], type); -} - -// generate points sets by normal distributions -void generateData( Mat& data, Mat& labels, const vector& sizes, const Mat& _means, const vector& covs, int dataType, int labelType ) -{ - vector::const_iterator sit = sizes.begin(); - int total = 0; - for( ; sit != sizes.end(); ++sit ) - total += *sit; - CV_Assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() ); - CV_Assert( !data.empty() && data.rows == total ); - CV_Assert( data.type() == dataType ); - - labels.create( data.rows, 1, labelType ); - - randn( data, Scalar::all(-1.0), Scalar::all(1.0) ); - vector means(sizes.size()); - for(int i = 0; i < _means.rows; i++) - means[i] = _means.row(i); - vector::const_iterator mit = means.begin(), cit = covs.begin(); - int bi, ei = 0; - sit = sizes.begin(); - for( int p = 0, l = 0; sit != sizes.end(); ++sit, ++mit, ++cit, l++ ) - { - bi = ei; - ei = bi + *sit; - CV_Assert( mit->rows == 1 && mit->cols == data.cols ); - CV_Assert( cit->rows == data.cols && cit->cols == data.cols ); - for( int i = bi; i < ei; i++, p++ ) - { - Mat r = data.row(i); - r = r * (*cit) + *mit; - if( labelType == CV_32FC1 ) - labels.at(p, 0) = (float)l; - else if( labelType == CV_32SC1 ) - labels.at(p, 0) = l; - else - { - CV_DbgAssert(0); - } - } - } -} - -int maxIdx( const vector& count ) -{ - int idx = -1; - int maxVal = -1; - vector::const_iterator it = count.begin(); - for( int i = 0; it != count.end(); ++it, i++ ) - { - if( *it > maxVal) - { - maxVal = *it; - idx = i; - } - } - CV_Assert( idx >= 0); - return idx; -} - -bool getLabelsMap( const Mat& labels, const vector& sizes, vector& labelsMap, bool checkClusterUniq) -{ - size_t total = 0, nclusters = sizes.size(); - for(size_t i = 0; i < sizes.size(); i++) - total += sizes[i]; - - CV_Assert( !labels.empty() ); - CV_Assert( labels.total() == total && (labels.cols == 1 || labels.rows == 1)); - CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 ); - - bool isFlt = labels.type() == CV_32FC1; - - labelsMap.resize(nclusters); - - vector buzy(nclusters, false); - int startIndex = 0; - for( size_t clusterIndex = 0; clusterIndex < sizes.size(); clusterIndex++ ) - { - vector count( nclusters, 0 ); - for( int i = startIndex; i < startIndex + sizes[clusterIndex]; i++) - { - int lbl = isFlt ? 
(int)labels.at(i) : labels.at(i); - CV_Assert(lbl < (int)nclusters); - count[lbl]++; - CV_Assert(count[lbl] < (int)total); - } - startIndex += sizes[clusterIndex]; - - int cls = maxIdx( count ); - CV_Assert( !checkClusterUniq || !buzy[cls] ); - - labelsMap[clusterIndex] = cls; - - buzy[cls] = true; - } - - if(checkClusterUniq) - { - for(size_t i = 0; i < buzy.size(); i++) - if(!buzy[i]) - return false; - } - - return true; -} - -bool calcErr( const Mat& labels, const Mat& origLabels, const vector& sizes, float& err, bool labelsEquivalent, bool checkClusterUniq) -{ - err = 0; - CV_Assert( !labels.empty() && !origLabels.empty() ); - CV_Assert( labels.rows == 1 || labels.cols == 1 ); - CV_Assert( origLabels.rows == 1 || origLabels.cols == 1 ); - CV_Assert( labels.total() == origLabels.total() ); - CV_Assert( labels.type() == CV_32SC1 || labels.type() == CV_32FC1 ); - CV_Assert( origLabels.type() == labels.type() ); - - vector labelsMap; - bool isFlt = labels.type() == CV_32FC1; - if( !labelsEquivalent ) - { - if( !getLabelsMap( labels, sizes, labelsMap, checkClusterUniq ) ) - return false; - - for( int i = 0; i < labels.rows; i++ ) - if( isFlt ) - err += labels.at(i) != labelsMap[(int)origLabels.at(i)] ? 1.f : 0.f; - else - err += labels.at(i) != labelsMap[origLabels.at(i)] ? 1.f : 0.f; - } - else - { - for( int i = 0; i < labels.rows; i++ ) - if( isFlt ) - err += labels.at(i) != origLabels.at(i) ? 1.f : 0.f; - else - err += labels.at(i) != origLabels.at(i) ? 1.f : 0.f; - } - err /= (float)labels.rows; - return true; -} - -bool calculateError( const Mat& _p_labels, const Mat& _o_labels, float& error) -{ - error = 0.0f; - float accuracy = 0.0f; - Mat _p_labels_temp; - Mat _o_labels_temp; - _p_labels.convertTo(_p_labels_temp, CV_32S); - _o_labels.convertTo(_o_labels_temp, CV_32S); - - CV_Assert(_p_labels_temp.total() == _o_labels_temp.total()); - CV_Assert(_p_labels_temp.rows == _o_labels_temp.rows); - - accuracy = (float)countNonZero(_p_labels_temp == _o_labels_temp)/_p_labels_temp.rows; - error = 1 - accuracy; - return true; -} - -} // namespace diff --git a/modules/objdetect/include/opencv2/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect.hpp index 1cb75aa579..2e6aad24ae 100644 --- a/modules/objdetect/include/opencv2/objdetect.hpp +++ b/modules/objdetect/include/opencv2/objdetect.hpp @@ -501,8 +501,6 @@ public: */ virtual void copyTo(HOGDescriptor& c) const; - /**@example samples/cpp/train_HOG.cpp - */ /** @brief Computes HOG descriptors of given image. @param img Matrix of the type CV_8U containing an image where HOG features will be calculated. 
@param descriptors Matrix of the type CV_32F diff --git a/modules/python/test/test_gaussian_mix.py b/modules/python/test/test_gaussian_mix.py deleted file mode 100644 index 62866295e6..0000000000 --- a/modules/python/test/test_gaussian_mix.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python - -# Python 2/3 compatibility -from __future__ import print_function -import sys -PY3 = sys.version_info[0] == 3 - -if PY3: - xrange = range - -import numpy as np -from numpy import random -import cv2 as cv - -def make_gaussians(cluster_n, img_size): - points = [] - ref_distrs = [] - for _ in xrange(cluster_n): - mean = (0.1 + 0.8*random.rand(2)) * img_size - a = (random.rand(2, 2)-0.5)*img_size*0.1 - cov = np.dot(a.T, a) + img_size*0.05*np.eye(2) - n = 100 + random.randint(900) - pts = random.multivariate_normal(mean, cov, n) - points.append( pts ) - ref_distrs.append( (mean, cov) ) - points = np.float32( np.vstack(points) ) - return points, ref_distrs - -from tests_common import NewOpenCVTests - -class gaussian_mix_test(NewOpenCVTests): - - def test_gaussian_mix(self): - - np.random.seed(10) - cluster_n = 5 - img_size = 512 - - points, ref_distrs = make_gaussians(cluster_n, img_size) - - em = cv.ml.EM_create() - em.setClustersNumber(cluster_n) - em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) - em.trainEM(points) - means = em.getMeans() - covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232 - #found_distrs = zip(means, covs) - - matches_count = 0 - - meanEps = 0.05 - covEps = 0.1 - - for i in range(cluster_n): - for j in range(cluster_n): - if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and - cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps): - matches_count += 1 - - self.assertEqual(matches_count, cluster_n) - - -if __name__ == '__main__': - NewOpenCVTests.bootstrap() diff --git a/modules/python/test/test_misc.py b/modules/python/test/test_misc.py index bcd3152699..a8e204bafe 100644 --- a/modules/python/test/test_misc.py +++ b/modules/python/test/test_misc.py @@ -136,11 +136,6 @@ class Bindings(NewOpenCVTests): bm.getPreFilterCap() # from StereoBM bm.getBlockSize() # from SteroMatcher - boost = cv.ml.Boost_create() - boost.getBoostType() # from ml::Boost - boost.getMaxDepth() # from ml::DTrees - boost.isClassifier() # from ml::StatModel - def test_raiseGeneralException(self): with self.assertRaises((cv.error,), msg='C++ exception is not propagated to Python in the right way') as cm: @@ -820,16 +815,6 @@ class Arguments(NewOpenCVTests): self.assertEqual(flag, cv.utils.nested.testEchoBooleanFunction(flag), msg="Function in nested module returns wrong result") - def test_class_from_submodule_has_global_alias(self): - self.assertTrue(hasattr(cv.ml, "Boost"), - msg="Class is not registered in the submodule") - self.assertTrue(hasattr(cv, "ml_Boost"), - msg="Class from submodule doesn't have alias in the " - "global module") - self.assertEqual(cv.ml.Boost, cv.ml_Boost, - msg="Classes from submodules and global module don't refer " - "to the same type") - def test_inner_class_has_global_alias(self): self.assertTrue(hasattr(cv.SimpleBlobDetector, "Params"), msg="Class is not registered as inner class") diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 28cdbae9b2..4fc4c1fdb1 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -7,7 +7,6 @@ set(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_imgcodecs opencv_videoio opencv_highgui - 
opencv_ml opencv_video opencv_objdetect opencv_photo diff --git a/samples/cpp/digits_svm.cpp b/samples/cpp/digits_svm.cpp deleted file mode 100644 index 3e28e24fe7..0000000000 --- a/samples/cpp/digits_svm.cpp +++ /dev/null @@ -1,367 +0,0 @@ -#include "opencv2/core.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/imgcodecs.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/ml.hpp" - -#include -#include -#include - -using namespace cv; -using namespace std; - -const int SZ = 20; // size of each digit is SZ x SZ -const int CLASS_N = 10; -const char* DIGITS_FN = "digits.png"; - -static void help(char** argv) -{ - cout << - "\n" - "SVM and KNearest digit recognition.\n" - "\n" - "Sample loads a dataset of handwritten digits from 'digits.png'.\n" - "Then it trains a SVM and KNearest classifiers on it and evaluates\n" - "their accuracy.\n" - "\n" - "Following preprocessing is applied to the dataset:\n" - " - Moment-based image deskew (see deskew())\n" - " - Digit images are split into 4 10x10 cells and 16-bin\n" - " histogram of oriented gradients is computed for each\n" - " cell\n" - " - Transform histograms to space with Hellinger metric (see [1] (RootSIFT))\n" - "\n" - "\n" - "[1] R. Arandjelovic, A. Zisserman\n" - " \"Three things everyone should know to improve object retrieval\"\n" - " http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf\n" - "\n" - "Usage:\n" - << argv[0] << endl; -} - -static void split2d(const Mat& image, const Size cell_size, vector& cells) -{ - int height = image.rows; - int width = image.cols; - - int sx = cell_size.width; - int sy = cell_size.height; - - cells.clear(); - - for (int i = 0; i < height; i += sy) - { - for (int j = 0; j < width; j += sx) - { - cells.push_back(image(Rect(j, i, sx, sy))); - } - } -} - -static void load_digits(const char* fn, vector& digits, vector& labels) -{ - digits.clear(); - labels.clear(); - - String filename = samples::findFile(fn); - - cout << "Loading " << filename << " ..." 
<< endl; - - Mat digits_img = imread(filename, IMREAD_GRAYSCALE); - split2d(digits_img, Size(SZ, SZ), digits); - - for (int i = 0; i < CLASS_N; i++) - { - for (size_t j = 0; j < digits.size() / CLASS_N; j++) - { - labels.push_back(i); - } - } -} - -static void deskew(const Mat& img, Mat& deskewed_img) -{ - Moments m = moments(img); - - if (abs(m.mu02) < 0.01) - { - deskewed_img = img.clone(); - return; - } - - float skew = (float)(m.mu11 / m.mu02); - float M_vals[2][3] = {{1, skew, -0.5f * SZ * skew}, {0, 1, 0}}; - Mat M(Size(3, 2), CV_32F, &M_vals[0][0]); - - warpAffine(img, deskewed_img, M, Size(SZ, SZ), WARP_INVERSE_MAP | INTER_LINEAR); -} - -static void mosaic(const int width, const vector& images, Mat& grid) -{ - int mat_width = SZ * width; - int mat_height = SZ * (int)ceil((double)images.size() / width); - - if (!images.empty()) - { - grid = Mat(Size(mat_width, mat_height), images[0].type()); - - for (size_t i = 0; i < images.size(); i++) - { - Mat location_on_grid = grid(Rect(SZ * ((int)i % width), SZ * ((int)i / width), SZ, SZ)); - images[i].copyTo(location_on_grid); - } - } -} - -static void evaluate_model(const vector& predictions, const vector& digits, const vector& labels, Mat& mos) -{ - double err = 0; - - for (size_t i = 0; i < predictions.size(); i++) - { - if ((int)predictions[i] != labels[i]) - { - err++; - } - } - - err /= predictions.size(); - - cout << cv::format("error: %.2f %%", err * 100) << endl; - - int confusion[10][10] = {}; - - for (size_t i = 0; i < labels.size(); i++) - { - confusion[labels[i]][(int)predictions[i]]++; - } - - cout << "confusion matrix:" << endl; - for (int i = 0; i < 10; i++) - { - for (int j = 0; j < 10; j++) - { - cout << cv::format("%2d ", confusion[i][j]); - } - cout << endl; - } - - cout << endl; - - vector vis; - - for (size_t i = 0; i < digits.size(); i++) - { - Mat img; - cvtColor(digits[i], img, COLOR_GRAY2BGR); - - if ((int)predictions[i] != labels[i]) - { - for (int j = 0; j < img.rows; j++) - { - for (int k = 0; k < img.cols; k++) - { - img.at(j, k)[0] = 0; - img.at(j, k)[1] = 0; - } - } - } - - vis.push_back(img); - } - - mosaic(25, vis, mos); -} - -static void bincount(const Mat& x, const Mat& weights, const int min_length, vector& bins) -{ - double max_x_val = 0; - minMaxLoc(x, NULL, &max_x_val); - - bins = vector(max((int)max_x_val, min_length)); - - for (int i = 0; i < x.rows; i++) - { - for (int j = 0; j < x.cols; j++) - { - bins[x.at(i, j)] += weights.at(i, j); - } - } -} - -static void preprocess_hog(const vector& digits, Mat& hog) -{ - int bin_n = 16; - int half_cell = SZ / 2; - double eps = 1e-7; - - hog = Mat(Size(4 * bin_n, (int)digits.size()), CV_32F); - - for (size_t img_index = 0; img_index < digits.size(); img_index++) - { - Mat gx; - Sobel(digits[img_index], gx, CV_32F, 1, 0); - - Mat gy; - Sobel(digits[img_index], gy, CV_32F, 0, 1); - - Mat mag; - Mat ang; - cartToPolar(gx, gy, mag, ang); - - Mat bin(ang.size(), CV_32S); - - for (int i = 0; i < ang.rows; i++) - { - for (int j = 0; j < ang.cols; j++) - { - bin.at(i, j) = (int)(bin_n * ang.at(i, j) / (2 * CV_PI)); - } - } - - Mat bin_cells[] = { - bin(Rect(0, 0, half_cell, half_cell)), - bin(Rect(half_cell, 0, half_cell, half_cell)), - bin(Rect(0, half_cell, half_cell, half_cell)), - bin(Rect(half_cell, half_cell, half_cell, half_cell)) - }; - Mat mag_cells[] = { - mag(Rect(0, 0, half_cell, half_cell)), - mag(Rect(half_cell, 0, half_cell, half_cell)), - mag(Rect(0, half_cell, half_cell, half_cell)), - mag(Rect(half_cell, half_cell, half_cell, half_cell)) - }; - - 
vector hist; - hist.reserve(4 * bin_n); - - for (int i = 0; i < 4; i++) - { - vector partial_hist; - bincount(bin_cells[i], mag_cells[i], bin_n, partial_hist); - hist.insert(hist.end(), partial_hist.begin(), partial_hist.end()); - } - - // transform to Hellinger kernel - double sum = 0; - - for (size_t i = 0; i < hist.size(); i++) - { - sum += hist[i]; - } - - for (size_t i = 0; i < hist.size(); i++) - { - hist[i] /= sum + eps; - hist[i] = sqrt(hist[i]); - } - - double hist_norm = norm(hist); - - for (size_t i = 0; i < hist.size(); i++) - { - hog.at((int)img_index, (int)i) = (float)(hist[i] / (hist_norm + eps)); - } - } -} - -static void shuffle(vector& digits, vector& labels) -{ - vector shuffled_indexes(digits.size()); - - for (size_t i = 0; i < digits.size(); i++) - { - shuffled_indexes[i] = (int)i; - } - - randShuffle(shuffled_indexes); - - vector shuffled_digits(digits.size()); - vector shuffled_labels(labels.size()); - - for (size_t i = 0; i < shuffled_indexes.size(); i++) - { - shuffled_digits[shuffled_indexes[i]] = digits[i]; - shuffled_labels[shuffled_indexes[i]] = labels[i]; - } - - digits = shuffled_digits; - labels = shuffled_labels; -} - -int main(int /* argc */, char* argv[]) -{ - help(argv); - - vector digits; - vector labels; - - load_digits(DIGITS_FN, digits, labels); - - cout << "preprocessing..." << endl; - - // shuffle digits - shuffle(digits, labels); - - vector digits2; - - for (size_t i = 0; i < digits.size(); i++) - { - Mat deskewed_digit; - deskew(digits[i], deskewed_digit); - digits2.push_back(deskewed_digit); - } - - Mat samples; - - preprocess_hog(digits2, samples); - - int train_n = (int)(0.9 * samples.rows); - Mat test_set; - - vector digits_test(digits2.begin() + train_n, digits2.end()); - mosaic(25, digits_test, test_set); - imshow("test set", test_set); - - Mat samples_train = samples(Rect(0, 0, samples.cols, train_n)); - Mat samples_test = samples(Rect(0, train_n, samples.cols, samples.rows - train_n)); - vector labels_train(labels.begin(), labels.begin() + train_n); - vector labels_test(labels.begin() + train_n, labels.end()); - - Ptr k_nearest; - Ptr svm; - vector predictions; - Mat vis; - - cout << "training KNearest..." << endl; - k_nearest = ml::KNearest::create(); - k_nearest->train(samples_train, ml::ROW_SAMPLE, labels_train); - - // predict digits with KNearest - k_nearest->findNearest(samples_test, 4, predictions); - evaluate_model(predictions, digits_test, labels_test, vis); - imshow("KNearest test", vis); - k_nearest.release(); - - cout << "training SVM..." << endl; - svm = ml::SVM::create(); - svm->setGamma(5.383); - svm->setC(2.67); - svm->setKernel(ml::SVM::RBF); - svm->setType(ml::SVM::C_SVC); - svm->train(samples_train, ml::ROW_SAMPLE, labels_train); - - // predict digits with SVM - svm->predict(samples_test, predictions); - evaluate_model(predictions, digits_test, labels_test, vis); - imshow("SVM test", vis); - cout << "Saving SVM as \"digits_svm.yml\"..." 
<< endl; - svm->save("digits_svm.yml"); - svm.release(); - - waitKey(); - - return 0; -} diff --git a/samples/cpp/em.cpp b/samples/cpp/em.cpp deleted file mode 100644 index f5310740f4..0000000000 --- a/samples/cpp/em.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include "opencv2/highgui.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/ml.hpp" - -using namespace cv; -using namespace cv::ml; - -int main( int /*argc*/, char** /*argv*/ ) -{ - const int N = 4; - const int N1 = (int)sqrt((double)N); - const Scalar colors[] = - { - Scalar(0,0,255), Scalar(0,255,0), - Scalar(0,255,255),Scalar(255,255,0) - }; - - int i, j; - int nsamples = 100; - Mat samples( nsamples, 2, CV_32FC1 ); - Mat labels; - Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 ); - Mat sample( 1, 2, CV_32FC1 ); - - samples = samples.reshape(2, 0); - for( i = 0; i < N; i++ ) - { - // form the training samples - Mat samples_part = samples.rowRange(i*nsamples/N, (i+1)*nsamples/N ); - - Scalar mean(((i%N1)+1)*img.rows/(N1+1), - ((i/N1)+1)*img.rows/(N1+1)); - Scalar sigma(30,30); - randn( samples_part, mean, sigma ); - } - samples = samples.reshape(1, 0); - - // cluster the data - Ptr em_model = EM::create(); - em_model->setClustersNumber(N); - em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL); - em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)); - em_model->trainEM( samples, noArray(), labels, noArray() ); - - // classify every image pixel - for( i = 0; i < img.rows; i++ ) - { - for( j = 0; j < img.cols; j++ ) - { - sample.at(0) = (float)j; - sample.at(1) = (float)i; - int response = cvRound(em_model->predict2( sample, noArray() )[1]); - Scalar c = colors[response]; - - circle( img, Point(j, i), 1, c*0.75, FILLED ); - } - } - - //draw the clustered samples - for( i = 0; i < nsamples; i++ ) - { - Point pt(cvRound(samples.at(i, 0)), cvRound(samples.at(i, 1))); - circle( img, pt, 1, colors[labels.at(i)], FILLED ); - } - - imshow( "EM-clustering result", img ); - waitKey(0); - - return 0; -} diff --git a/samples/cpp/letter_recog.cpp b/samples/cpp/letter_recog.cpp deleted file mode 100644 index bcad2f4687..0000000000 --- a/samples/cpp/letter_recog.cpp +++ /dev/null @@ -1,558 +0,0 @@ -#include "opencv2/core.hpp" -#include "opencv2/ml.hpp" - -#include -#include -#include - -using namespace std; -using namespace cv; -using namespace cv::ml; - -static void help(char** argv) -{ - printf("\nThe sample demonstrates how to train Random Trees classifier\n" - "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n" - "\n" - "We use the sample database letter-recognition.data\n" - "from UCI Repository, here is the link:\n" - "\n" - "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. 
(1998).\n" - "UCI Repository of machine learning databases\n" - "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n" - "Irvine, CA: University of California, Department of Information and Computer Science.\n" - "\n" - "The dataset consists of 20000 feature vectors along with the\n" - "responses - capital latin letters A..Z.\n" - "The first 16000 (10000 for boosting)) samples are used for training\n" - "and the remaining 4000 (10000 for boosting) - to test the classifier.\n" - "======================================================\n"); - printf("\nThis is letter recognition sample.\n" - "The usage: %s [-data=] \\\n" - " [-save=] \\\n" - " [-load=] \\\n" - " [-boost|-mlp|-knearest|-nbayes|-svm] # to use boost/mlp/knearest/SVM classifier instead of default Random Trees\n", argv[0] ); -} - -// This function reads data and responses from the file -static bool -read_num_class_data( const string& filename, int var_count, - Mat* _data, Mat* _responses ) -{ - const int M = 1024; - char buf[M+2]; - - Mat el_ptr(1, var_count, CV_32F); - int i; - vector responses; - - _data->release(); - _responses->release(); - - FILE* f = fopen( filename.c_str(), "rt" ); - if( !f ) - { - cout << "Could not read the database " << filename << endl; - return false; - } - - for(;;) - { - char* ptr; - if( !fgets( buf, M, f ) || !strchr( buf, ',' ) ) - break; - responses.push_back((int)buf[0]); - ptr = buf+2; - for( i = 0; i < var_count; i++ ) - { - int n = 0; - sscanf( ptr, "%f%n", &el_ptr.at(i), &n ); - ptr += n + 1; - } - if( i < var_count ) - break; - _data->push_back(el_ptr); - } - fclose(f); - Mat(responses).copyTo(*_responses); - - cout << "The database " << filename << " is loaded.\n"; - - return true; -} - -template -static Ptr load_classifier(const string& filename_to_load) -{ - // load classifier from the specified file - Ptr model = StatModel::load( filename_to_load ); - if( model.empty() ) - cout << "Could not read the classifier " << filename_to_load << endl; - else - cout << "The classifier " << filename_to_load << " is loaded.\n"; - - return model; -} - -static Ptr -prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples) -{ - Mat sample_idx = Mat::zeros( 1, data.rows, CV_8U ); - Mat train_samples = sample_idx.colRange(0, ntrain_samples); - train_samples.setTo(Scalar::all(1)); - - int nvars = data.cols; - Mat var_type( nvars + 1, 1, CV_8U ); - var_type.setTo(Scalar::all(VAR_ORDERED)); - var_type.at(nvars) = VAR_CATEGORICAL; - - return TrainData::create(data, ROW_SAMPLE, responses, - noArray(), sample_idx, noArray(), var_type); -} - -inline TermCriteria TC(int iters, double eps) -{ - return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps); -} - -static void test_and_save_classifier(const Ptr& model, - const Mat& data, const Mat& responses, - int ntrain_samples, int rdelta, - const string& filename_to_save) -{ - int i, nsamples_all = data.rows; - double train_hr = 0, test_hr = 0; - - // compute prediction error on train and test data - for( i = 0; i < nsamples_all; i++ ) - { - Mat sample = data.row(i); - - float r = model->predict( sample ); - r = std::abs(r + rdelta - responses.at(i)) <= FLT_EPSILON ? 1.f : 0.f; - - if( i < ntrain_samples ) - train_hr += r; - else - test_hr += r; - } - - test_hr /= nsamples_all - ntrain_samples; - train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.; - - printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n", - train_hr*100., test_hr*100. 
); - - if( !filename_to_save.empty() ) - { - model->save( filename_to_save ); - } -} - - -static bool -build_rtrees_classifier( const string& data_filename, - const string& filename_to_save, - const string& filename_to_load ) -{ - Mat data; - Mat responses; - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - Ptr model; - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.8); - - // Create or load Random Trees classifier - if( !filename_to_load.empty() ) - { - model = load_classifier(filename_to_load); - if( model.empty() ) - return false; - ntrain_samples = 0; - } - else - { - // create classifier by using and - cout << "Training the classifier ...\n"; -// Params( int maxDepth, int minSampleCount, -// double regressionAccuracy, bool useSurrogates, -// int maxCategories, const Mat& priors, -// bool calcVarImportance, int nactiveVars, -// TermCriteria termCrit ); - Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = RTrees::create(); - model->setMaxDepth(10); - model->setMinSampleCount(10); - model->setRegressionAccuracy(0); - model->setUseSurrogates(false); - model->setMaxCategories(15); - model->setPriors(Mat()); - model->setCalculateVarImportance(true); - model->setActiveVarCount(4); - model->setTermCriteria(TC(100,0.01f)); - model->train(tdata); - cout << endl; - } - - test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save); - cout << "Number of trees: " << model->getRoots().size() << endl; - - // Print variable importance - Mat var_importance = model->getVarImportance(); - if( !var_importance.empty() ) - { - double rt_imp_sum = sum( var_importance )[0]; - printf("var#\timportance (in %%):\n"); - int i, n = (int)var_importance.total(); - for( i = 0; i < n; i++ ) - printf( "%-2d\t%-4.1f\n", i, 100.f*var_importance.at(i)/rt_imp_sum); - } - - return true; -} - - -static bool -build_boost_classifier( const string& data_filename, - const string& filename_to_save, - const string& filename_to_load ) -{ - const int class_count = 26; - Mat data; - Mat responses; - Mat weak_responses; - - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - int i, j, k; - Ptr model; - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.5); - int var_count = data.cols; - - // Create or load Boosted Tree classifier - if( !filename_to_load.empty() ) - { - model = load_classifier(filename_to_load); - if( model.empty() ) - return false; - ntrain_samples = 0; - } - else - { - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - // - // As currently boosted tree classifier in MLL can only be trained - // for 2-class problems, we transform the training database by - // "unrolling" each training sample as many times as the number of - // classes (26) that we have. - // - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - Mat new_data( ntrain_samples*class_count, var_count + 1, CV_32F ); - Mat new_responses( ntrain_samples*class_count, 1, CV_32S ); - - // 1. 
unroll the database type mask - printf( "Unrolling the database...\n"); - for( i = 0; i < ntrain_samples; i++ ) - { - const float* data_row = data.ptr(i); - for( j = 0; j < class_count; j++ ) - { - float* new_data_row = (float*)new_data.ptr(i*class_count+j); - memcpy(new_data_row, data_row, var_count*sizeof(data_row[0])); - new_data_row[var_count] = (float)j; - new_responses.at(i*class_count + j) = responses.at(i) == j+'A'; - } - } - - Mat var_type( 1, var_count + 2, CV_8U ); - var_type.setTo(Scalar::all(VAR_ORDERED)); - var_type.at(var_count) = var_type.at(var_count+1) = VAR_CATEGORICAL; - - Ptr tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses, - noArray(), noArray(), noArray(), var_type); - vector priors(2); - priors[0] = 1; - priors[1] = 26; - - cout << "Training the classifier (may take a few minutes)...\n"; - model = Boost::create(); - model->setBoostType(Boost::GENTLE); - model->setWeakCount(100); - model->setWeightTrimRate(0.95); - model->setMaxDepth(5); - model->setUseSurrogates(false); - model->setPriors(Mat(priors)); - model->train(tdata); - cout << endl; - } - - Mat temp_sample( 1, var_count + 1, CV_32F ); - float* tptr = temp_sample.ptr(); - - // compute prediction error on train and test data - double train_hr = 0, test_hr = 0; - for( i = 0; i < nsamples_all; i++ ) - { - int best_class = 0; - double max_sum = -DBL_MAX; - const float* ptr = data.ptr(i); - for( k = 0; k < var_count; k++ ) - tptr[k] = ptr[k]; - - for( j = 0; j < class_count; j++ ) - { - tptr[var_count] = (float)j; - float s = model->predict( temp_sample, noArray(), StatModel::RAW_OUTPUT ); - if( max_sum < s ) - { - max_sum = s; - best_class = j + 'A'; - } - } - - double r = std::abs(best_class - responses.at(i)) < FLT_EPSILON ? 1 : 0; - if( i < ntrain_samples ) - train_hr += r; - else - test_hr += r; - } - - test_hr /= nsamples_all-ntrain_samples; - train_hr = ntrain_samples > 0 ? train_hr/ntrain_samples : 1.; - printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n", - train_hr*100., test_hr*100. ); - - cout << "Number of trees: " << model->getRoots().size() << endl; - - // Save classifier to file if needed - if( !filename_to_save.empty() ) - model->save( filename_to_save ); - - return true; -} - - -static bool -build_mlp_classifier( const string& data_filename, - const string& filename_to_save, - const string& filename_to_load ) -{ - const int class_count = 26; - Mat data; - Mat responses; - - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - Ptr model; - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.8); - - // Create or load MLP classifier - if( !filename_to_load.empty() ) - { - model = load_classifier(filename_to_load); - if( model.empty() ) - return false; - ntrain_samples = 0; - } - else - { - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - // - // MLP does not support categorical variables by explicitly. - // So, instead of the output class label, we will use - // a binary vector of components for training and, - // therefore, MLP will give us a vector of "probabilities" at the - // prediction stage - // - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - Mat train_data = data.rowRange(0, ntrain_samples); - Mat train_responses = Mat::zeros( ntrain_samples, class_count, CV_32F ); - - // 1. 
unroll the responses - cout << "Unrolling the responses...\n"; - for( int i = 0; i < ntrain_samples; i++ ) - { - int cls_label = responses.at(i) - 'A'; - train_responses.at(i, cls_label) = 1.f; - } - - // 2. train classifier - int layer_sz[] = { data.cols, 100, 100, class_count }; - int nlayers = (int)(sizeof(layer_sz)/sizeof(layer_sz[0])); - Mat layer_sizes( 1, nlayers, CV_32S, layer_sz ); - -#if 1 - int method = ANN_MLP::BACKPROP; - double method_param = 0.001; - int max_iter = 300; -#else - int method = ANN_MLP::RPROP; - double method_param = 0.1; - int max_iter = 1000; -#endif - - Ptr tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses); - - cout << "Training the classifier (may take a few minutes)...\n"; - model = ANN_MLP::create(); - model->setLayerSizes(layer_sizes); - model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0); - model->setTermCriteria(TC(max_iter,0)); - model->setTrainMethod(method, method_param); - model->train(tdata); - cout << endl; - } - - test_and_save_classifier(model, data, responses, ntrain_samples, 'A', filename_to_save); - return true; -} - -static bool -build_knearest_classifier( const string& data_filename, int K ) -{ - Mat data; - Mat responses; - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.8); - - // create classifier by using and - cout << "Training the classifier ...\n"; - Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - Ptr model = KNearest::create(); - model->setDefaultK(K); - model->setIsClassifier(true); - model->train(tdata); - cout << endl; - - test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); - return true; -} - -static bool -build_nbayes_classifier( const string& data_filename ) -{ - Mat data; - Mat responses; - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - Ptr model; - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.8); - - // create classifier by using and - cout << "Training the classifier ...\n"; - Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = NormalBayesClassifier::create(); - model->train(tdata); - cout << endl; - - test_and_save_classifier(model, data, responses, ntrain_samples, 0, string()); - return true; -} - -static bool -build_svm_classifier( const string& data_filename, - const string& filename_to_save, - const string& filename_to_load ) -{ - Mat data; - Mat responses; - bool ok = read_num_class_data( data_filename, 16, &data, &responses ); - if( !ok ) - return ok; - - Ptr model; - - int nsamples_all = data.rows; - int ntrain_samples = (int)(nsamples_all*0.8); - - // Create or load Random Trees classifier - if( !filename_to_load.empty() ) - { - model = load_classifier(filename_to_load); - if( model.empty() ) - return false; - ntrain_samples = 0; - } - else - { - // create classifier by using and - cout << "Training the classifier ...\n"; - Ptr tdata = prepare_train_data(data, responses, ntrain_samples); - model = SVM::create(); - model->setType(SVM::C_SVC); - model->setKernel(SVM::LINEAR); - model->setC(1); - model->train(tdata); - cout << endl; - } - - test_and_save_classifier(model, data, responses, ntrain_samples, 0, filename_to_save); - return true; -} - -int main( int argc, char *argv[] ) -{ - string filename_to_save = ""; - string filename_to_load = ""; - string data_filename; - int method = 0; - - cv::CommandLineParser 
-    cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}"
-        "{mlp||}{knn knearest||}{nbayes||}{svm||}");
-    data_filename = samples::findFile(parser.get<string>("data"));
-    if (parser.has("save"))
-        filename_to_save = parser.get<string>("save");
-    if (parser.has("load"))
-        filename_to_load = samples::findFile(parser.get<string>("load"));
-    if (parser.has("boost"))
-        method = 1;
-    else if (parser.has("mlp"))
-        method = 2;
-    else if (parser.has("knearest"))
-        method = 3;
-    else if (parser.has("nbayes"))
-        method = 4;
-    else if (parser.has("svm"))
-        method = 5;
-
-    help(argv);
-
-    if( (method == 0 ?
-        build_rtrees_classifier( data_filename, filename_to_save, filename_to_load ) :
-        method == 1 ?
-        build_boost_classifier( data_filename, filename_to_save, filename_to_load ) :
-        method == 2 ?
-        build_mlp_classifier( data_filename, filename_to_save, filename_to_load ) :
-        method == 3 ?
-        build_knearest_classifier( data_filename, 10 ) :
-        method == 4 ?
-        build_nbayes_classifier( data_filename) :
-        method == 5 ?
-        build_svm_classifier( data_filename, filename_to_save, filename_to_load ):
-        -1) < 0)
-
-    return 0;
-}
diff --git a/samples/cpp/logistic_regression.cpp b/samples/cpp/logistic_regression.cpp
deleted file mode 100644
index 1bc2bf9711..0000000000
--- a/samples/cpp/logistic_regression.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-// Logistic Regression sample
-// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
-
-#include <iostream>
-
-#include <opencv2/core.hpp>
-#include <opencv2/ml.hpp>
-#include <opencv2/highgui.hpp>
-
-using namespace std;
-using namespace cv;
-using namespace cv::ml;
-
-static void showImage(const Mat &data, int columns, const String &name)
-{
-    Mat bigImage;
-    for(int i = 0; i < data.rows; ++i)
-    {
-        bigImage.push_back(data.row(i).reshape(0, columns));
-    }
-    imshow(name, bigImage.t());
-}
-
-static float calculateAccuracyPercent(const Mat &original, const Mat &predicted)
-{
-    return 100 * (float)countNonZero(original == predicted) / predicted.rows;
-}
-
-int main()
-{
-    const String filename = samples::findFile("data01.xml");
-    cout << "**********************************************************************" << endl;
-    cout << filename
-         << " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
-    cout << "Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix"
-         << endl;
-    cout << "**********************************************************************" << endl;
-
-    Mat data, labels;
-    {
-        cout << "loading the dataset...";
-        FileStorage f;
-        if(f.open(filename, FileStorage::READ))
-        {
-            f["datamat"] >> data;
-            f["labelsmat"] >> labels;
-            f.release();
-        }
-        else
-        {
-            cerr << "file can not be opened: " << filename << endl;
-            return 1;
-        }
-        data.convertTo(data, CV_32F);
-        labels.convertTo(labels, CV_32F);
-        cout << "read " << data.rows << " rows of data" << endl;
-    }
-
-    Mat data_train, data_test;
-    Mat labels_train, labels_test;
-    for(int i = 0; i < data.rows; i++)
-    {
-        if(i % 2 == 0)
-        {
-            data_train.push_back(data.row(i));
-            labels_train.push_back(labels.row(i));
-        }
-        else
-        {
-            data_test.push_back(data.row(i));
-            labels_test.push_back(labels.row(i));
-        }
-    }
-    cout << "training/testing samples count: " << data_train.rows << "/" << data_test.rows << endl;
-
-    // display sample image
-    showImage(data_train, 28, "train data");
-    showImage(data_test, 28, "test data");
-
-    // simple case with batch gradient
-    cout << "training...";
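Note: the [init] block below configures plain batch gradient descent: LogisticRegression::BATCH takes every gradient step over the whole training set, so the mini-batch size only matters for the MINI_BATCH method. For comparison, a mini-batch setup would look like this (illustrative values, not part of the sample):

    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.001);
    lr->setIterations(100);
    lr->setRegularization(LogisticRegression::REG_L2);
    lr->setTrainMethod(LogisticRegression::MINI_BATCH);
    lr->setMiniBatchSize(10); // 10 samples per gradient step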
-    //! [init]
-    Ptr<LogisticRegression> lr1 = LogisticRegression::create();
-    lr1->setLearningRate(0.001);
-    lr1->setIterations(10);
-    lr1->setRegularization(LogisticRegression::REG_L2);
-    lr1->setTrainMethod(LogisticRegression::BATCH);
-    lr1->setMiniBatchSize(1);
-    //! [init]
-    lr1->train(data_train, ROW_SAMPLE, labels_train);
-    cout << "done!" << endl;
-
-    cout << "predicting...";
-    Mat responses;
-    lr1->predict(data_test, responses);
-    cout << "done!" << endl;
-
-    // show prediction report
-    cout << "original vs predicted:" << endl;
-    labels_test.convertTo(labels_test, CV_32S);
-    cout << labels_test.t() << endl;
-    cout << responses.t() << endl;
-    cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl;
-
-    // save the classifier
-    const String saveFilename = "NewLR_Trained.xml";
-    cout << "saving the classifier to " << saveFilename << endl;
-    lr1->save(saveFilename);
-
-    // load the classifier onto new object
-    cout << "loading a new classifier from " << saveFilename << endl;
-    Ptr<LogisticRegression> lr2 = StatModel::load<LogisticRegression>(saveFilename);
-
-    // predict using loaded classifier
-    cout << "predicting the dataset using the loaded classifier...";
-    Mat responses2;
-    lr2->predict(data_test, responses2);
-    cout << "done!" << endl;
-
-    // calculate accuracy
-    cout << labels_test.t() << endl;
-    cout << responses2.t() << endl;
-    cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses2) << "%" << endl;
-
-    waitKey(0);
-    return 0;
-}
diff --git a/samples/cpp/neural_network.cpp b/samples/cpp/neural_network.cpp
deleted file mode 100644
index d6e681b6c6..0000000000
--- a/samples/cpp/neural_network.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <opencv2/ml.hpp>
-
-using namespace std;
-using namespace cv;
-using namespace cv::ml;
-
-int main()
-{
-    //create random training data
-    Mat_<float> data(100, 100);
-    randn(data, Mat::zeros(1, 1, data.type()), Mat::ones(1, 1, data.type()));
-
-    //half of the samples for each class
-    Mat_<float> responses(data.rows, 2);
-    for (int i = 0; i < data.rows; ++i)
-    {
-        if (i < data.rows/2)
-        {
-            responses(i, 0) = 1;
-            responses(i, 1) = 0;
-        }
-        else
-        {
-            responses(i, 0) = 0;
-            responses(i, 1) = 1;
-        }
-    }
-
-    /*
-    //example code for just a single response (regression)
-    Mat_<float> responses(data.rows, 1);
-    for (int i=0; i<responses.rows; ++i)
-        responses(i, 0) = i < responses.rows/2 ? 0 : 1;
-    */
-
-    //create the neural network
-    Mat_<int> layerSizes(1, 3);
-    layerSizes(0, 0) = data.cols;
-    layerSizes(0, 1) = 20;
-    layerSizes(0, 2) = responses.cols;
-
-    Ptr<ANN_MLP> network = ANN_MLP::create();
-    network->setLayerSizes(layerSizes);
-    network->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0.1, 0.1);
-    network->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
-    Ptr<TrainData> trainData = TrainData::create(data, ROW_SAMPLE, responses);
-
-    network->train(trainData);
-    if (network->isTrained())
-    {
-        printf("Predict one-vector:\n");
-        Mat result;
-        network->predict(Mat::ones(1, data.cols, data.type()), result);
-        cout << result << endl;
-
-        printf("Predict training data:\n");
-        for (int i=0; i<data.rows; ++i)
-        {
-            network->predict(data.row(i), result);
-            cout << result << endl;
-        }
-    }
-
-    return 0;
-}
diff --git a/samples/cpp/points_classifier.cpp b/samples/cpp/points_classifier.cpp
deleted file mode 100644
index 02e393495d..0000000000
--- a/samples/cpp/points_classifier.cpp
+++ /dev/null
@@ -1,399 +0,0 @@
-#include "opencv2/core.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/ml.hpp"
-#include "opencv2/highgui.hpp"
-
-#include <stdio.h>
-
-using namespace std;
-using namespace cv;
-using namespace cv::ml;
-
-const Scalar WHITE_COLOR = Scalar(255,255,255);
-const string winName = "points";
-const int testStep = 5;
-
-Mat img, imgDst;
-RNG rng;
-
-vector<Point> trainedPoints;
-vector<int>   trainedPointsMarkers;
-const int MAX_CLASSES = 2;
-vector<Vec3b> classColors(MAX_CLASSES);
-int currentClass = 0;
-vector<int>   classCounters(MAX_CLASSES);
-
-#define _NBC_ 1 // normal Bayesian classifier
-#define _KNN_ 1 // k nearest neighbors classifier
-#define _SVM_ 1 // support vector machine
-#define _DT_  1 // decision tree
-#define _BT_  1 // AdaBoost
-#define _GBT_ 0 // gradient boosted trees
-#define _RF_  1 // random forest
-#define _ANN_ 1 // artificial neural networks
-#define _EM_  1 // expectation-maximization
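Note: each toggle above compiles one find_decision_boundary_*() function in or out. All of them share the same three-step pattern: pack the clicked points into a TrainData, train a StatModel, and classify every testStep-th pixel to paint the decision regions. A minimal self-contained sketch of that pattern with toy data (illustrative only, not part of the sample):

    #include "opencv2/core.hpp"
    #include "opencv2/ml.hpp"

    static int classify_toy_point(float x, float y)
    {
        using namespace cv;
        using namespace cv::ml;
        // two clusters of labeled 2-D points
        float pts[4][2] = { {10, 10}, {20, 15}, {400, 400}, {420, 380} };
        int cls[4] = { 0, 0, 1, 1 };
        Mat samples(4, 2, CV_32F, pts), labels(4, 1, CV_32S, cls);
        Ptr<KNearest> knn = KNearest::create();
        knn->setDefaultK(3);
        knn->setIsClassifier(true);
        knn->train(TrainData::create(samples, ROW_SAMPLE, labels));
        Mat query = (Mat_<float>(1, 2) << x, y);
        return (int)knn->predict(query); // 0 or 1
    }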
-static void on_mouse( int event, int x, int y, int /*flags*/, void* )
-{
-    if( img.empty() )
-        return;
-
-    int updateFlag = 0;
-
-    if( event == EVENT_LBUTTONUP )
-    {
-        trainedPoints.push_back( Point(x,y) );
-        trainedPointsMarkers.push_back( currentClass );
-        classCounters[currentClass]++;
-        updateFlag = true;
-    }
-
-    //draw
-    if( updateFlag )
-    {
-        img = Scalar::all(0);
-
-        // draw points
-        for( size_t i = 0; i < trainedPoints.size(); i++ )
-        {
-            Vec3b c = classColors[trainedPointsMarkers[i]];
-            circle( img, trainedPoints[i], 5, Scalar(c), -1 );
-        }
-
-        imshow( winName, img );
-    }
-}
-
-static Mat prepare_train_samples(const vector<Point>& pts)
-{
-    Mat samples;
-    Mat(pts).reshape(1, (int)pts.size()).convertTo(samples, CV_32F);
-    return samples;
-}
-
-static Ptr<TrainData> prepare_train_data()
-{
-    Mat samples = prepare_train_samples(trainedPoints);
-    return TrainData::create(samples, ROW_SAMPLE, Mat(trainedPointsMarkers));
-}
-
-static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
-{
-    Mat testSample( 1, 2, CV_32FC1 );
-    for( int y = 0; y < img.rows; y += testStep )
-    {
-        for( int x = 0; x < img.cols; x += testStep )
-        {
-            testSample.at<float>(0) = (float)x;
-            testSample.at<float>(1) = (float)y;
-
-            int response = (int)model->predict( testSample );
-            dst.at<Vec3b>(y, x) = classColors[response];
-        }
-    }
-}
-
-#if _NBC_
-static void find_decision_boundary_NBC()
-{
-    // learn classifier
-    Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data());
-
-    predict_and_paint(normalBayesClassifier, imgDst);
-}
-#endif
-
-
-#if _KNN_
-static void find_decision_boundary_KNN( int K )
-{
-
-    Ptr<KNearest> knn = KNearest::create();
-    knn->setDefaultK(K);
-    knn->setIsClassifier(true);
-    knn->train(prepare_train_data());
-    predict_and_paint(knn, imgDst);
-}
-#endif
-
-#if _SVM_
-static void find_decision_boundary_SVM( double C )
-{
-    Ptr<SVM> svm = SVM::create();
-    svm->setType(SVM::C_SVC);
-    svm->setKernel(SVM::POLY); //SVM::LINEAR;
-    svm->setDegree(0.5);
-    svm->setGamma(1);
-    svm->setCoef0(1);
-    svm->setNu(0.5);
-    svm->setP(0);
-    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
-    svm->setC(C);
-    svm->train(prepare_train_data());
-    predict_and_paint(svm, imgDst);
-
-    Mat sv = svm->getSupportVectors();
-    for( int i = 0; i < sv.rows; i++ )
-    {
-        const float* supportVector = sv.ptr<float>(i);
-        circle( imgDst, Point(saturate_cast<int>(supportVector[0]),saturate_cast<int>(supportVector[1])), 5, Scalar(255,255,255), -1 );
-    }
-}
-#endif
-
-#if _DT_
-static void find_decision_boundary_DT()
-{
-    Ptr<DTrees> dtree = DTrees::create();
-    dtree->setMaxDepth(8);
-    dtree->setMinSampleCount(2);
-    dtree->setUseSurrogates(false);
-    dtree->setCVFolds(0); // the number of cross-validation folds
-    dtree->setUse1SERule(false);
-    dtree->setTruncatePrunedTree(false);
-    dtree->train(prepare_train_data());
-    predict_and_paint(dtree, imgDst);
-}
-#endif
-
-#if _BT_
-static void find_decision_boundary_BT()
-{
-    Ptr<Boost> boost = Boost::create();
-    boost->setBoostType(Boost::DISCRETE);
-    boost->setWeakCount(100);
-    boost->setWeightTrimRate(0.95);
-    boost->setMaxDepth(2);
-    boost->setUseSurrogates(false);
-    boost->setPriors(Mat());
-    boost->train(prepare_train_data());
-    predict_and_paint(boost, imgDst);
-}
-
-#endif
-
-#if _GBT_
-static void find_decision_boundary_GBT()
-{
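Note: the GBT branch that follows still uses the old pre-3.0 parameter-struct style (a GBTrees::Params object passed to StatModel::train), which does not exist in the released cv::ml interface; that appears to be why the block is compiled out via #define _GBT_ 0 above.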
GBTrees::Params params( GBTrees::DEVIANCE_LOSS, // loss_function_type - 100, // weak_count - 0.1f, // shrinkage - 1.0f, // subsample_portion - 2, // max_depth - false // use_surrogates ) - ); - - Ptr gbtrees = StatModel::train(prepare_train_data(), params); - predict_and_paint(gbtrees, imgDst); -} -#endif - -#if _RF_ -static void find_decision_boundary_RF() -{ - Ptr rtrees = RTrees::create(); - rtrees->setMaxDepth(4); - rtrees->setMinSampleCount(2); - rtrees->setRegressionAccuracy(0.f); - rtrees->setUseSurrogates(false); - rtrees->setMaxCategories(16); - rtrees->setPriors(Mat()); - rtrees->setCalculateVarImportance(false); - rtrees->setActiveVarCount(1); - rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0)); - rtrees->train(prepare_train_data()); - predict_and_paint(rtrees, imgDst); -} - -#endif - -#if _ANN_ -static void find_decision_boundary_ANN( const Mat& layer_sizes ) -{ - Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 ); - for( int i = 0; i < trainClasses.rows; i++ ) - { - trainClasses.at(i, trainedPointsMarkers[i]) = 1.f; - } - - Mat samples = prepare_train_samples(trainedPoints); - Ptr tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses); - - Ptr ann = ANN_MLP::create(); - ann->setLayerSizes(layer_sizes); - ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1); - ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON)); - ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001); - ann->train(tdata); - predict_and_paint(ann, imgDst); -} -#endif - -#if _EM_ -static void find_decision_boundary_EM() -{ - img.copyTo( imgDst ); - - Mat samples = prepare_train_samples(trainedPoints); - - int i, j, nmodels = (int)classColors.size(); - vector > em_models(nmodels); - Mat modelSamples; - - for( i = 0; i < nmodels; i++ ) - { - const int componentCount = 3; - - modelSamples.release(); - for( j = 0; j < samples.rows; j++ ) - { - if( trainedPointsMarkers[j] == i ) - modelSamples.push_back(samples.row(j)); - } - - // learn models - if( !modelSamples.empty() ) - { - Ptr em = EM::create(); - em->setClustersNumber(componentCount); - em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL); - em->trainEM(modelSamples, noArray(), noArray(), noArray()); - em_models[i] = em; - } - } - - // classify coordinate plane points using the bayes classifier, i.e. - // y(x) = arg max_i=1_modelsCount likelihoods_i(x) - Mat testSample(1, 2, CV_32FC1 ); - Mat logLikelihoods(1, nmodels, CV_64FC1, Scalar(-DBL_MAX)); - - for( int y = 0; y < img.rows; y += testStep ) - { - for( int x = 0; x < img.cols; x += testStep ) - { - testSample.at(0) = (float)x; - testSample.at(1) = (float)y; - - for( i = 0; i < nmodels; i++ ) - { - if( !em_models[i].empty() ) - logLikelihoods.at(i) = em_models[i]->predict2(testSample, noArray())[0]; - } - Point maxLoc; - minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc); - imgDst.at(y, x) = classColors[maxLoc.x]; - } - } -} -#endif - -int main() -{ - cout << "Use:" << endl - << " key '0' .. '1' - switch to class #n" << endl - << " left mouse button - to add new point;" << endl - << " key 'r' - to run the ML model;" << endl - << " key 'i' - to init (clear) the data." 
<< endl << endl; - - cv::namedWindow( "points", 1 ); - img.create( 480, 640, CV_8UC3 ); - imgDst.create( 480, 640, CV_8UC3 ); - - imshow( "points", img ); - setMouseCallback( "points", on_mouse ); - - classColors[0] = Vec3b(0, 255, 0); - classColors[1] = Vec3b(0, 0, 255); - - for(;;) - { - char key = (char)waitKey(); - - if( key == 27 ) break; - - if( key == 'i' ) // init - { - img = Scalar::all(0); - - trainedPoints.clear(); - trainedPointsMarkers.clear(); - classCounters.assign(MAX_CLASSES, 0); - - imshow( winName, img ); - } - - if( key == '0' || key == '1' ) - { - currentClass = key - '0'; - } - - if( key == 'r' ) // run - { - double minVal = 0; - minMaxLoc(classCounters, &minVal, 0, 0, 0); - if( minVal == 0 ) - { - printf("each class should have at least 1 point\n"); - continue; - } - img.copyTo( imgDst ); -#if _NBC_ - find_decision_boundary_NBC(); - imshow( "NormalBayesClassifier", imgDst ); -#endif -#if _KNN_ - find_decision_boundary_KNN( 3 ); - imshow( "kNN", imgDst ); - - find_decision_boundary_KNN( 15 ); - imshow( "kNN2", imgDst ); -#endif - -#if _SVM_ - //(1)-(2)separable and not sets - - find_decision_boundary_SVM( 1 ); - imshow( "classificationSVM1", imgDst ); - - find_decision_boundary_SVM( 10 ); - imshow( "classificationSVM2", imgDst ); -#endif - -#if _DT_ - find_decision_boundary_DT(); - imshow( "DT", imgDst ); -#endif - -#if _BT_ - find_decision_boundary_BT(); - imshow( "BT", imgDst); -#endif - -#if _GBT_ - find_decision_boundary_GBT(); - imshow( "GBT", imgDst); -#endif - -#if _RF_ - find_decision_boundary_RF(); - imshow( "RF", imgDst); -#endif - -#if _ANN_ - Mat layer_sizes1( 1, 3, CV_32SC1 ); - layer_sizes1.at(0) = 2; - layer_sizes1.at(1) = 5; - layer_sizes1.at(2) = (int)classColors.size(); - find_decision_boundary_ANN( layer_sizes1 ); - imshow( "ANN", imgDst ); -#endif - -#if _EM_ - find_decision_boundary_EM(); - imshow( "EM", imgDst ); -#endif - } - } - - return 0; -} diff --git a/samples/cpp/train_HOG.cpp b/samples/cpp/train_HOG.cpp deleted file mode 100644 index c8355ee591..0000000000 --- a/samples/cpp/train_HOG.cpp +++ /dev/null @@ -1,392 +0,0 @@ -#include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/ml.hpp" -#include "opencv2/objdetect.hpp" -#include "opencv2/videoio.hpp" -#include -#include - -using namespace cv; -using namespace cv::ml; -using namespace std; - -vector< float > get_svm_detector( const Ptr< SVM >& svm ); -void convert_to_ml( const std::vector< Mat > & train_samples, Mat& trainData ); -void load_images( const String & dirname, vector< Mat > & img_lst, bool showImages ); -void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size ); -void computeHOGs( const Size wsize, const vector< Mat > & img_lst, vector< Mat > & gradient_lst, bool use_flip ); -void test_trained_detector( String obj_det_filename, String test_dir, String videofilename ); - -vector< float > get_svm_detector( const Ptr< SVM >& svm ) -{ - // get the support vectors - Mat sv = svm->getSupportVectors(); - const int sv_total = sv.rows; - // get the decision function - Mat alpha, svidx; - double rho = svm->getDecisionFunction( 0, alpha, svidx ); - - CV_Assert( alpha.total() == 1 && svidx.total() == 1 && sv_total == 1 ); - CV_Assert( (alpha.type() == CV_64F && alpha.at(0) == 1.) 
|| - (alpha.type() == CV_32F && alpha.at(0) == 1.f) ); - CV_Assert( sv.type() == CV_32F ); - - vector< float > hog_detector( sv.cols + 1 ); - memcpy( &hog_detector[0], sv.ptr(), sv.cols*sizeof( hog_detector[0] ) ); - hog_detector[sv.cols] = (float)-rho; - return hog_detector; -} - -/* -* Convert training/testing set to be used by OpenCV Machine Learning algorithms. -* TrainData is a matrix of size (#samples x max(#cols,#rows) per samples), in 32FC1. -* Transposition of samples are made if needed. -*/ -void convert_to_ml( const vector< Mat > & train_samples, Mat& trainData ) -{ - //--Convert data - const int rows = (int)train_samples.size(); - const int cols = (int)std::max( train_samples[0].cols, train_samples[0].rows ); - Mat tmp( 1, cols, CV_32FC1 ); ///< used for transposition if needed - trainData = Mat( rows, cols, CV_32FC1 ); - - for( size_t i = 0 ; i < train_samples.size(); ++i ) - { - CV_Assert( train_samples[i].cols == 1 || train_samples[i].rows == 1 ); - - if( train_samples[i].cols == 1 ) - { - transpose( train_samples[i], tmp ); - tmp.copyTo( trainData.row( (int)i ) ); - } - else if( train_samples[i].rows == 1 ) - { - train_samples[i].copyTo( trainData.row( (int)i ) ); - } - } -} - -void load_images( const String & dirname, vector< Mat > & img_lst, bool showImages = false ) -{ - vector< String > files; - glob( dirname, files ); - - for ( size_t i = 0; i < files.size(); ++i ) - { - Mat img = imread( files[i] ); // load the image - if ( img.empty() ) - { - cout << files[i] << " is invalid!" << endl; // invalid image, skip it. - continue; - } - - if ( showImages ) - { - imshow( "image", img ); - waitKey( 1 ); - } - img_lst.push_back( img ); - } -} - -void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size ) -{ - Rect box; - box.width = size.width; - box.height = size.height; - - srand( (unsigned int)time( NULL ) ); - - for ( size_t i = 0; i < full_neg_lst.size(); i++ ) - if ( full_neg_lst[i].cols > box.width && full_neg_lst[i].rows > box.height ) - { - box.x = rand() % ( full_neg_lst[i].cols - box.width ); - box.y = rand() % ( full_neg_lst[i].rows - box.height ); - Mat roi = full_neg_lst[i]( box ); - neg_lst.push_back( roi.clone() ); - } -} - -void computeHOGs( const Size wsize, const vector< Mat > & img_lst, vector< Mat > & gradient_lst, bool use_flip ) -{ - HOGDescriptor hog; - hog.winSize = wsize; - Mat gray; - vector< float > descriptors; - - for( size_t i = 0 ; i < img_lst.size(); i++ ) - { - if ( img_lst[i].cols >= wsize.width && img_lst[i].rows >= wsize.height ) - { - Rect r = Rect(( img_lst[i].cols - wsize.width ) / 2, - ( img_lst[i].rows - wsize.height ) / 2, - wsize.width, - wsize.height); - cvtColor( img_lst[i](r), gray, COLOR_BGR2GRAY ); - hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) ); - gradient_lst.push_back( Mat( descriptors ).clone() ); - if ( use_flip ) - { - flip( gray, gray, 1 ); - hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) ); - gradient_lst.push_back( Mat( descriptors ).clone() ); - } - } - } -} - -void test_trained_detector( String obj_det_filename, String test_dir, String videofilename ) -{ - cout << "Testing trained detector..." 
<< endl; - HOGDescriptor hog; - hog.load( obj_det_filename ); - - vector< String > files; - glob( test_dir, files ); - - int delay = 0; - VideoCapture cap; - - if ( videofilename != "" ) - { - if ( videofilename.size() == 1 && isdigit( videofilename[0] ) ) - cap.open( videofilename[0] - '0' ); - else - cap.open( videofilename ); - } - - obj_det_filename = "testing " + obj_det_filename; - namedWindow( obj_det_filename, WINDOW_NORMAL ); - - for( size_t i=0;; i++ ) - { - Mat img; - - if ( cap.isOpened() ) - { - cap >> img; - delay = 1; - } - else if( i < files.size() ) - { - img = imread( files[i] ); - } - - if ( img.empty() ) - { - return; - } - - vector< Rect > detections; - vector< double > foundWeights; - - hog.detectMultiScale( img, detections, foundWeights ); - for ( size_t j = 0; j < detections.size(); j++ ) - { - Scalar color = Scalar( 0, foundWeights[j] * foundWeights[j] * 200, 0 ); - rectangle( img, detections[j], color, img.cols / 400 + 1 ); - } - - imshow( obj_det_filename, img ); - - if( waitKey( delay ) == 27 ) - { - return; - } - } -} - -int main( int argc, char** argv ) -{ - const char* keys = - { - "{help h| | show help message}" - "{pd | | path of directory contains positive images}" - "{nd | | path of directory contains negative images}" - "{td | | path of directory contains test images}" - "{tv | | test video file name}" - "{dw | | width of the detector}" - "{dh | | height of the detector}" - "{f |false| indicates if the program will generate and use mirrored samples or not}" - "{d |false| train twice}" - "{t |false| test a trained detector}" - "{v |false| visualize training steps}" - "{fn |my_detector.yml| file name of trained SVM}" - }; - - CommandLineParser parser( argc, argv, keys ); - - if ( parser.has( "help" ) ) - { - parser.printMessage(); - exit( 0 ); - } - - String pos_dir = parser.get< String >( "pd" ); - String neg_dir = parser.get< String >( "nd" ); - String test_dir = parser.get< String >( "td" ); - String obj_det_filename = parser.get< String >( "fn" ); - String videofilename = parser.get< String >( "tv" ); - int detector_width = parser.get< int >( "dw" ); - int detector_height = parser.get< int >( "dh" ); - bool test_detector = parser.get< bool >( "t" ); - bool train_twice = parser.get< bool >( "d" ); - bool visualization = parser.get< bool >( "v" ); - bool flip_samples = parser.get< bool >( "f" ); - - if ( test_detector ) - { - test_trained_detector( obj_det_filename, test_dir, videofilename ); - exit( 0 ); - } - - if( pos_dir.empty() || neg_dir.empty() ) - { - parser.printMessage(); - cout << "Wrong number of parameters.\n\n" - << "Example command line:\n" << argv[0] << " -dw=64 -dh=128 -pd=/INRIAPerson/96X160H96/Train/pos -nd=/INRIAPerson/neg -td=/INRIAPerson/Test/pos -fn=HOGpedestrian64x128.xml -d\n" - << "\nExample command line for testing trained detector:\n" << argv[0] << " -t -fn=HOGpedestrian64x128.xml -td=/INRIAPerson/Test/pos"; - exit( 1 ); - } - - vector< Mat > pos_lst, full_neg_lst, neg_lst, gradient_lst; - vector< int > labels; - - clog << "Positive images are being loaded..." ; - load_images( pos_dir, pos_lst, visualization ); - if ( pos_lst.size() > 0 ) - { - clog << "...[done] " << pos_lst.size() << " files." 
<< endl; - } - else - { - clog << "no image in " << pos_dir < svm = SVM::create(); - /* Default values to train SVM */ - svm->setCoef0( 0.0 ); - svm->setDegree( 3 ); - svm->setTermCriteria( TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 1e-3 ) ); - svm->setGamma( 0 ); - svm->setKernel( SVM::LINEAR ); - svm->setNu( 0.5 ); - svm->setP( 0.1 ); // for EPSILON_SVR, epsilon in loss function? - svm->setC( 0.01 ); // From paper, soft classifier - svm->setType( SVM::EPS_SVR ); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task - svm->train( train_data, ROW_SAMPLE, labels ); - clog << "...[done]" << endl; - - if ( train_twice ) - { - clog << "Testing trained detector on negative images. This might take a few minutes..."; - HOGDescriptor my_hog; - my_hog.winSize = pos_image_size; - - // Set the trained svm to my_hog - my_hog.setSVMDetector( get_svm_detector( svm ) ); - - vector< Rect > detections; - vector< double > foundWeights; - - for ( size_t i = 0; i < full_neg_lst.size(); i++ ) - { - if ( full_neg_lst[i].cols >= pos_image_size.width && full_neg_lst[i].rows >= pos_image_size.height ) - my_hog.detectMultiScale( full_neg_lst[i], detections, foundWeights ); - else - detections.clear(); - - for ( size_t j = 0; j < detections.size(); j++ ) - { - Mat detection = full_neg_lst[i]( detections[j] ).clone(); - resize( detection, detection, pos_image_size, 0, 0, INTER_LINEAR_EXACT); - neg_lst.push_back( detection ); - } - - if ( visualization ) - { - for ( size_t j = 0; j < detections.size(); j++ ) - { - rectangle( full_neg_lst[i], detections[j], Scalar( 0, 255, 0 ), 2 ); - } - imshow( "testing trained detector on negative images", full_neg_lst[i] ); - waitKey( 5 ); - } - } - clog << "...[done]" << endl; - - gradient_lst.clear(); - clog << "Histogram of Gradients are being calculated for positive images..."; - computeHOGs( pos_image_size, pos_lst, gradient_lst, flip_samples ); - positive_count = gradient_lst.size(); - clog << "...[done] ( positive count : " << positive_count << " )" << endl; - - clog << "Histogram of Gradients are being calculated for negative images..."; - computeHOGs( pos_image_size, neg_lst, gradient_lst, flip_samples ); - negative_count = gradient_lst.size() - positive_count; - clog << "...[done] ( negative count : " << negative_count << " )" << endl; - - labels.clear(); - labels.assign(positive_count, +1); - labels.insert(labels.end(), negative_count, -1); - - clog << "Training SVM again..."; - convert_to_ml( gradient_lst, train_data ); - svm->train( train_data, ROW_SAMPLE, labels ); - clog << "...[done]" << endl; - } - - HOGDescriptor hog; - hog.winSize = pos_image_size; - hog.setSVMDetector( get_svm_detector( svm ) ); - hog.save( obj_det_filename ); - - test_trained_detector( obj_det_filename, test_dir, videofilename ); - - return 0; -} diff --git a/samples/cpp/train_svmsgd.cpp b/samples/cpp/train_svmsgd.cpp deleted file mode 100644 index 12e0384081..0000000000 --- a/samples/cpp/train_svmsgd.cpp +++ /dev/null @@ -1,211 +0,0 @@ -#include "opencv2/core.hpp" -#include "opencv2/video/tracking.hpp" -#include "opencv2/imgproc.hpp" -#include "opencv2/highgui.hpp" -#include "opencv2/ml.hpp" - -using namespace cv; -using namespace cv::ml; - - -struct Data -{ - Mat img; - Mat samples; //Set of train samples. 
Contains points on image - Mat responses; //Set of responses for train samples - - Data() - { - const int WIDTH = 841; - const int HEIGHT = 594; - img = Mat::zeros(HEIGHT, WIDTH, CV_8UC3); - imshow("Train svmsgd", img); - } -}; - -//Train with SVMSGD algorithm -//(samples, responses) is a train set -//weights is a required vector for decision function of SVMSGD algorithm -bool doTrain(const Mat samples, const Mat responses, Mat &weights, float &shift); - -//function finds two points for drawing line (wx = 0) -bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height); - -// function finds cross point of line (wx = 0) and segment ( (y = HEIGHT, 0 <= x <= WIDTH) or (x = WIDTH, 0 <= y <= HEIGHT) ) -bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair &segment, Point &crossPoint); - -//segments' initialization ( (y = HEIGHT, 0 <= x <= WIDTH) and (x = WIDTH, 0 <= y <= HEIGHT) ) -void fillSegments(std::vector > &segments, int width, int height); - -//redraw points' set and line (wx = 0) -void redraw(Data data, const Point points[2]); - -//add point in train set, train SVMSGD algorithm and draw results on image -void addPointRetrainAndRedraw(Data &data, int x, int y, int response); - - -bool doTrain( const Mat samples, const Mat responses, Mat &weights, float &shift) -{ - cv::Ptr svmsgd = SVMSGD::create(); - - cv::Ptr trainData = TrainData::create(samples, cv::ml::ROW_SAMPLE, responses); - svmsgd->train( trainData ); - - if (svmsgd->isTrained()) - { - weights = svmsgd->getWeights(); - shift = svmsgd->getShift(); - - return true; - } - return false; -} - -void fillSegments(std::vector > &segments, int width, int height) -{ - std::pair currentSegment; - - currentSegment.first = Point(width, 0); - currentSegment.second = Point(width, height); - segments.push_back(currentSegment); - - currentSegment.first = Point(0, height); - currentSegment.second = Point(width, height); - segments.push_back(currentSegment); - - currentSegment.first = Point(0, 0); - currentSegment.second = Point(width, 0); - segments.push_back(currentSegment); - - currentSegment.first = Point(0, 0); - currentSegment.second = Point(0, height); - segments.push_back(currentSegment); -} - - -bool findCrossPointWithBorders(const Mat &weights, float shift, const std::pair &segment, Point &crossPoint) -{ - int x = 0; - int y = 0; - int xMin = std::min(segment.first.x, segment.second.x); - int xMax = std::max(segment.first.x, segment.second.x); - int yMin = std::min(segment.first.y, segment.second.y); - int yMax = std::max(segment.first.y, segment.second.y); - - CV_Assert(weights.type() == CV_32FC1); - CV_Assert(xMin == xMax || yMin == yMax); - - if (xMin == xMax && weights.at(1) != 0) - { - x = xMin; - y = static_cast(std::floor( - (weights.at(0) * x + shift) / weights.at(1))); - if (y >= yMin && y <= yMax) - { - crossPoint.x = x; - crossPoint.y = y; - return true; - } - } - else if (yMin == yMax && weights.at(0) != 0) - { - y = yMin; - x = static_cast(std::floor( - (weights.at(1) * y + shift) / weights.at(0))); - if (x >= xMin && x <= xMax) - { - crossPoint.x = x; - crossPoint.y = y; - return true; - } - } - return false; -} - -bool findPointsForLine(const Mat &weights, float shift, Point points[2], int width, int height) -{ - if (weights.empty()) - { - return false; - } - - int foundPointsCount = 0; - std::vector > segments; - fillSegments(segments, width, height); - - for (uint i = 0; i < segments.size(); i++) - { - if (findCrossPointWithBorders(weights, shift, segments[i], 
points[foundPointsCount])) - foundPointsCount++; - if (foundPointsCount >= 2) - break; - } - - return true; -} - -void redraw(Data data, const Point points[2]) -{ - data.img.setTo(0); - Point center; - int radius = 3; - Scalar color; - CV_Assert((data.samples.type() == CV_32FC1) && (data.responses.type() == CV_32FC1)); - for (int i = 0; i < data.samples.rows; i++) - { - center.x = static_cast(data.samples.at(i,0)); - center.y = static_cast(data.samples.at(i,1)); - color = (data.responses.at(i) > 0) ? Scalar(128,128,0) : Scalar(0,128,128); - circle(data.img, center, radius, color, 5); - } - line(data.img, points[0], points[1],cv::Scalar(1,255,1)); - - imshow("Train svmsgd", data.img); -} - -void addPointRetrainAndRedraw(Data &data, int x, int y, int response) -{ - Mat currentSample(1, 2, CV_32FC1); - - currentSample.at(0,0) = (float)x; - currentSample.at(0,1) = (float)y; - data.samples.push_back(currentSample); - data.responses.push_back(static_cast(response)); - - Mat weights(1, 2, CV_32FC1); - float shift = 0; - - if (doTrain(data.samples, data.responses, weights, shift)) - { - Point points[2]; - findPointsForLine(weights, shift, points, data.img.cols, data.img.rows); - - redraw(data, points); - } -} - - -static void onMouse( int event, int x, int y, int, void* pData) -{ - Data &data = *(Data*)pData; - - switch( event ) - { - case EVENT_LBUTTONUP: - addPointRetrainAndRedraw(data, x, y, 1); - break; - - case EVENT_RBUTTONDOWN: - addPointRetrainAndRedraw(data, x, y, -1); - break; - } - -} - -int main() -{ - Data data; - - setMouseCallback( "Train svmsgd", onMouse, &data ); - waitKey(); - - return 0; -} diff --git a/samples/cpp/travelsalesman.cpp b/samples/cpp/travelsalesman.cpp deleted file mode 100644 index 256ff55b43..0000000000 --- a/samples/cpp/travelsalesman.cpp +++ /dev/null @@ -1,109 +0,0 @@ -#include -#include -#include -#include - -using namespace cv; - -class TravelSalesman -{ -private : - const std::vector& posCity; - std::vector& next; - RNG rng; - int d0,d1,d2,d3; - -public: - TravelSalesman(std::vector &p, std::vector &n) : - posCity(p), next(n) - { - rng = theRNG(); - } - /** Give energy value for a state of system.*/ - double energy() const; - /** Function which change the state of system (random perturbation).*/ - void changeState(); - /** Function to reverse to the previous state.*/ - void reverseState(); - -}; - -void TravelSalesman::changeState() -{ - d0 = rng.uniform(0,static_cast(posCity.size())); - d1 = next[d0]; - d2 = next[d1]; - d3 = next[d2]; - - next[d0] = d2; - next[d2] = d1; - next[d1] = d3; -} - - -void TravelSalesman::reverseState() -{ - next[d0] = d1; - next[d1] = d2; - next[d2] = d3; -} - -double TravelSalesman::energy() const -{ - double e = 0; - for (size_t i = 0; i < next.size(); i++) - { - e += norm(posCity[i]-posCity[next[i]]); - } - return e; -} - - -static void DrawTravelMap(Mat &img, std::vector &p, std::vector &n) -{ - for (size_t i = 0; i < n.size(); i++) - { - circle(img,p[i],5,Scalar(0,0,255),2); - line(img,p[i],p[n[i]],Scalar(0,255,0),2); - } -} -int main(void) -{ - int nbCity=40; - Mat img(500,500,CV_8UC3,Scalar::all(0)); - RNG rng(123456); - int radius=static_cast(img.cols*0.45); - Point center(img.cols/2,img.rows/2); - - std::vector posCity(nbCity); - std::vector next(nbCity); - for (size_t i = 0; i < posCity.size(); i++) - { - double theta = rng.uniform(0., 2 * CV_PI); - posCity[i].x = static_cast(radius*cos(theta)) + center.x; - posCity[i].y = static_cast(radius*sin(theta)) + center.y; - next[i]=(i+1)%nbCity; - } - TravelSalesman 
ts_system(posCity, next); - - DrawTravelMap(img,posCity,next); - imshow("Map",img); - waitKey(10); - double currentTemperature = 100.0; - for (int i = 0, zeroChanges = 0; zeroChanges < 10; i++) - { - int changesApplied = ml::simulatedAnnealingSolver(ts_system, currentTemperature, currentTemperature*0.97, 0.99, 10000*nbCity, ¤tTemperature, rng); - img.setTo(Scalar::all(0)); - DrawTravelMap(img, posCity, next); - imshow("Map", img); - int k = waitKey(10); - std::cout << "i=" << i << " changesApplied=" << changesApplied << " temp=" << currentTemperature << " result=" << ts_system.energy() << std::endl; - if (k == 27 || k == 'q' || k == 'Q') - return 0; - if (changesApplied == 0) - zeroChanges++; - } - std::cout << "Done" << std::endl; - waitKey(0); - return 0; -} diff --git a/samples/cpp/tree_engine.cpp b/samples/cpp/tree_engine.cpp deleted file mode 100644 index 956deb8f78..0000000000 --- a/samples/cpp/tree_engine.cpp +++ /dev/null @@ -1,116 +0,0 @@ -#include "opencv2/ml.hpp" -#include "opencv2/core.hpp" -#include "opencv2/core/utility.hpp" -#include -#include -#include - -using namespace cv; -using namespace cv::ml; - -static void help(char** argv) -{ - printf( - "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n" - "Usage:\n\t%s [-r=] [-ts=type_spec] \n" - "where -r= specified the 0-based index of the response (0 by default)\n" - "-ts= specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n" - " is the name of training data file in comma-separated value format\n\n", argv[0]); -} - -static void train_and_print_errs(Ptr model, const Ptr& data) -{ - bool ok = model->train(data); - if( !ok ) - { - printf("Training failed\n"); - } - else - { - printf( "train error: %f\n", model->calcError(data, false, noArray()) ); - printf( "test error: %f\n\n", model->calcError(data, true, noArray()) ); - } -} - -int main(int argc, char** argv) -{ - cv::CommandLineParser parser(argc, argv, "{ help h | | }{r | 0 | }{ts | | }{@input | | }"); - if (parser.has("help")) - { - help(argv); - return 0; - } - std::string filename = parser.get("@input"); - int response_idx; - std::string typespec; - response_idx = parser.get("r"); - typespec = parser.get("ts"); - if( filename.empty() || !parser.check() ) - { - parser.printErrors(); - help(argv); - return 0; - } - printf("\nReading in %s...\n\n",filename.c_str()); - const double train_test_split_ratio = 0.5; - - Ptr data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx+1, typespec); - if( data.empty() ) - { - printf("ERROR: File %s can not be read\n", filename.c_str()); - return 0; - } - - data->setTrainTestSplitRatio(train_test_split_ratio); - std::cout << "Test/Train: " << data->getNTestSamples() << "/" << data->getNTrainSamples(); - - printf("======DTREE=====\n"); - Ptr dtree = DTrees::create(); - dtree->setMaxDepth(10); - dtree->setMinSampleCount(2); - dtree->setRegressionAccuracy(0); - dtree->setUseSurrogates(false); - dtree->setMaxCategories(16); - dtree->setCVFolds(0); - dtree->setUse1SERule(false); - dtree->setTruncatePrunedTree(false); - dtree->setPriors(Mat()); - train_and_print_errs(dtree, data); - - if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem - { - printf("======BOOST=====\n"); - Ptr boost = Boost::create(); - boost->setBoostType(Boost::GENTLE); - boost->setWeakCount(100); - boost->setWeightTrimRate(0.95); - boost->setMaxDepth(2); - boost->setUseSurrogates(false); - boost->setPriors(Mat()); - 
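Note: this Boost block is guarded by the getClassLabels().total() <= 2 check above because the ml boosting implementation handles only two-class classification (or regression); the letter_recog sample earlier in this patch works around the same limitation by unrolling its 26-class problem into binary one-vs-all samples.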
train_and_print_errs(boost, data); - } - - printf("======RTREES=====\n"); - Ptr rtrees = RTrees::create(); - rtrees->setMaxDepth(10); - rtrees->setMinSampleCount(2); - rtrees->setRegressionAccuracy(0); - rtrees->setUseSurrogates(false); - rtrees->setMaxCategories(16); - rtrees->setPriors(Mat()); - rtrees->setCalculateVarImportance(true); - rtrees->setActiveVarCount(0); - rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0)); - train_and_print_errs(rtrees, data); - cv::Mat ref_labels = data->getClassLabels(); - cv::Mat test_data = data->getTestSampleIdx(); - cv::Mat predict_labels; - rtrees->predict(data->getSamples(), predict_labels); - - cv::Mat variable_importance = rtrees->getVarImportance(); - std::cout << "Estimated variable importance" << std::endl; - for (int i = 0; i < variable_importance.rows; i++) { - std::cout << "Variable " << i << ": " << variable_importance.at(i, 0) << std::endl; - } - return 0; -} diff --git a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp b/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp deleted file mode 100644 index a5bcf98cc0..0000000000 --- a/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp +++ /dev/null @@ -1,81 +0,0 @@ -#include -#include -#include -#include -#include - -using namespace cv; -using namespace cv::ml; - -int main(int, char**) -{ - // Set up training data - //! [setup1] - int labels[4] = {1, -1, -1, -1}; - float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} }; - //! [setup1] - //! [setup2] - Mat trainingDataMat(4, 2, CV_32F, trainingData); - Mat labelsMat(4, 1, CV_32SC1, labels); - //! [setup2] - - // Train the SVM - //! [init] - Ptr svm = SVM::create(); - svm->setType(SVM::C_SVC); - svm->setKernel(SVM::LINEAR); - svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6)); - //! [init] - //! [train] - svm->train(trainingDataMat, ROW_SAMPLE, labelsMat); - //! [train] - - // Data for visual representation - int width = 512, height = 512; - Mat image = Mat::zeros(height, width, CV_8UC3); - - // Show the decision regions given by the SVM - //! [show] - Vec3b green(0,255,0), blue(255,0,0); - for (int i = 0; i < image.rows; i++) - { - for (int j = 0; j < image.cols; j++) - { - Mat sampleMat = (Mat_(1,2) << j,i); - float response = svm->predict(sampleMat); - - if (response == 1) - image.at(i,j) = green; - else if (response == -1) - image.at(i,j) = blue; - } - } - //! [show] - - // Show the training data - //! [show_data] - int thickness = -1; - circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness ); - circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness ); - circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness ); - circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness ); - //! [show_data] - - // Show support vectors - //! [show_vectors] - thickness = 2; - Mat sv = svm->getUncompressedSupportVectors(); - - for (int i = 0; i < sv.rows; i++) - { - const float* v = sv.ptr(i); - circle(image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness); - } - //! 
[show_vectors] - - imwrite("result.png", image); // save the image - - imshow("SVM Simple Example", image); // show it to the user - waitKey(); - return 0; -} diff --git a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp b/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp deleted file mode 100644 index f8b7a373cc..0000000000 --- a/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp +++ /dev/null @@ -1,144 +0,0 @@ -#include -#include -#include -#include "opencv2/imgcodecs.hpp" -#include -#include - -using namespace cv; -using namespace cv::ml; -using namespace std; - -static void help() -{ - cout<< "\n--------------------------------------------------------------------------" << endl - << "This program shows Support Vector Machines for Non-Linearly Separable Data. " << endl - << "--------------------------------------------------------------------------" << endl - << endl; -} - -int main() -{ - help(); - - const int NTRAINING_SAMPLES = 100; // Number of training samples per class - const float FRAC_LINEAR_SEP = 0.9f; // Fraction of samples which compose the linear separable part - - // Data for visual representation - const int WIDTH = 512, HEIGHT = 512; - Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3); - - //--------------------- 1. Set up training data randomly --------------------------------------- - Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32F); - Mat labels (2*NTRAINING_SAMPLES, 1, CV_32S); - - RNG rng(100); // Random value generation class - - // Set up the linearly separable part of the training data - int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES); - - //! [setup1] - // Generate random points for the class 1 - Mat trainClass = trainData.rowRange(0, nLinearSamples); - // The x coordinate of the points is in [0, 0.4) - Mat c = trainClass.colRange(0, 1); - rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(0.4 * WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); - - // Generate random points for the class 2 - trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES); - // The x coordinate of the points is in [0.6, 1] - c = trainClass.colRange(0 , 1); - rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); - //! [setup1] - - //------------------ Set up the non-linearly separable part of the training data --------------- - //! [setup2] - // Generate random points for the classes 1 and 2 - trainClass = trainData.rowRange(nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples); - // The x coordinate of the points is in [0.4, 0.6) - c = trainClass.colRange(0,1); - rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH)); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1,2); - rng.fill(c, RNG::UNIFORM, Scalar(0), Scalar(HEIGHT)); - //! [setup2] - - //------------------------- Set up the labels for the classes --------------------------------- - labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1 - labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2 - - //------------------------ 2. Set up the support vector machines parameters -------------------- - cout << "Starting training process" << endl; - //! 
[init] - Ptr svm = SVM::create(); - svm->setType(SVM::C_SVC); - svm->setC(0.1); - svm->setKernel(SVM::LINEAR); - svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6)); - //! [init] - - //------------------------ 3. Train the svm ---------------------------------------------------- - //! [train] - svm->train(trainData, ROW_SAMPLE, labels); - //! [train] - cout << "Finished training process" << endl; - - //------------------------ 4. Show the decision regions ---------------------------------------- - //! [show] - Vec3b green(0,100,0), blue(100,0,0); - for (int i = 0; i < I.rows; i++) - { - for (int j = 0; j < I.cols; j++) - { - Mat sampleMat = (Mat_(1,2) << j, i); - float response = svm->predict(sampleMat); - - if (response == 1) I.at(i,j) = green; - else if (response == 2) I.at(i,j) = blue; - } - } - //! [show] - - //----------------------- 5. Show the training data -------------------------------------------- - //! [show_data] - int thick = -1; - float px, py; - // Class 1 - for (int i = 0; i < NTRAINING_SAMPLES; i++) - { - px = trainData.at(i,0); - py = trainData.at(i,1); - circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick); - } - // Class 2 - for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; i++) - { - px = trainData.at(i,0); - py = trainData.at(i,1); - circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick); - } - //! [show_data] - - //------------------------- 6. Show support vectors -------------------------------------------- - //! [show_vectors] - thick = 2; - Mat sv = svm->getUncompressedSupportVectors(); - - for (int i = 0; i < sv.rows; i++) - { - const float* v = sv.ptr(i); - circle(I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick); - } - //! [show_vectors] - - imwrite("result.png", I); // save the Image - imshow("SVM for Non-Linear Training Data", I); // show it to the user - waitKey(); - return 0; -} diff --git a/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java b/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java deleted file mode 100644 index dcff5ff788..0000000000 --- a/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java +++ /dev/null @@ -1,99 +0,0 @@ -import org.opencv.core.Core; -import org.opencv.core.CvType; -import org.opencv.core.Mat; -import org.opencv.core.Point; -import org.opencv.core.Scalar; -import org.opencv.core.TermCriteria; -import org.opencv.highgui.HighGui; -import org.opencv.imgcodecs.Imgcodecs; -import org.opencv.imgproc.Imgproc; -import org.opencv.ml.Ml; -import org.opencv.ml.SVM; - -public class IntroductionToSVMDemo { - public static void main(String[] args) { - // Load the native OpenCV library - System.loadLibrary(Core.NATIVE_LIBRARY_NAME); - - // Set up training data - //! [setup1] - int[] labels = { 1, -1, -1, -1 }; - float[] trainingData = { 501, 10, 255, 10, 501, 255, 10, 501 }; - //! [setup1] - //! [setup2] - Mat trainingDataMat = new Mat(4, 2, CvType.CV_32FC1); - trainingDataMat.put(0, 0, trainingData); - Mat labelsMat = new Mat(4, 1, CvType.CV_32SC1); - labelsMat.put(0, 0, labels); - //! [setup2] - - // Train the SVM - //! [init] - SVM svm = SVM.create(); - svm.setType(SVM.C_SVC); - svm.setKernel(SVM.LINEAR); - svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, 100, 1e-6)); - //! [init] - //! [train] - svm.train(trainingDataMat, Ml.ROW_SAMPLE, labelsMat); - //! 
[train] - - // Data for visual representation - int width = 512, height = 512; - Mat image = Mat.zeros(height, width, CvType.CV_8UC3); - - // Show the decision regions given by the SVM - //! [show] - byte[] imageData = new byte[(int) (image.total() * image.channels())]; - Mat sampleMat = new Mat(1, 2, CvType.CV_32F); - float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())]; - for (int i = 0; i < image.rows(); i++) { - for (int j = 0; j < image.cols(); j++) { - sampleMatData[0] = j; - sampleMatData[1] = i; - sampleMat.put(0, 0, sampleMatData); - float response = svm.predict(sampleMat); - - if (response == 1) { - imageData[(i * image.cols() + j) * image.channels()] = 0; - imageData[(i * image.cols() + j) * image.channels() + 1] = (byte) 255; - imageData[(i * image.cols() + j) * image.channels() + 2] = 0; - } else if (response == -1) { - imageData[(i * image.cols() + j) * image.channels()] = (byte) 255; - imageData[(i * image.cols() + j) * image.channels() + 1] = 0; - imageData[(i * image.cols() + j) * image.channels() + 2] = 0; - } - } - } - image.put(0, 0, imageData); - //! [show] - - // Show the training data - //! [show_data] - int thickness = -1; - int lineType = Imgproc.LINE_8; - Imgproc.circle(image, new Point(501, 10), 5, new Scalar(0, 0, 0), thickness, lineType, 0); - Imgproc.circle(image, new Point(255, 10), 5, new Scalar(255, 255, 255), thickness, lineType, 0); - Imgproc.circle(image, new Point(501, 255), 5, new Scalar(255, 255, 255), thickness, lineType, 0); - Imgproc.circle(image, new Point(10, 501), 5, new Scalar(255, 255, 255), thickness, lineType, 0); - //! [show_data] - - // Show support vectors - //! [show_vectors] - thickness = 2; - Mat sv = svm.getUncompressedSupportVectors(); - float[] svData = new float[(int) (sv.total() * sv.channels())]; - sv.get(0, 0, svData); - for (int i = 0; i < sv.rows(); ++i) { - Imgproc.circle(image, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, - new Scalar(128, 128, 128), thickness, lineType, 0); - } - //! [show_vectors] - - Imgcodecs.imwrite("result.png", image); // save the image - - HighGui.imshow("SVM Simple Example", image); // show it to the user - HighGui.waitKey(); - System.exit(0); - } -} diff --git a/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java b/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java deleted file mode 100644 index b2b40d1513..0000000000 --- a/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java +++ /dev/null @@ -1,186 +0,0 @@ -import java.util.Random; - -import org.opencv.core.Core; -import org.opencv.core.CvType; -import org.opencv.core.Mat; -import org.opencv.core.Point; -import org.opencv.core.Scalar; -import org.opencv.core.TermCriteria; -import org.opencv.highgui.HighGui; -import org.opencv.imgcodecs.Imgcodecs; -import org.opencv.imgproc.Imgproc; -import org.opencv.ml.Ml; -import org.opencv.ml.SVM; - -public class NonLinearSVMsDemo { - public static final int NTRAINING_SAMPLES = 100; - public static final float FRAC_LINEAR_SEP = 0.9f; - - public static void main(String[] args) { - // Load the native OpenCV library - System.loadLibrary(Core.NATIVE_LIBRARY_NAME); - - System.out.println("\n--------------------------------------------------------------------------"); - System.out.println("This program shows Support Vector Machines for Non-Linearly Separable Data. 
"); - System.out.println("--------------------------------------------------------------------------\n"); - - // Data for visual representation - int width = 512, height = 512; - Mat I = Mat.zeros(height, width, CvType.CV_8UC3); - - // --------------------- 1. Set up training data randomly--------------------------------------- - Mat trainData = new Mat(2 * NTRAINING_SAMPLES, 2, CvType.CV_32F); - Mat labels = new Mat(2 * NTRAINING_SAMPLES, 1, CvType.CV_32S); - - Random rng = new Random(100); // Random value generation class - - // Set up the linearly separable part of the training data - int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES); - - //! [setup1] - // Generate random points for the class 1 - Mat trainClass = trainData.rowRange(0, nLinearSamples); - // The x coordinate of the points is in [0, 0.4) - Mat c = trainClass.colRange(0, 1); - float[] cData = new float[(int) (c.total() * c.channels())]; - double[] cDataDbl = rng.doubles(cData.length, 0, 0.4f * width).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1, 2); - cData = new float[(int) (c.total() * c.channels())]; - cDataDbl = rng.doubles(cData.length, 0, height).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - - // Generate random points for the class 2 - trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES); - // The x coordinate of the points is in [0.6, 1] - c = trainClass.colRange(0, 1); - cData = new float[(int) (c.total() * c.channels())]; - cDataDbl = rng.doubles(cData.length, 0.6 * width, width).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1, 2); - cData = new float[(int) (c.total() * c.channels())]; - cDataDbl = rng.doubles(cData.length, 0, height).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - //! [setup1] - - // ------------------ Set up the non-linearly separable part of the training data --------------- - //! [setup2] - // Generate random points for the classes 1 and 2 - trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples); - // The x coordinate of the points is in [0.4, 0.6) - c = trainClass.colRange(0, 1); - cData = new float[(int) (c.total() * c.channels())]; - cDataDbl = rng.doubles(cData.length, 0.4 * width, 0.6 * width).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - // The y coordinate of the points is in [0, 1) - c = trainClass.colRange(1, 2); - cData = new float[(int) (c.total() * c.channels())]; - cDataDbl = rng.doubles(cData.length, 0, height).toArray(); - for (int i = 0; i < cData.length; i++) { - cData[i] = (float) cDataDbl[i]; - } - c.put(0, 0, cData); - //! [setup2] - - // ------------------------- Set up the labels for the classes--------------------------------- - labels.rowRange(0, NTRAINING_SAMPLES).setTo(new Scalar(1)); // Class 1 - labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(new Scalar(2)); // Class 2 - - // ------------------------ 2. Set up the support vector machines parameters-------------------- - System.out.println("Starting training process"); - //! 
[init] - SVM svm = SVM.create(); - svm.setType(SVM.C_SVC); - svm.setC(0.1); - svm.setKernel(SVM.LINEAR); - svm.setTermCriteria(new TermCriteria(TermCriteria.MAX_ITER, (int) 1e7, 1e-6)); - //! [init] - - // ------------------------ 3. Train the svm---------------------------------------------------- - //! [train] - svm.train(trainData, Ml.ROW_SAMPLE, labels); - //! [train] - System.out.println("Finished training process"); - - // ------------------------ 4. Show the decision regions---------------------------------------- - //! [show] - byte[] IData = new byte[(int) (I.total() * I.channels())]; - Mat sampleMat = new Mat(1, 2, CvType.CV_32F); - float[] sampleMatData = new float[(int) (sampleMat.total() * sampleMat.channels())]; - for (int i = 0; i < I.rows(); i++) { - for (int j = 0; j < I.cols(); j++) { - sampleMatData[0] = j; - sampleMatData[1] = i; - sampleMat.put(0, 0, sampleMatData); - float response = svm.predict(sampleMat); - - if (response == 1) { - IData[(i * I.cols() + j) * I.channels()] = 0; - IData[(i * I.cols() + j) * I.channels() + 1] = 100; - IData[(i * I.cols() + j) * I.channels() + 2] = 0; - } else if (response == 2) { - IData[(i * I.cols() + j) * I.channels()] = 100; - IData[(i * I.cols() + j) * I.channels() + 1] = 0; - IData[(i * I.cols() + j) * I.channels() + 2] = 0; - } - } - } - I.put(0, 0, IData); - //! [show] - - // ----------------------- 5. Show the training data-------------------------------------------- - //! [show_data] - int thick = -1; - int lineType = Imgproc.LINE_8; - float px, py; - // Class 1 - float[] trainDataData = new float[(int) (trainData.total() * trainData.channels())]; - trainData.get(0, 0, trainDataData); - for (int i = 0; i < NTRAINING_SAMPLES; i++) { - px = trainDataData[i * trainData.cols()]; - py = trainDataData[i * trainData.cols() + 1]; - Imgproc.circle(I, new Point(px, py), 3, new Scalar(0, 255, 0), thick, lineType, 0); - } - // Class 2 - for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i) { - px = trainDataData[i * trainData.cols()]; - py = trainDataData[i * trainData.cols() + 1]; - Imgproc.circle(I, new Point(px, py), 3, new Scalar(255, 0, 0), thick, lineType, 0); - } - //! [show_data] - - // ------------------------- 6. Show support vectors-------------------------------------------- - //! [show_vectors] - thick = 2; - Mat sv = svm.getUncompressedSupportVectors(); - float[] svData = new float[(int) (sv.total() * sv.channels())]; - sv.get(0, 0, svData); - for (int i = 0; i < sv.rows(); i++) { - Imgproc.circle(I, new Point(svData[i * sv.cols()], svData[i * sv.cols() + 1]), 6, new Scalar(128, 128, 128), - thick, lineType, 0); - } - //! [show_vectors] - - Imgcodecs.imwrite("result.png", I); // save the Image - HighGui.imshow("SVM for Non-Linear Training Data", I); // show it to the user - HighGui.waitKey(); - System.exit(0); - } -} diff --git a/samples/python/digits.py b/samples/python/digits.py deleted file mode 100755 index 25db411f94..0000000000 --- a/samples/python/digits.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python - -''' -SVM and KNearest digit recognition. - -Sample loads a dataset of handwritten digits from 'digits.png'. -Then it trains a SVM and KNearest classifiers on it and evaluates -their accuracy. - -Following preprocessing is applied to the dataset: - - Moment-based image deskew (see deskew()) - - Digit images are split into 4 10x10 cells and 16-bin - histogram of oriented gradients is computed for each - cell - - Transform histograms to space with Hellinger metric (see [1] (RootSIFT)) - - -[1] R. 
diff --git a/samples/python/digits.py b/samples/python/digits.py
deleted file mode 100755
index 25db411f94..0000000000
--- a/samples/python/digits.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python
-
-'''
-SVM and KNearest digit recognition.
-
-The sample loads a dataset of handwritten digits from 'digits.png',
-then trains SVM and KNearest classifiers on it and evaluates
-their accuracy.
-
-The following preprocessing is applied to the dataset:
- - Moment-based image deskew (see deskew())
- - Each digit image is split into 4 10x10 cells and a 16-bin
-   histogram of oriented gradients is computed for each cell
- - Histograms are transformed to a space with the Hellinger metric (see [1] (RootSIFT))
-
-[1] R. Arandjelovic, A. Zisserman
-    "Three things everyone should know to improve object retrieval"
-    http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
-
-Usage:
-   digits.py
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-# built-in modules
-from multiprocessing.pool import ThreadPool
-
-from numpy.linalg import norm
-
-# local modules
-from common import clock, mosaic
-
-
-SZ = 20          # size of each digit is SZ x SZ
-CLASS_N = 10
-DIGITS_FN = 'digits.png'
-
-def split2d(img, cell_size, flatten=True):
-    h, w = img.shape[:2]
-    sx, sy = cell_size
-    cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
-    cells = np.array(cells)
-    if flatten:
-        cells = cells.reshape(-1, sy, sx)
-    return cells
-
-def load_digits(fn):
-    fn = cv.samples.findFile(fn)
-    print('loading "%s" ...' % fn)
-    digits_img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
-    digits = split2d(digits_img, (SZ, SZ))
-    labels = np.repeat(np.arange(CLASS_N), len(digits)//CLASS_N)
-    return digits, labels
-
-def deskew(img):
-    m = cv.moments(img)
-    if abs(m['mu02']) < 1e-2:
-        return img.copy()
-    skew = m['mu11']/m['mu02']
-    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
-    img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
-    return img
-
-
-class KNearest(object):
-    def __init__(self, k = 3):
-        self.k = k
-        self.model = cv.ml.KNearest_create()
-
-    def train(self, samples, responses):
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
-
-    def predict(self, samples):
-        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
-        return results.ravel()
-
-    def load(self, fn):
-        self.model = cv.ml.KNearest_load(fn)
-
-    def save(self, fn):
-        self.model.save(fn)
-
-class SVM(object):
-    def __init__(self, C = 1, gamma = 0.5):
-        self.model = cv.ml.SVM_create()
-        self.model.setGamma(gamma)
-        self.model.setC(C)
-        self.model.setKernel(cv.ml.SVM_RBF)
-        self.model.setType(cv.ml.SVM_C_SVC)
-
-    def train(self, samples, responses):
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
-
-    def predict(self, samples):
-        return self.model.predict(samples)[1].ravel()
-
-    def load(self, fn):
-        self.model = cv.ml.SVM_load(fn)
-
-    def save(self, fn):
-        self.model.save(fn)
-
-def evaluate_model(model, digits, samples, labels):
-    resp = model.predict(samples)
-    err = (labels != resp).mean()
-    print('error: %.2f %%' % (err*100))
-
-    confusion = np.zeros((10, 10), np.int32)
-    for i, j in zip(labels, resp):
-        confusion[i, int(j)] += 1
-    print('confusion matrix:')
-    print(confusion)
-    print()
-
-    vis = []
-    for img, flag in zip(digits, resp == labels):
-        img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
-        if not flag:
-            img[...,:2] = 0
-        vis.append(img)
-    return mosaic(25, vis)
-
-def preprocess_simple(digits):
-    return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
-
-def preprocess_hog(digits):
-    samples = []
-    for img in digits:
-        gx = cv.Sobel(img, cv.CV_32F, 1, 0)
-        gy = cv.Sobel(img, cv.CV_32F, 0, 1)
-        mag, ang = cv.cartToPolar(gx, gy)
-        bin_n = 16
-        bins = np.int32(bin_n*ang/(2*np.pi))
-        bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
-        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
-        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
-        hist = np.hstack(hists)
-
-        # transform to Hellinger kernel
-        eps = 1e-7
-        hist /= hist.sum() + eps
-        hist = np.sqrt(hist)
-        hist /= norm(hist) + eps
-
-        samples.append(hist)
-    return np.float32(samples)
-
-
-if __name__ == '__main__':
-    print(__doc__)
-
-    digits, labels = load_digits(DIGITS_FN)
-
-    print('preprocessing...')
-    # shuffle digits
-    rand = np.random.RandomState(321)
-    shuffle = rand.permutation(len(digits))
-    digits, labels = digits[shuffle], labels[shuffle]
-
-    digits2 = list(map(deskew, digits))
-    samples = preprocess_hog(digits2)
-
-    train_n = int(0.9*len(samples))
-    cv.imshow('test set', mosaic(25, digits[train_n:]))
-    digits_train, digits_test = np.split(digits2, [train_n])
-    samples_train, samples_test = np.split(samples, [train_n])
-    labels_train, labels_test = np.split(labels, [train_n])
-
-    print('training KNearest...')
-    model = KNearest(k=4)
-    model.train(samples_train, labels_train)
-    vis = evaluate_model(model, digits_test, samples_test, labels_test)
-    cv.imshow('KNearest test', vis)
-
-    print('training SVM...')
-    model = SVM(C=2.67, gamma=5.383)
-    model.train(samples_train, labels_train)
-    vis = evaluate_model(model, digits_test, samples_test, labels_test)
-    cv.imshow('SVM test', vis)
-    print('saving SVM as "digits_svm.dat"...')
-    model.save('digits_svm.dat')
-
-    cv.waitKey(0)
-    cv.destroyAllWindows()
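digits_adjust.py below scores every parameter choice with k-fold cross-validation (see cross_validate()). A minimal sketch of just the fold bookkeeping, on made-up indices, assuming nothing beyond NumPy:

import numpy as np

# Split n sample indices into k folds; hold one fold out for testing
# and train on the concatenation of the rest, exactly as cross_validate() does.
n, kfold = 10, 3
folds = np.array_split(np.arange(n), kfold)
for i in range(kfold):
    test_idx = folds[i]
    train_idx = np.hstack([f for j, f in enumerate(folds) if j != i])
    print('fold %d: train %s  test %s' % (i, train_idx, test_idx))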
diff --git a/samples/python/digits_adjust.py b/samples/python/digits_adjust.py
deleted file mode 100755
index 94771e63b1..0000000000
--- a/samples/python/digits_adjust.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Digit recognition adjustment.
-Grid search is used to find the best parameters for SVM and KNearest classifiers.
-SVM adjustment follows the guidelines given in
-http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
-
-Usage:
-  digits_adjust.py [--model {svm|knearest}]
-
-  --model {svm|knearest}   - select the classifier (SVM is the default)
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-from multiprocessing.pool import ThreadPool
-
-from digits import *
-
-def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
-    n = len(samples)
-    folds = np.array_split(np.arange(n), kfold)
-    def f(i):
-        model = model_class(**params)
-        test_idx = folds[i]
-        train_idx = list(folds)
-        train_idx.pop(i)
-        train_idx = np.hstack(train_idx)
-        train_samples, train_labels = samples[train_idx], labels[train_idx]
-        test_samples, test_labels = samples[test_idx], labels[test_idx]
-        model.train(train_samples, train_labels)
-        resp = model.predict(test_samples)
-        score = (resp != test_labels).mean()
-        print(".", end='')
-        return score
-    if pool is None:
-        scores = list(map(f, range(kfold)))
-    else:
-        scores = pool.map(f, range(kfold))
-    return np.mean(scores)
-
-
-class App(object):
-    def __init__(self):
-        self._samples, self._labels = self.preprocess()
-
-    def preprocess(self):
-        digits, labels = load_digits(DIGITS_FN)
-        shuffle = np.random.permutation(len(digits))
-        digits, labels = digits[shuffle], labels[shuffle]
-        digits2 = list(map(deskew, digits))
-        samples = preprocess_hog(digits2)
-        return samples, labels
-
-    def get_dataset(self):
-        return self._samples, self._labels
-
-    def run_jobs(self, f, jobs):
-        pool = ThreadPool(processes=cv.getNumberOfCPUs())
-        ires = pool.imap_unordered(f, jobs)
-        return ires
-
-    def adjust_SVM(self):
-        Cs = np.logspace(0, 10, 15, base=2)
-        gammas = np.logspace(-7, 4, 15, base=2)
-        scores = np.zeros((len(Cs), len(gammas)))
-        scores[:] = np.nan
-
-        print('adjusting SVM (may take a long time) ...')
-        def f(job):
-            i, j = job
-            samples, labels = self.get_dataset()
-            params = dict(C = Cs[i], gamma=gammas[j])
-            score = cross_validate(SVM, params, samples, labels)
-            return i, j, score
-
-        ires = self.run_jobs(f, np.ndindex(*scores.shape))
-        for count, (i, j, score) in enumerate(ires):
-            scores[i, j] = score
-            print('%d / %d (best error: %.2f %%, last: %.2f %%)' %
-                  (count+1, scores.size, np.nanmin(scores)*100, score*100))
-        print(scores)
-
-        print('writing score table to "svm_scores.npz"')
-        np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas)
-
-        i, j = np.unravel_index(scores.argmin(), scores.shape)
-        best_params = dict(C = Cs[i], gamma=gammas[j])
-        print('best params:', best_params)
-        print('best error: %.2f %%' % (scores.min()*100))
-        return best_params
-
-    def adjust_KNearest(self):
-        print('adjusting KNearest ...')
-        def f(k):
-            samples, labels = self.get_dataset()
-            err = cross_validate(KNearest, dict(k=k), samples, labels)
-            return k, err
-        best_err, best_k = np.inf, -1
-        for k, err in self.run_jobs(f, range(1, 9)):
-            if err < best_err:
-                best_err, best_k = err, k
-            print('k = %d, error: %.2f %%' % (k, err*100))
-        best_params = dict(k=best_k)
-        print('best params:', best_params, 'err: %.2f' % (best_err*100))
-        return best_params
-
-
-if __name__ == '__main__':
-    import getopt
-    import sys
-
-    print(__doc__)
-
-    args, _ = getopt.getopt(sys.argv[1:], '', ['model='])
-    args = dict(args)
-    args.setdefault('--model', 'svm')
-    if args['--model'] not in ['svm', 'knearest']:
-        print('unknown model "%s"' % args['--model'])
-        sys.exit(1)
-
-    t = clock()
-    app = App()
-    if args['--model'] == 'knearest':
-        app.adjust_KNearest()
-    else:
-        app.adjust_SVM()
-    print('work time: %f s' % (clock() - t))
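The grid search above records one cross-validation error per (C, gamma) cell and reads the winner back with np.unravel_index. A toy sketch of that bookkeeping with random stand-in scores (in the real sample each cell comes from cross_validate, not from a random generator):

import numpy as np

Cs = np.logspace(0, 10, 5, base=2)
gammas = np.logspace(-7, 4, 5, base=2)
scores = np.empty((len(Cs), len(gammas)))
rng = np.random.RandomState(0)
for i, j in np.ndindex(*scores.shape):
    # stand-in for: scores[i, j] = cross_validate(SVM, dict(C=Cs[i], gamma=gammas[j]), ...)
    scores[i, j] = rng.rand()

i, j = np.unravel_index(scores.argmin(), scores.shape)   # cell with the lowest error
print('best params:', dict(C=Cs[i], gamma=gammas[j]))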
diff --git a/samples/python/digits_video.py b/samples/python/digits_video.py
deleted file mode 100755
index 17f44c333d..0000000000
--- a/samples/python/digits_video.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-'''
-Digit recognition from video.
-
-Run digits.py first to train and save the SVM.
-
-Usage:
-  digits_video.py [{camera_id|video_file}]
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-# built-in modules
-import os
-import sys
-
-# local modules
-import video
-from common import mosaic
-
-from digits import *
-
-def main():
-    try:
-        src = sys.argv[1]
-    except IndexError:
-        src = 0
-    cap = video.create_capture(src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('sudoku.png')))
-
-    classifier_fn = 'digits_svm.dat'
-    if not os.path.exists(classifier_fn):
-        print('"%s" not found, run digits.py first' % classifier_fn)
-        return
-
-    model = cv.ml.SVM_load(classifier_fn)
-
-    while True:
-        _ret, frame = cap.read()
-        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
-
-        bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10)
-        bin = cv.medianBlur(bin, 3)
-        contours, heirs = cv.findContours(bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
-        try:
-            heirs = heirs[0]
-        except (IndexError, TypeError):
-            heirs = []
-
-        for cnt, heir in zip(contours, heirs):
-            _, _, _, outer_i = heir
-            if outer_i >= 0:
-                continue
-            x, y, w, h = cv.boundingRect(cnt)
-            if not (16 <= h <= 64 and w <= 1.2*h):
-                continue
-            pad = max(h-w, 0)
-            x, w = x - (pad // 2), w + pad
-            cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
-
-            bin_roi = bin[y:,x:][:h,:w]
-
-            m = bin_roi != 0
-            if not 0.1 < m.mean() < 0.4:
-                continue
-            '''
-            gray_roi = gray[y:,x:][:h,:w]
-            v_in, v_out = gray_roi[m], gray_roi[~m]
-            if v_out.std() > 10.0:
-                continue
-            s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std())
-            cv.putText(frame, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
-            '''
-
-            s = 1.5*float(h)/SZ
-            m = cv.moments(bin_roi)
-            c1 = np.float32([m['m10'], m['m01']]) / m['m00']
-            c0 = np.float32([SZ/2, SZ/2])
-            t = c1 - s*c0
-            A = np.zeros((2, 3), np.float32)
-            A[:,:2] = np.eye(2)*s
-            A[:,2] = t
-            bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
-            bin_norm = deskew(bin_norm)
-            if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]:
-                frame[y:,x+w:][:SZ, :SZ] = bin_norm[...,np.newaxis]
-
-            sample = preprocess_hog([bin_norm])
-            digit = int(model.predict(sample)[1].ravel()[0])
-            cv.putText(frame, '%d'%digit, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
-
-        cv.imshow('frame', frame)
-        cv.imshow('bin', bin)
-        ch = cv.waitKey(1)
-        if ch == 27:
-            break
-
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
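gaussian_mix.py below fits a Gaussian mixture with cv.ml.EM and compares the recovered components against the generating ones. A smaller sketch of the same workflow on two made-up blobs; the cluster centres (100, 100) and (300, 300) and the covariances are arbitrary illustration values:

import numpy as np
import cv2 as cv

rng = np.random.RandomState(10)
pts = np.vstack([rng.multivariate_normal((100, 100), 25*np.eye(2), 200),
                 rng.multivariate_normal((300, 300), 25*np.eye(2), 200)]).astype(np.float32)

em = cv.ml.EM_create()
em.setClustersNumber(2)
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
em.trainEM(pts)
print(em.getMeans())   # two rows, expected to land near (100, 100) and (300, 300)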
diff --git a/samples/python/gaussian_mix.py b/samples/python/gaussian_mix.py
deleted file mode 100755
index dd49535ab8..0000000000
--- a/samples/python/gaussian_mix.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-
-import numpy as np
-import cv2 as cv
-
-from numpy import random
-
-def make_gaussians(cluster_n, img_size):
-    points = []
-    ref_distrs = []
-    for _i in range(cluster_n):
-        mean = (0.1 + 0.8*random.rand(2)) * img_size
-        a = (random.rand(2, 2)-0.5)*img_size*0.1
-        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
-        n = 100 + random.randint(900)
-        pts = random.multivariate_normal(mean, cov, n)
-        points.append( pts )
-        ref_distrs.append( (mean, cov) )
-    points = np.float32( np.vstack(points) )
-    return points, ref_distrs

-def draw_gaussian(img, mean, cov, color):
-    x, y = mean
-    w, u, _vt = cv.SVDecomp(cov)
-    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
-    s1, s2 = np.sqrt(w)*3.0
-    cv.ellipse(img, (int(x), int(y)), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA)
-
-
-def main():
-    cluster_n = 5
-    img_size = 512
-
-    print('press any key to update distributions, ESC - exit\n')
-
-    while True:
-        print('sampling distributions...')
-        points, ref_distrs = make_gaussians(cluster_n, img_size)
-
-        print('EM (opencv) ...')
-        em = cv.ml.EM_create()
-        em.setClustersNumber(cluster_n)
-        em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
-        em.trainEM(points)
-        means = em.getMeans()
-        covs = em.getCovs()  # Known bug: https://github.com/opencv/opencv/pull/4232
-        found_distrs = zip(means, covs)
-        print('ready!\n')
-
-        img = np.zeros((img_size, img_size, 3), np.uint8)
-        for x, y in np.int32(points):
-            cv.circle(img, (x, y), 1, (255, 255, 255), -1)
-        for m, cov in ref_distrs:
-            draw_gaussian(img, m, cov, (0, 255, 0))
-        for m, cov in found_distrs:
-            draw_gaussian(img, m, cov, (0, 0, 255))
-
-        cv.imshow('gaussian mixture', img)
-        ch = cv.waitKey(0)
-        if ch == 27:
-            break
-
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
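letter_recog.py below turns its 26-class letter problem into a binary one for the Boost model: each sample is repeated once per candidate class, the class id is appended as an extra feature, and the response becomes "is this the right class?" (see unroll_samples()/unroll_responses()). A toy sketch of that unrolling with 2 samples and 4 classes, pure NumPy and made-up feature values:

import numpy as np

class_n = 4
samples = np.array([[0.5, 1.0], [2.0, 3.0]], np.float32)   # 2 samples, 2 features
responses = np.array([1, 3])                               # true classes

# Repeat each sample class_n times and append the candidate class id as a feature.
new_samples = np.zeros((len(samples)*class_n, samples.shape[1]+1), np.float32)
new_samples[:, :-1] = np.repeat(samples, class_n, axis=0)
new_samples[:, -1] = np.tile(np.arange(class_n), len(samples))

# Binary response: 1 where the appended class id matches the true class.
new_responses = (new_samples[:, -1] == np.repeat(responses, class_n)).astype(np.int32)
print(np.hstack([new_samples, new_responses[:, None]]))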
diff --git a/samples/python/letter_recog.py b/samples/python/letter_recog.py
deleted file mode 100755
index f646f178fc..0000000000
--- a/samples/python/letter_recog.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python
-
-'''
-The sample demonstrates how to train a Random Trees classifier
-(or a Boosting classifier, MLP, KNearest, or Support Vector Machines model)
-using the provided dataset.
-
-We use the sample database letter-recognition.data
-from the UCI Repository, here is the link:
-
-Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
-UCI Repository of machine learning databases
-[http://www.ics.uci.edu/~mlearn/MLRepository.html].
-Irvine, CA: University of California, Department of Information and Computer Science.
-
-The dataset consists of 20000 feature vectors along with the
-responses: capital Latin letters A..Z.
-The first 10000 samples are used for training
-and the remaining 10000 to test the classifier.
-======================================================
-USAGE:
-  letter_recog.py [--model <model>]
-                  [--data <data fn>]
-                  [--load <model fn>] [--save <model fn>]
-
-  Models: RTrees, KNearest, Boost, SVM, MLP
-'''
-
-# Python 2/3 compatibility
-from __future__ import print_function
-
-import numpy as np
-import cv2 as cv
-
-def load_base(fn):
-    a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
-    samples, responses = a[:,1:], a[:,0]
-    return samples, responses
-
-class LetterStatModel(object):
-    class_n = 26
-    train_ratio = 0.5
-
-    def load(self, fn):
-        self.model = self.model.load(fn)
-    def save(self, fn):
-        self.model.save(fn)
-
-    def unroll_samples(self, samples):
-        sample_n, var_n = samples.shape
-        new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32)
-        new_samples[:,:-1] = np.repeat(samples, self.class_n, axis=0)
-        new_samples[:,-1] = np.tile(np.arange(self.class_n), sample_n)
-        return new_samples
-
-    def unroll_responses(self, responses):
-        sample_n = len(responses)
-        new_responses = np.zeros(sample_n*self.class_n, np.int32)
-        resp_idx = np.int32( responses + np.arange(sample_n)*self.class_n )
-        new_responses[resp_idx] = 1
-        return new_responses
-
-class RTrees(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.RTrees_create()
-
-    def train(self, samples, responses):
-        self.model.setMaxDepth(20)
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.ravel()
-
-
-class KNearest(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.KNearest_create()
-
-    def train(self, samples, responses):
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
-
-    def predict(self, samples):
-        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
-        return results.ravel()
-
-
-class Boost(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.Boost_create()
-
-    def train(self, samples, responses):
-        _sample_n, var_n = samples.shape
-        new_samples = self.unroll_samples(samples)
-        new_responses = self.unroll_responses(responses)
-        var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
-
-        self.model.setWeakCount(15)
-        self.model.setMaxDepth(10)
-        self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
-
-    def predict(self, samples):
-        new_samples = self.unroll_samples(samples)
-        _ret, resp = self.model.predict(new_samples)
-
-        return resp.ravel().reshape(-1, self.class_n).argmax(1)
-
-
-class SVM(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.SVM_create()
-
-    def train(self, samples, responses):
-        self.model.setType(cv.ml.SVM_C_SVC)
-        self.model.setC(1)
-        self.model.setKernel(cv.ml.SVM_RBF)
-        self.model.setGamma(.1)
-        self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.ravel()
-
-class MLP(LetterStatModel):
-    def __init__(self):
-        self.model = cv.ml.ANN_MLP_create()
-
-    def train(self, samples, responses):
-        _sample_n, var_n = samples.shape
-        new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
-        layer_sizes = np.int32([var_n, 100, 100, self.class_n])
-
-        self.model.setLayerSizes(layer_sizes)
-        self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
-        self.model.setBackpropMomentumScale(0.0)
-        self.model.setBackpropWeightScale(0.001)
-        self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
-        self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
-
-        self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
-
-    def predict(self, samples):
-        _ret, resp = self.model.predict(samples)
-        return resp.argmax(-1)
-
-
-def main():
-    import getopt
-    import sys
-
-    models = [RTrees, KNearest, Boost, SVM, MLP]  # NBayes
-    models = dict( [(cls.__name__.lower(), cls) for cls in models] )
-
-    args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])
-    args = dict(args)
-    args.setdefault('--model', 'svm')
-    args.setdefault('--data', 'letter-recognition.data')
-
-    datafile = cv.samples.findFile(args['--data'])
-
-    print('loading data %s ...' % datafile)
-    samples, responses = load_base(datafile)
-    Model = models[args['--model']]
-    model = Model()
-
-    train_n = int(len(samples)*model.train_ratio)
-    if '--load' in args:
-        fn = args['--load']
-        print('loading model from %s ...' % fn)
-        model.load(fn)
-    else:
-        print('training %s ...' % Model.__name__)
-        model.train(samples[:train_n], responses[:train_n])
-
-    print('testing...')
-    train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n].astype(int))
-    test_rate = np.mean(model.predict(samples[train_n:]) == responses[train_n:].astype(int))
-
-    print('train rate: %f  test rate: %f' % (train_rate*100, test_rate*100))
-
-    if '--save' in args:
-        fn = args['--save']
-        print('saving model to %s ...' % fn)
-        model.save(fn)
-
-    print('Done')
-
-
-if __name__ == '__main__':
-    print(__doc__)
-    main()
-    cv.destroyAllWindows()
diff --git a/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py b/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
deleted file mode 100644
index eeb246bc38..0000000000
--- a/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import cv2 as cv
-import numpy as np
-
-# Set up training data
-## [setup1]
-labels = np.array([1, -1, -1, -1])
-trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
-## [setup1]
-
-# Train the SVM
-## [init]
-svm = cv.ml.SVM_create()
-svm.setType(cv.ml.SVM_C_SVC)
-svm.setKernel(cv.ml.SVM_LINEAR)
-svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
-## [init]
-## [train]
-svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
-## [train]
-
-# Data for visual representation
-width = 512
-height = 512
-image = np.zeros((height, width, 3), dtype=np.uint8)
-
-# Show the decision regions given by the SVM
-## [show]
-green = (0,255,0)
-blue = (255,0,0)
-for i in range(image.shape[0]):
-    for j in range(image.shape[1]):
-        sampleMat = np.matrix([[j,i]], dtype=np.float32)
-        response = svm.predict(sampleMat)[1]
-
-        if response == 1:
-            image[i,j] = green
-        elif response == -1:
-            image[i,j] = blue
-## [show]
-
-# Show the training data
-## [show_data]
-thickness = -1
-cv.circle(image, (501,  10), 5, (  0,   0,   0), thickness)
-cv.circle(image, (255,  10), 5, (255, 255, 255), thickness)
-cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
-cv.circle(image, ( 10, 501), 5, (255, 255, 255), thickness)
-## [show_data]
-
-# Show support vectors
-## [show_vectors]
-thickness = 2
-sv = svm.getUncompressedSupportVectors()
-
-for i in range(sv.shape[0]):
-    cv.circle(image, (int(sv[i,0]), int(sv[i,1])), 6, (128, 128, 128), thickness)
-## [show_vectors]
-
-cv.imwrite('result.png', image)  # save the image
-
-cv.imshow('SVM Simple Example', image)  # show it to the user
-cv.waitKey()
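With a linear kernel, cv::ml::SVM stores its support vectors in a compressed form internally, which is why the tutorial above (and the one below) draws getUncompressedSupportVectors() rather than getSupportVectors(). A sketch that makes the difference visible, reusing the four training points from the tutorial above; the shapes in the comments are what one would expect for this setup, not output captured from a run:

import cv2 as cv
import numpy as np

labels = np.array([1, -1, -1, -1], dtype=np.int32)
trainingData = np.array([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)

svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)

print(svm.getSupportVectors().shape)              # compressed: a single row for a linear kernel
print(svm.getUncompressedSupportVectors().shape)  # one row per original support vector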
diff --git a/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py b/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
deleted file mode 100644
index a88ac4bd1b..0000000000
--- a/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from __future__ import print_function
-import cv2 as cv
-import numpy as np
-
-NTRAINING_SAMPLES = 100  # Number of training samples per class
-FRAC_LINEAR_SEP = 0.9    # Fraction of samples which compose the linear separable part
-
-# Data for visual representation
-WIDTH = 512
-HEIGHT = 512
-I = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8)
-
-# --------------------- 1. Set up training data randomly ---------------------------------------
-trainData = np.empty((2*NTRAINING_SAMPLES, 2), dtype=np.float32)
-labels = np.empty((2*NTRAINING_SAMPLES, 1), dtype=np.int32)
-
-np.random.seed(100)  # Seed the NumPy generator used below so runs are reproducible
-
-# Set up the linearly separable part of the training data
-nLinearSamples = int(FRAC_LINEAR_SEP * NTRAINING_SAMPLES)
-
-## [setup1]
-# Generate random points for the class 1
-trainClass = trainData[0:nLinearSamples,:]
-# The x coordinate of the points is in [0, 0.4)
-c = trainClass[:,0:1]
-c[:] = np.random.uniform(0.0, 0.4 * WIDTH, c.shape)
-# The y coordinate of the points is in [0, 1)
-c = trainClass[:,1:2]
-c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
-
-# Generate random points for the class 2
-trainClass = trainData[2*NTRAINING_SAMPLES-nLinearSamples:2*NTRAINING_SAMPLES,:]
-# The x coordinate of the points is in [0.6, 1]
-c = trainClass[:,0:1]
-c[:] = np.random.uniform(0.6*WIDTH, WIDTH, c.shape)
-# The y coordinate of the points is in [0, 1)
-c = trainClass[:,1:2]
-c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
-## [setup1]
-
-#------------------ Set up the non-linearly separable part of the training data ---------------
-## [setup2]
-# Generate random points for the classes 1 and 2
-trainClass = trainData[nLinearSamples:2*NTRAINING_SAMPLES-nLinearSamples,:]
-# The x coordinate of the points is in [0.4, 0.6)
-c = trainClass[:,0:1]
-c[:] = np.random.uniform(0.4*WIDTH, 0.6*WIDTH, c.shape)
-# The y coordinate of the points is in [0, 1)
-c = trainClass[:,1:2]
-c[:] = np.random.uniform(0.0, HEIGHT, c.shape)
-## [setup2]
-
-#------------------------- Set up the labels for the classes ---------------------------------
-labels[0:NTRAINING_SAMPLES,:] = 1                      # Class 1
-labels[NTRAINING_SAMPLES:2*NTRAINING_SAMPLES,:] = 2    # Class 2
-
-#------------------------ 2. Set up the support vector machines parameters --------------------
-print('Starting training process')
-## [init]
-svm = cv.ml.SVM_create()
-svm.setType(cv.ml.SVM_C_SVC)
-svm.setC(0.1)
-svm.setKernel(cv.ml.SVM_LINEAR)
-svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, int(1e7), 1e-6))
-## [init]
-
-#------------------------ 3. Train the svm ----------------------------------------------------
-## [train]
-svm.train(trainData, cv.ml.ROW_SAMPLE, labels)
-## [train]
-print('Finished training process')
-
-#------------------------ 4. Show the decision regions ----------------------------------------
-## [show]
-green = (0,100,0)
-blue = (100,0,0)
-for i in range(I.shape[0]):
-    for j in range(I.shape[1]):
-        sampleMat = np.matrix([[j,i]], dtype=np.float32)
-        response = svm.predict(sampleMat)[1]
-
-        if response == 1:
-            I[i,j] = green
-        elif response == 2:
-            I[i,j] = blue
-## [show]
-
-#----------------------- 5. Show the training data --------------------------------------------
-## [show_data]
-thick = -1
-# Class 1
-for i in range(NTRAINING_SAMPLES):
-    px = trainData[i,0]
-    py = trainData[i,1]
-    cv.circle(I, (int(px), int(py)), 3, (0, 255, 0), thick)
-
-# Class 2
-for i in range(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES):
-    px = trainData[i,0]
-    py = trainData[i,1]
-    cv.circle(I, (int(px), int(py)), 3, (255, 0, 0), thick)
-## [show_data]
-
-#------------------------- 6. Show support vectors --------------------------------------------
-## [show_vectors]
-thick = 2
-sv = svm.getUncompressedSupportVectors()
-
-for i in range(sv.shape[0]):
-    cv.circle(I, (int(sv[i,0]), int(sv[i,1])), 6, (128, 128, 128), thick)
-## [show_vectors]
-
-cv.imwrite('result.png', I)                       # save the Image
-cv.imshow('SVM for Non-Linear Training Data', I)  # show it to the user
-cv.waitKey()
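hogsvm.py below relies on moment-based deskewing: the measured skew mu11/mu02 goes into an affine matrix, the inverse of which (via WARP_INVERSE_MAP) removes the slant, and the -0.5*SZ*skew term keeps the cell centred. A self-contained check on a synthetic sheared bar; the shear factor 0.5 and the stroke geometry are made up for illustration:

import cv2 as cv
import numpy as np

SZ = 20  # cell size, matching the tutorial code below

def skewness(img):
    m = cv.moments(img)
    return m['mu11'] / m['mu02'] if abs(m['mu02']) > 1e-2 else 0.0

# Build a synthetic slanted stroke: a vertical bar sheared by 0.5.
img = np.zeros((SZ, SZ), np.uint8)
img[4:16, 9:11] = 255
shear = np.float32([[1, 0.5, -0.25*SZ], [0, 1, 0]])
slanted = cv.warpAffine(img, shear, (SZ, SZ))

# Apply the same correction as deskew() in the tutorial below.
skew = skewness(slanted)
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
fixed = cv.warpAffine(slanted, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
print(skewness(slanted), '->', skewness(fixed))  # the second value should be close to 0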
diff --git a/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py b/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
deleted file mode 100755
index 898c7dc4d7..0000000000
--- a/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-import cv2 as cv
-import numpy as np
-
-SZ = 20       # size of each digit cell
-bin_n = 16    # Number of bins
-
-affine_flags = cv.WARP_INVERSE_MAP | cv.INTER_LINEAR
-
-## [deskew]
-def deskew(img):
-    m = cv.moments(img)
-    if abs(m['mu02']) < 1e-2:
-        return img.copy()
-    skew = m['mu11']/m['mu02']
-    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
-    img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags)
-    return img
-## [deskew]
-
-## [hog]
-def hog(img):
-    gx = cv.Sobel(img, cv.CV_32F, 1, 0)
-    gy = cv.Sobel(img, cv.CV_32F, 0, 1)
-    mag, ang = cv.cartToPolar(gx, gy)
-    bins = np.int32(bin_n*ang/(2*np.pi))    # quantize angles into bin indices in [0, 16)
-    bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
-    mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
-    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
-    hist = np.hstack(hists)    # hist is a 64-dimensional vector
-    return hist
-## [hog]
-
-img = cv.imread(cv.samples.findFile('digits.png'), 0)
-if img is None:
-    raise Exception("we need the digits.png image from samples/data here !")
-
-cells = [np.hsplit(row, 100) for row in np.vsplit(img, 50)]
-
-# First half is trainData, remaining is testData
-train_cells = [ i[:50] for i in cells ]
-test_cells = [ i[50:] for i in cells ]
-
-######     Now training      ########################
-
-deskewed = [list(map(deskew, row)) for row in train_cells]
-hogdata = [list(map(hog, row)) for row in deskewed]
-trainData = np.float32(hogdata).reshape(-1, bin_n*4)
-responses = np.repeat(np.arange(10), 250)[:, np.newaxis]
-
-svm = cv.ml.SVM_create()
-svm.setKernel(cv.ml.SVM_LINEAR)
-svm.setType(cv.ml.SVM_C_SVC)
-svm.setC(2.67)
-svm.setGamma(5.383)
-
-svm.train(trainData, cv.ml.ROW_SAMPLE, responses)
-svm.save('svm_data.dat')
-
-######     Now testing      ########################
-
-deskewed = [list(map(deskew, row)) for row in test_cells]
-hogdata = [list(map(hog, row)) for row in deskewed]
-testData = np.float32(hogdata).reshape(-1, bin_n*4)
-result = svm.predict(testData)[1]
-
-#######   Check Accuracy   ########################
-mask = result == responses
-correct = np.count_nonzero(mask)
-print(correct*100.0/result.size)
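The sample above ends with a single accuracy figure; digits.py earlier in this patch also prints a per-digit confusion matrix, which is often more informative. A sketch of the same bookkeeping with np.bincount on stand-in predictions (the arrays here are fabricated for illustration, not SVM output):

import numpy as np

# Toy stand-ins for hogsvm.py's arrays: 'responses' are true labels,
# 'result' the predictions, both shaped (N, 1) as in the sample.
responses = np.repeat(np.arange(10), 5)[:, np.newaxis]
result = responses.astype(np.float32).copy()
result[::7] = (result[::7] + 1) % 10            # corrupt a few predictions

truth = responses.ravel().astype(int)
pred = result.ravel().astype(int)
confusion = np.bincount(truth * 10 + pred, minlength=100).reshape(10, 10)
print(confusion)                                 # rows: true digit, columns: predicted digit
print('accuracy: %.2f %%' % (100.0 * np.trace(confusion) / confusion.sum()))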