Merge branch 'master' into python2and3

Michael Pratt 2014-08-06 01:45:07 -04:00 committed by arc
commit 218b12c557
115 changed files with 23402 additions and 33378 deletions

View File

@ -1,4 +1,4 @@
set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_imgcodecs opencv_videoio opencv_highgui opencv_calib3d opencv_video opencv_features2d)
set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_calib3d opencv_features2d)
ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND)
@ -10,13 +10,10 @@ project(traincascade)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_include_modules(${OPENCV_TRAINCASCADE_DEPS})
set(traincascade_files traincascade.cpp
cascadeclassifier.cpp cascadeclassifier.h
boost.cpp boost.h features.cpp traincascade_features.h
haarfeatures.cpp haarfeatures.h
lbpfeatures.cpp lbpfeatures.h
HOGfeatures.cpp HOGfeatures.h
imagestorage.cpp imagestorage.h)
file(GLOB SRCS *.cpp)
file(GLOB HDRS *.h*)
set(traincascade_files ${SRCS} ${HDRS})
set(the_target opencv_traincascade)
add_executable(${the_target} ${traincascade_files})

View File

@ -2,7 +2,7 @@
#define _OPENCV_BOOST_H_
#include "traincascade_features.h"
#include "ml.h"
#include "old_ml.hpp"
struct CvCascadeBoostParams : CvBoostParams
{

View File

@ -7,8 +7,6 @@
#include "lbpfeatures.h"
#include "HOGfeatures.h" //new
#include "boost.h"
#include "cv.h"
#include "cxcore.h"
#define CC_CASCADE_FILENAME "cascade.xml"
#define CC_PARAMS_FILENAME "params.xml"

apps/traincascade/old_ml.hpp (new file, 2165 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,792 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "old_ml_precomp.hpp"
#include <ctype.h>
#define MISS_VAL FLT_MAX
#define CV_VAR_MISS 0
CvTrainTestSplit::CvTrainTestSplit()
{
train_sample_part_mode = CV_COUNT;
train_sample_part.count = -1;
mix = false;
}
CvTrainTestSplit::CvTrainTestSplit( int _train_sample_count, bool _mix )
{
train_sample_part_mode = CV_COUNT;
train_sample_part.count = _train_sample_count;
mix = _mix;
}
CvTrainTestSplit::CvTrainTestSplit( float _train_sample_portion, bool _mix )
{
train_sample_part_mode = CV_PORTION;
train_sample_part.portion = _train_sample_portion;
mix = _mix;
}
////////////////
CvMLData::CvMLData()
{
values = missing = var_types = var_idx_mask = response_out = var_idx_out = var_types_out = 0;
train_sample_idx = test_sample_idx = 0;
header_lines_number = 0;
sample_idx = 0;
response_idx = -1;
train_sample_count = -1;
delimiter = ',';
miss_ch = '?';
//flt_separator = '.';
rng = &cv::theRNG();
}
CvMLData::~CvMLData()
{
clear();
}
void CvMLData::free_train_test_idx()
{
cvReleaseMat( &train_sample_idx );
cvReleaseMat( &test_sample_idx );
sample_idx = 0;
}
void CvMLData::clear()
{
class_map.clear();
cvReleaseMat( &values );
cvReleaseMat( &missing );
cvReleaseMat( &var_types );
cvReleaseMat( &var_idx_mask );
cvReleaseMat( &response_out );
cvReleaseMat( &var_idx_out );
cvReleaseMat( &var_types_out );
free_train_test_idx();
total_class_count = 0;
response_idx = -1;
train_sample_count = -1;
}
void CvMLData::set_header_lines_number( int idx )
{
header_lines_number = std::max(0, idx);
}
int CvMLData::get_header_lines_number() const
{
return header_lines_number;
}
static char *fgets_chomp(char *str, int n, FILE *stream)
{
char *head = fgets(str, n, stream);
if( head )
{
for(char *tail = head + strlen(head) - 1; tail >= head; --tail)
{
if( *tail != '\r' && *tail != '\n' )
break;
*tail = '\0';
}
}
return head;
}
int CvMLData::read_csv(const char* filename)
{
const int M = 1000000;
const char str_delimiter[3] = { ' ', delimiter, '\0' };
FILE* file = 0;
CvMemStorage* storage;
CvSeq* seq;
char *ptr;
float* el_ptr;
CvSeqReader reader;
int cols_count = 0;
uchar *var_types_ptr = 0;
clear();
file = fopen( filename, "rt" );
if( !file )
return -1;
std::vector<char> _buf(M);
char* buf = &_buf[0];
// skip header lines
for( int i = 0; i < header_lines_number; i++ )
{
if( fgets( buf, M, file ) == 0 )
{
fclose(file);
return -1;
}
}
// read the first data line and determine the number of variables
if( !fgets_chomp( buf, M, file ))
{
fclose(file);
return -1;
}
ptr = buf;
while( *ptr == ' ' )
ptr++;
for( ; *ptr != '\0'; )
{
if(*ptr == delimiter || *ptr == ' ')
{
cols_count++;
ptr++;
while( *ptr == ' ' ) ptr++;
}
else
ptr++;
}
cols_count++;
if ( cols_count == 0)
{
fclose(file);
return -1;
}
// create temporary memory storage to store the whole database
el_ptr = new float[cols_count];
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof(*seq), cols_count*sizeof(float), storage );
var_types = cvCreateMat( 1, cols_count, CV_8U );
cvZero( var_types );
var_types_ptr = var_types->data.ptr;
for(;;)
{
char *token = NULL;
int type;
token = strtok(buf, str_delimiter);
if (!token)
break;
for (int i = 0; i < cols_count-1; i++)
{
str_to_flt_elem( token, el_ptr[i], type);
var_types_ptr[i] |= type;
token = strtok(NULL, str_delimiter);
if (!token)
{
fclose(file);
delete [] el_ptr;
return -1;
}
}
str_to_flt_elem( token, el_ptr[cols_count-1], type);
var_types_ptr[cols_count-1] |= type;
cvSeqPush( seq, el_ptr );
if( !fgets_chomp( buf, M, file ) )
break;
}
fclose(file);
values = cvCreateMat( seq->total, cols_count, CV_32FC1 );
missing = cvCreateMat( seq->total, cols_count, CV_8U );
var_idx_mask = cvCreateMat( 1, values->cols, CV_8UC1 );
cvSet( var_idx_mask, cvRealScalar(1) );
train_sample_count = seq->total;
cvStartReadSeq( seq, &reader );
for(int i = 0; i < seq->total; i++ )
{
const float* sdata = (float*)reader.ptr;
float* ddata = values->data.fl + cols_count*i;
uchar* dm = missing->data.ptr + cols_count*i;
for( int j = 0; j < cols_count; j++ )
{
ddata[j] = sdata[j];
dm[j] = ( fabs( MISS_VAL - sdata[j] ) <= FLT_EPSILON );
}
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
if ( cvNorm( missing, 0, CV_L1 ) <= FLT_EPSILON )
cvReleaseMat( &missing );
cvReleaseMemStorage( &storage );
delete []el_ptr;
return 0;
}
const CvMat* CvMLData::get_values() const
{
return values;
}
const CvMat* CvMLData::get_missing() const
{
CV_FUNCNAME( "CvMLData::get_missing" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
__END__;
return missing;
}
const std::map<cv::String, int>& CvMLData::get_class_labels_map() const
{
return class_map;
}
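// Parses a single CSV token: a numeric token is CV_VAR_ORDERED, a lone
// miss_ch character ('?' by default) becomes MISS_VAL/CV_VAR_MISS, and any
// other string is treated as a class label mapped to a consecutive 1-based id.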
void CvMLData::str_to_flt_elem( const char* token, float& flt_elem, int& type)
{
char* stopstring = NULL;
flt_elem = (float)strtod( token, &stopstring );
assert( stopstring );
type = CV_VAR_ORDERED;
if ( *stopstring == miss_ch && strlen(stopstring) == 1 ) // missed value
{
flt_elem = MISS_VAL;
type = CV_VAR_MISS;
}
else
{
if ( (*stopstring != 0) && (*stopstring != '\n') && (strcmp(stopstring, "\r\n") != 0) ) // class label
{
int idx = class_map[token];
if ( idx == 0)
{
total_class_count++;
idx = total_class_count;
class_map[token] = idx;
}
flt_elem = (float)idx;
type = CV_VAR_CATEGORICAL;
}
}
}
void CvMLData::set_delimiter(char ch)
{
CV_FUNCNAME( "CvMLData::set_delimited" );
__BEGIN__;
if (ch == miss_ch /*|| ch == flt_separator*/)
CV_ERROR(CV_StsBadArg, "delimited, miss_character and flt_separator must be different");
delimiter = ch;
__END__;
}
char CvMLData::get_delimiter() const
{
return delimiter;
}
void CvMLData::set_miss_ch(char ch)
{
CV_FUNCNAME( "CvMLData::set_miss_ch" );
__BEGIN__;
if (ch == delimiter/* || ch == flt_separator*/)
CV_ERROR(CV_StsBadArg, "delimited, miss_character and flt_separator must be different");
miss_ch = ch;
__END__;
}
char CvMLData::get_miss_ch() const
{
return miss_ch;
}
void CvMLData::set_response_idx( int idx )
{
CV_FUNCNAME( "CvMLData::set_response_idx" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
if ( idx >= values->cols)
CV_ERROR( CV_StsBadArg, "idx value is not correct" );
if ( response_idx >= 0 )
change_var_idx( response_idx, true );
if ( idx >= 0 )
change_var_idx( idx, false );
response_idx = idx;
__END__;
}
int CvMLData::get_response_idx() const
{
CV_FUNCNAME( "CvMLData::get_response_idx" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
__END__;
return response_idx;
}
void CvMLData::change_var_type( int var_idx, int type )
{
CV_FUNCNAME( "CvMLData::change_var_type" );
__BEGIN__;
int var_count = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
var_count = values->cols;
if ( var_idx < 0 || var_idx >= var_count)
CV_ERROR( CV_StsBadArg, "var_idx is not correct" );
if ( type != CV_VAR_ORDERED && type != CV_VAR_CATEGORICAL)
CV_ERROR( CV_StsBadArg, "type is not correct" );
assert( var_types );
if ( var_types->data.ptr[var_idx] == CV_VAR_CATEGORICAL && type == CV_VAR_ORDERED)
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" );
var_types->data.ptr[var_idx] = (uchar)type;
__END__;
return;
}
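// Sets variable types from a specification string: "ord" or "cat" alone apply
// to all variables; otherwise ranges/indices are listed per type, e.g.
// "ord[0-17],cat[18]". Every variable must be covered by the specification.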
void CvMLData::set_var_types( const char* str )
{
CV_FUNCNAME( "CvMLData::set_var_types" );
__BEGIN__;
const char* ord = 0, *cat = 0;
int var_count = 0, set_var_type_count = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
var_count = values->cols;
assert( var_types );
ord = strstr( str, "ord" );
cat = strstr( str, "cat" );
if ( !ord && !cat )
CV_ERROR( CV_StsBadArg, "types string is not correct" );
if ( !ord && strlen(cat) == 3 ) // str == "cat"
{
cvSet( var_types, cvScalarAll(CV_VAR_CATEGORICAL) );
return;
}
if ( !cat && strlen(ord) == 3 ) // str == "ord"
{
cvSet( var_types, cvScalarAll(CV_VAR_ORDERED) );
return;
}
if ( ord ) // parse ord str
{
char* stopstring = NULL;
if ( ord[3] != '[')
CV_ERROR( CV_StsBadArg, "types string is not correct" );
ord += 4; // pass "ord["
do
{
int b1 = (int)strtod( ord, &stopstring );
if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') )
CV_ERROR( CV_StsBadArg, "types string is not correct" );
ord = stopstring + 1;
if ( (stopstring[0] == ',') || (stopstring[0] == ']'))
{
if ( var_types->data.ptr[b1] == CV_VAR_CATEGORICAL)
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" );
var_types->data.ptr[b1] = CV_VAR_ORDERED;
set_var_type_count++;
}
else
{
if ( stopstring[0] == '-')
{
int b2 = (int)strtod( ord, &stopstring);
if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') )
CV_ERROR( CV_StsBadArg, "types string is not correct" );
ord = stopstring + 1;
for (int i = b1; i <= b2; i++)
{
if ( var_types->data.ptr[i] == CV_VAR_CATEGORICAL)
CV_ERROR( CV_StsBadArg, "it`s impossible to assign CV_VAR_ORDERED type to categorical variable" );
var_types->data.ptr[i] = CV_VAR_ORDERED;
}
set_var_type_count += b2 - b1 + 1;
}
else
CV_ERROR( CV_StsBadArg, "types string is not correct" );
}
}
while (*stopstring != ']');
if ( stopstring[1] != '\0' && stopstring[1] != ',')
CV_ERROR( CV_StsBadArg, "types string is not correct" );
}
if ( cat ) // parse cat str
{
char* stopstring = NULL;
if ( cat[3] != '[')
CV_ERROR( CV_StsBadArg, "types string is not correct" );
cat += 4; // pass "cat["
do
{
int b1 = (int)strtod( cat, &stopstring );
if ( *stopstring == 0 || (*stopstring != ',' && *stopstring != ']' && *stopstring != '-') )
CV_ERROR( CV_StsBadArg, "types string is not correct" );
cat = stopstring + 1;
if ( (stopstring[0] == ',') || (stopstring[0] == ']'))
{
var_types->data.ptr[b1] = CV_VAR_CATEGORICAL;
set_var_type_count++;
}
else
{
if ( stopstring[0] == '-')
{
int b2 = (int)strtod( cat, &stopstring);
if ( (*stopstring == 0) || (*stopstring != ',' && *stopstring != ']') )
CV_ERROR( CV_StsBadArg, "types string is not correct" );
cat = stopstring + 1;
for (int i = b1; i <= b2; i++)
var_types->data.ptr[i] = CV_VAR_CATEGORICAL;
set_var_type_count += b2 - b1 + 1;
}
else
CV_ERROR( CV_StsBadArg, "types string is not correct" );
}
}
while (*stopstring != ']');
if ( stopstring[1] != '\0' && stopstring[1] != ',')
CV_ERROR( CV_StsBadArg, "types string is not correct" );
}
if (set_var_type_count != var_count)
CV_ERROR( CV_StsBadArg, "types string is not correct" );
__END__;
}
const CvMat* CvMLData::get_var_types()
{
CV_FUNCNAME( "CvMLData::get_var_types" );
__BEGIN__;
uchar *var_types_out_ptr = 0;
int avcount, vt_size;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
assert( var_idx_mask );
avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) );
vt_size = avcount + (response_idx >= 0);
if ( avcount == values->cols || (avcount == values->cols-1 && response_idx == values->cols-1) )
return var_types;
if ( !var_types_out || ( var_types_out && var_types_out->cols != vt_size ) )
{
cvReleaseMat( &var_types_out );
var_types_out = cvCreateMat( 1, vt_size, CV_8UC1 );
}
var_types_out_ptr = var_types_out->data.ptr;
for( int i = 0; i < var_types->cols; i++)
{
if (i == response_idx || !var_idx_mask->data.ptr[i]) continue;
*var_types_out_ptr = var_types->data.ptr[i];
var_types_out_ptr++;
}
if ( response_idx >= 0 )
*var_types_out_ptr = var_types->data.ptr[response_idx];
__END__;
return var_types_out;
}
int CvMLData::get_var_type( int var_idx ) const
{
return var_types->data.ptr[var_idx];
}
const CvMat* CvMLData::get_responses()
{
CV_FUNCNAME( "CvMLData::get_responses_ptr" );
__BEGIN__;
int var_count = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
var_count = values->cols;
if ( response_idx < 0 || response_idx >= var_count )
return 0;
if ( !response_out )
response_out = cvCreateMatHeader( values->rows, 1, CV_32FC1 );
else
cvInitMatHeader( response_out, values->rows, 1, CV_32FC1);
cvGetCol( values, response_out, response_idx );
__END__;
return response_out;
}
void CvMLData::set_train_test_split( const CvTrainTestSplit * spl)
{
CV_FUNCNAME( "CvMLData::set_division" );
__BEGIN__;
int sample_count = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
sample_count = values->rows;
float train_sample_portion;
if (spl->train_sample_part_mode == CV_COUNT)
{
train_sample_count = spl->train_sample_part.count;
if (train_sample_count > sample_count)
CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
train_sample_count = train_sample_count<=0 ? sample_count : train_sample_count;
}
else // spl->train_sample_part_mode == CV_PORTION
{
train_sample_portion = spl->train_sample_part.portion;
if ( train_sample_portion > 1)
CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
train_sample_portion = train_sample_portion <= FLT_EPSILON ||
1 - train_sample_portion <= FLT_EPSILON ? 1 : train_sample_portion;
train_sample_count = std::max(1, cvFloor( train_sample_portion * sample_count ));
}
if ( train_sample_count == sample_count )
{
free_train_test_idx();
return;
}
if ( train_sample_idx && train_sample_idx->cols != train_sample_count )
free_train_test_idx();
if ( !sample_idx)
{
int test_sample_count = sample_count- train_sample_count;
sample_idx = (int*)cvAlloc( sample_count * sizeof(sample_idx[0]) );
for (int i = 0; i < sample_count; i++ )
sample_idx[i] = i;
train_sample_idx = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 );
*train_sample_idx = cvMat( 1, train_sample_count, CV_32SC1, &sample_idx[0] );
CV_Assert(test_sample_count > 0);
test_sample_idx = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 );
*test_sample_idx = cvMat( 1, test_sample_count, CV_32SC1, &sample_idx[train_sample_count] );
}
mix = spl->mix;
if ( mix )
mix_train_and_test_idx();
__END__;
}
const CvMat* CvMLData::get_train_sample_idx() const
{
CV_FUNCNAME( "CvMLData::get_train_sample_idx" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
__END__;
return train_sample_idx;
}
const CvMat* CvMLData::get_test_sample_idx() const
{
CV_FUNCNAME( "CvMLData::get_test_sample_idx" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
__END__;
return test_sample_idx;
}
void CvMLData::mix_train_and_test_idx()
{
CV_FUNCNAME( "CvMLData::mix_train_and_test_idx" );
__BEGIN__;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
__END__;
if ( !sample_idx)
return;
if ( train_sample_count > 0 && train_sample_count < values->rows )
{
int n = values->rows;
for (int i = 0; i < n; i++)
{
int a = (*rng)(n);
int b = (*rng)(n);
int t;
CV_SWAP( sample_idx[a], sample_idx[b], t );
}
}
}
const CvMat* CvMLData::get_var_idx()
{
CV_FUNCNAME( "CvMLData::get_var_idx" );
__BEGIN__;
int avcount = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
assert( var_idx_mask );
avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) );
int* vidx;
if ( avcount == values->cols )
return 0;
if ( !var_idx_out || ( var_idx_out && var_idx_out->cols != avcount ) )
{
cvReleaseMat( &var_idx_out );
var_idx_out = cvCreateMat( 1, avcount, CV_32SC1);
if ( response_idx >=0 )
var_idx_mask->data.ptr[response_idx] = 0;
}
vidx = var_idx_out->data.i;
for(int i = 0; i < var_idx_mask->cols; i++)
if ( var_idx_mask->data.ptr[i] )
{
*vidx = i;
vidx++;
}
__END__;
return var_idx_out;
}
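// Misspelled name kept as a backward-compatible alias; it simply forwards to
// change_var_idx().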
void CvMLData::chahge_var_idx( int vi, bool state )
{
change_var_idx( vi, state );
}
void CvMLData::change_var_idx( int vi, bool state )
{
CV_FUNCNAME( "CvMLData::change_var_idx" );
__BEGIN__;
int var_count = 0;
if ( !values )
CV_ERROR( CV_StsInternal, "data is empty" );
var_count = values->cols;
if ( vi < 0 || vi >= var_count)
CV_ERROR( CV_StsBadArg, "variable index is not correct" );
assert( var_idx_mask );
var_idx_mask->data.ptr[vi] = state;
__END__;
}
/* End of file. */
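For orientation, a minimal sketch of how this legacy loader is typically driven end to end; the file name "data.csv", the response column choice and the 70/30 split are assumptions for illustration, not part of the patch:

// Usage sketch (assumes a CSV file "data.csv" whose last column is the response).
#include "old_ml.hpp"

int main()
{
    CvMLData data;
    if( data.read_csv( "data.csv" ) != 0 )       // returns 0 on success, -1 on failure
        return 1;
    const CvMat* values = data.get_values();     // samples as rows, CV_32FC1
    data.set_response_idx( values->cols - 1 );   // treat the last column as the response
    CvTrainTestSplit spl( 0.7f, true );          // 70% training portion, shuffled
    data.set_train_test_split( &spl );
    const CvMat* train_idx = data.get_train_sample_idx();
    const CvMat* test_idx  = data.get_test_sample_idx();
    (void)train_idx; (void)test_idx;
    return 0;
}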

File diff suppressed because it is too large

View File

@ -0,0 +1,376 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/core.hpp"
#include "old_ml.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include <assert.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#define ML_IMPL CV_IMPL
#define __BEGIN__ __CV_BEGIN__
#define __END__ __CV_END__
#define EXIT __CV_EXIT__
#define CV_MAT_ELEM_FLAG( mat, type, comp, vect, tflag ) \
(( tflag == CV_ROW_SAMPLE ) \
? (CV_MAT_ELEM( mat, type, comp, vect )) \
: (CV_MAT_ELEM( mat, type, vect, comp )))
/* Convert matrix to vector */
#define ICV_MAT2VEC( mat, vdata, vstep, num ) \
if( MIN( (mat).rows, (mat).cols ) != 1 ) \
CV_ERROR( CV_StsBadArg, "" ); \
(vdata) = ((mat).data.ptr); \
if( (mat).rows == 1 ) \
{ \
(vstep) = CV_ELEM_SIZE( (mat).type ); \
(num) = (mat).cols; \
} \
else \
{ \
(vstep) = (mat).step; \
(num) = (mat).rows; \
}
/* get raw data */
#define ICV_RAWDATA( mat, flags, rdata, sstep, cstep, m, n ) \
(rdata) = (mat).data.ptr; \
if( CV_IS_ROW_SAMPLE( flags ) ) \
{ \
(sstep) = (mat).step; \
(cstep) = CV_ELEM_SIZE( (mat).type ); \
(m) = (mat).rows; \
(n) = (mat).cols; \
} \
else \
{ \
(cstep) = (mat).step; \
(sstep) = CV_ELEM_SIZE( (mat).type ); \
(n) = (mat).rows; \
(m) = (mat).cols; \
}
#define ICV_IS_MAT_OF_TYPE( mat, mat_type) \
(CV_IS_MAT( mat ) && CV_MAT_TYPE( mat->type ) == (mat_type) && \
(mat)->cols > 0 && (mat)->rows > 0)
/*
uchar* data;    int sstep, cstep;       - trainData->data
uchar* classes; int clstep; int ncl;    - trainClasses
uchar* tmask;   int tmstep; int ntm;    - typeMask
uchar* missed;  int msstep, mcstep;     - missedMeasurements...
int mm, mn;                             == m,n == size,dim
uchar* sidx;    int sistep;             - sampleIdx
uchar* cidx;    int cistep;             - compIdx
int k, l;                               == n,m == dim,size (length of cidx, sidx)
int m, n;                               == size,dim
*/
#define ICV_DECLARE_TRAIN_ARGS() \
uchar* data; \
int sstep, cstep; \
uchar* classes; \
int clstep; \
int ncl; \
uchar* tmask; \
int tmstep; \
int ntm; \
uchar* missed; \
int msstep, mcstep; \
int mm, mn; \
uchar* sidx; \
int sistep; \
uchar* cidx; \
int cistep; \
int k, l; \
int m, n; \
\
data = classes = tmask = missed = sidx = cidx = NULL; \
sstep = cstep = clstep = ncl = tmstep = ntm = msstep = mcstep = mm = mn = 0; \
sistep = cistep = k = l = m = n = 0;
#define ICV_TRAIN_DATA_REQUIRED( param, flags ) \
if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_RAWDATA( *(param), (flags), data, sstep, cstep, m, n ); \
k = n; \
l = m; \
}
#define ICV_TRAIN_CLASSES_REQUIRED( param ) \
if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *(param), classes, clstep, ncl ); \
if( m != ncl ) \
{ \
CV_ERROR( CV_StsBadArg, "Unmatched sizes" ); \
} \
}
#define ICV_ARG_NULL( param ) \
if( (param) != NULL ) \
{ \
CV_ERROR( CV_StsBadArg, #param " parameter must be NULL" ); \
}
#define ICV_MISSED_MEASUREMENTS_OPTIONAL( param, flags ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_8UC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_RAWDATA( *(param), (flags), missed, msstep, mcstep, mm, mn ); \
if( mm != m || mn != n ) \
{ \
CV_ERROR( CV_StsBadArg, "Unmatched sizes" ); \
} \
} \
}
#define ICV_COMP_IDX_OPTIONAL( param ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *(param), cidx, cistep, k ); \
if( k > n ) \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
}
#define ICV_SAMPLE_IDX_OPTIONAL( param ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *(param), sidx, sistep, l ); \
if( l > m ) \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
}
/****************************************************************************************/
#define ICV_CONVERT_FLOAT_ARRAY_TO_MATRICE( array, matrice ) \
{ \
CvMat a, b; \
int dims = (matrice)->cols; \
int nsamples = (matrice)->rows; \
int type = CV_MAT_TYPE((matrice)->type); \
int i, offset = dims; \
\
CV_ASSERT( type == CV_32FC1 || type == CV_64FC1 ); \
offset *= ((type == CV_32FC1) ? sizeof(float) : sizeof(double));\
\
b = cvMat( 1, dims, CV_32FC1 ); \
cvGetRow( matrice, &a, 0 ); \
for( i = 0; i < nsamples; i++, a.data.ptr += offset ) \
{ \
b.data.fl = (float*)array[i]; \
CV_CALL( cvConvert( &b, &a ) ); \
} \
}
/****************************************************************************************\
* Auxiliary functions declarations *
\****************************************************************************************/
/* Generates <num_of_clusters> class centers as uniform random vectors in the
parallelepiped where <data> is concentrated. Vectors in <data> should have
horizontal orientation. If <centers> != NULL, the function doesn't allocate any
memory, stores the generated centers in <centers> and returns <centers>.
If <centers> == NULL, the function allocates the matrix itself. Centers
are supposed to be oriented horizontally. */
CvMat* icvGenerateRandomClusterCenters( int seed,
const CvMat* data,
int num_of_clusters,
CvMat* centers CV_DEFAULT(0));
/* Fills <labels> using <probs> by choosing the maximal probability. Outliers are
detected by <outlier_thresh> and get the cluster label (-1). The function also ensures
there are no "empty" clusters by filling empty clusters with the maximal probability vector.
If probs_sums != NULL, fills it with the sums of probabilities for each sample (useful
for normalizing the probability matrix of FCM). */
void icvFindClusterLabels( const CvMat* probs, float outlier_thresh, float r,
const CvMat* labels );
typedef struct CvSparseVecElem32f
{
int idx;
float val;
}
CvSparseVecElem32f;
/* Prepare training data and related parameters */
#define CV_TRAIN_STATMODEL_DEFRAGMENT_TRAIN_DATA 1
#define CV_TRAIN_STATMODEL_SAMPLES_AS_ROWS 2
#define CV_TRAIN_STATMODEL_SAMPLES_AS_COLUMNS 4
#define CV_TRAIN_STATMODEL_CATEGORICAL_RESPONSE 8
#define CV_TRAIN_STATMODEL_ORDERED_RESPONSE 16
#define CV_TRAIN_STATMODEL_RESPONSES_ON_OUTPUT 32
#define CV_TRAIN_STATMODEL_ALWAYS_COPY_TRAIN_DATA 64
#define CV_TRAIN_STATMODEL_SPARSE_AS_SPARSE 128
int
cvPrepareTrainData( const char* /*funcname*/,
const CvMat* train_data, int tflag,
const CvMat* responses, int response_type,
const CvMat* var_idx,
const CvMat* sample_idx,
bool always_copy_data,
const float*** out_train_samples,
int* _sample_count,
int* _var_count,
int* _var_all,
CvMat** out_responses,
CvMat** out_response_map,
CvMat** out_var_idx,
CvMat** out_sample_idx=0 );
void
cvSortSamplesByClasses( const float** samples, const CvMat* classes,
int* class_ranges, const uchar** mask CV_DEFAULT(0) );
void
cvCombineResponseMaps (CvMat* _responses,
const CvMat* old_response_map,
CvMat* new_response_map,
CvMat** out_response_map);
void
cvPreparePredictData( const CvArr* sample, int dims_all, const CvMat* comp_idx,
int class_count, const CvMat* prob, float** row_sample,
int as_sparse CV_DEFAULT(0) );
/* copies clustering [or batch "predict"] results
(labels and/or centers and/or probs) back to the output arrays */
void
cvWritebackLabels( const CvMat* labels, CvMat* dst_labels,
const CvMat* centers, CvMat* dst_centers,
const CvMat* probs, CvMat* dst_probs,
const CvMat* sample_idx, int samples_all,
const CvMat* comp_idx, int dims_all );
#define cvWritebackResponses cvWritebackLabels
#define XML_FIELD_NAME "_name"
CvFileNode* icvFileNodeGetChild(CvFileNode* father, const char* name);
CvFileNode* icvFileNodeGetChildArrayElem(CvFileNode* father, const char* name,int index);
CvFileNode* icvFileNodeGetNext(CvFileNode* n, const char* name);
void cvCheckTrainData( const CvMat* train_data, int tflag,
const CvMat* missing_mask,
int* var_all, int* sample_all );
CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, bool check_for_duplicates=false );
CvMat* cvPreprocessVarType( const CvMat* type_mask, const CvMat* var_idx,
int var_all, int* response_type );
CvMat* cvPreprocessOrderedResponses( const CvMat* responses,
const CvMat* sample_idx, int sample_all );
CvMat* cvPreprocessCategoricalResponses( const CvMat* responses,
const CvMat* sample_idx, int sample_all,
CvMat** out_response_map, CvMat** class_counts=0 );
const float** cvGetTrainSamples( const CvMat* train_data, int tflag,
const CvMat* var_idx, const CvMat* sample_idx,
int* _var_count, int* _sample_count,
bool always_copy_data=false );
namespace cv
{
struct DTreeBestSplitFinder
{
DTreeBestSplitFinder() { splitSize = 0; tree = 0; node = 0; }
DTreeBestSplitFinder( CvDTree* _tree, CvDTreeNode* _node);
DTreeBestSplitFinder( const DTreeBestSplitFinder& finder, Split );
virtual ~DTreeBestSplitFinder() {}
virtual void operator()(const BlockedRange& range);
void join( DTreeBestSplitFinder& rhs );
Ptr<CvDTreeSplit> bestSplit;
Ptr<CvDTreeSplit> split;
int splitSize;
CvDTree* tree;
CvDTreeNode* node;
};
struct ForestTreeBestSplitFinder : DTreeBestSplitFinder
{
ForestTreeBestSplitFinder() : DTreeBestSplitFinder() {}
ForestTreeBestSplitFinder( CvForestTree* _tree, CvDTreeNode* _node );
ForestTreeBestSplitFinder( const ForestTreeBestSplitFinder& finder, Split );
virtual void operator()(const BlockedRange& range);
};
}
#endif /* __OPENCV_PRECOMP_H__ */

File diff suppressed because it is too large

View File

@ -1,6 +1,4 @@
#include "opencv2/core.hpp"
#include "cv.h"
#include "cascadeclassifier.h"
using namespace std;

View File

@ -2,9 +2,6 @@
#define _OPENCV_FEATURES_H_
#include "imagestorage.h"
#include "cxcore.h"
#include "cv.h"
#include "ml.h"
#include <stdio.h>
#define FEATURES "features"

View File

@ -760,6 +760,27 @@ They are
:math:`[R_2, -t]`.
By decomposing ``E``, you can only get the direction of the translation, so the function returns unit ``t``.
decomposeHomographyMat
--------------------------
Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
.. ocv:function:: int decomposeHomographyMat( InputArray H, InputArray K, OutputArrayOfArrays rotations, OutputArrayOfArrays translations, OutputArrayOfArrays normals)
:param H: The input homography matrix between two images.
:param K: The input intrinsic camera calibration matrix.
:param rotations: Array of rotation matrices.
:param translations: Array of translation matrices.
:param normals: Array of plane normal matrices.
This function extracts relative camera motion between two views observing a planar object from the homography ``H`` induced by the plane.
The intrinsic camera matrix ``K`` must also be provided. The function may return up to four mathematical solution sets. At least two of the
solutions may further be invalidated, if point correspondences are available, by applying the positive depth constraint (all points must be in front of the camera).
The decomposition method is described in detail in [Malis]_.
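For example, a minimal call looks as follows (the homography and intrinsic values here are placeholders for illustration only) ::

    cv::Matx33d K(640, 0, 320,  0, 640, 240,  0, 0, 1);   // assumed intrinsics
    cv::Matx33d H = cv::Matx33d::eye();                   // placeholder homography
    std::vector<cv::Mat> rotations, translations, normals;
    int nsols = cv::decomposeHomographyMat(H, K, rotations, translations, normals);
    // nsols is at most 4; prune solutions with the positive depth constraint
    // when point correspondences are available.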
recoverPose
---------------
@ -1518,3 +1539,5 @@ The function reconstructs 3-dimensional points (in homogeneous coordinates) by u
.. [Slabaugh] Slabaugh, G.G. Computing Euler angles from a rotation matrix. http://www.soi.city.ac.uk/~sbbh653/publications/euler.pdf (verified: 2013-04-15)
.. [Zhang2000] Z. Zhang. A Flexible New Technique for Camera Calibration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
.. [Malis] Malis, E. and Vargas, M. Deeper understanding of the homography decomposition for vision-based control, Research Report 6303, INRIA (2007)

View File

@ -314,6 +314,11 @@ CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
double ransacThreshold = 3, double confidence = 0.99);
CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
InputArray K,
OutputArrayOfArrays rotations,
OutputArrayOfArrays translations,
OutputArrayOfArrays normals);
class CV_EXPORTS_W StereoMatcher : public Algorithm
{

View File

@ -0,0 +1,482 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// This is a homography decomposition implementation contributed to OpenCV
// by Samson Yilma. It implements the homography decomposition algorithm
// described in the research report:
// Malis, E and Vargas, M, "Deeper understanding of the homography decomposition
// for vision-based control", Research Report 6303, INRIA (2007)
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, Samson Yilma (samson_yilma@yahoo.com), all rights reserved.
//
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <memory>
namespace cv
{
namespace HomographyDecomposition
{
//struct to hold solutions of homography decomposition
typedef struct _CameraMotion {
cv::Matx33d R; //!< rotation matrix
cv::Vec3d n; //!< normal of the plane the camera is looking at
cv::Vec3d t; //!< translation vector
} CameraMotion;
inline int signd(const double x)
{
return ( x >= 0 ? 1 : -1 );
}
class HomographyDecomp {
public:
HomographyDecomp() {}
virtual ~HomographyDecomp() {}
virtual void decomposeHomography(const cv::Matx33d& H, const cv::Matx33d& K,
std::vector<CameraMotion>& camMotions);
bool isRotationValid(const cv::Matx33d& R, const double epsilon=0.01);
protected:
bool passesSameSideOfPlaneConstraint(CameraMotion& motion);
virtual void decompose(std::vector<CameraMotion>& camMotions) = 0;
const cv::Matx33d& getHnorm() const {
return _Hnorm;
}
private:
cv::Matx33d normalize(const cv::Matx33d& H, const cv::Matx33d& K);
void removeScale();
cv::Matx33d _Hnorm;
};
class HomographyDecompZhang : public HomographyDecomp {
public:
HomographyDecompZhang():HomographyDecomp() {}
virtual ~HomographyDecompZhang() {}
private:
virtual void decompose(std::vector<CameraMotion>& camMotions);
bool findMotionFrom_tstar_n(const cv::Vec3d& tstar, const cv::Vec3d& n, CameraMotion& motion);
};
class HomographyDecompInria : public HomographyDecomp {
public:
HomographyDecompInria():HomographyDecomp() {}
virtual ~HomographyDecompInria() {}
private:
virtual void decompose(std::vector<CameraMotion>& camMotions);
double oppositeOfMinor(const cv::Matx33d& M, const int row, const int col);
void findRmatFrom_tstar_n(const cv::Vec3d& tstar, const cv::Vec3d& n, const double v, cv::Matx33d& R);
};
// normalizes homography with intrinsic camera parameters
Matx33d HomographyDecomp::normalize(const Matx33d& H, const Matx33d& K)
{
return K.inv() * H * K;
}
void HomographyDecomp::removeScale()
{
Mat W;
SVD::compute(_Hnorm, W);
_Hnorm = _Hnorm * (1.0/W.at<double>(1));
}
/*! Checks that the input 'R' is a pure rotation matrix.
* The conditions for this are: R' * R = I and det(R) = 1 (proper rotation matrix)
*/
bool HomographyDecomp::isRotationValid(const Matx33d& R, const double epsilon)
{
Matx33d RtR = R.t() * R;
Matx33d I(1,0,0, 0,1,0, 0,0,1);
if (norm(RtR, I, NORM_INF) > epsilon)
return false;
return (fabs(determinant(R) - 1.0) < epsilon);
}
bool HomographyDecomp::passesSameSideOfPlaneConstraint(CameraMotion& motion)
{
typedef Matx<double, 1, 1> Matx11d;
Matx31d t = Matx31d(motion.t);
Matx31d n = Matx31d(motion.n);
Matx11d proj = n.t() * motion.R.t() * t;
if ( (1 + proj(0, 0) ) <= 0 )
return false;
return true;
}
//!main routine to decompose homography
void HomographyDecomp::decomposeHomography(const Matx33d& H, const cv::Matx33d& K,
std::vector<CameraMotion>& camMotions)
{
//normalize homography matrix with intrinsic camera matrix
_Hnorm = normalize(H, K);
//remove scale of the normalized homography
removeScale();
//apply decomposition
decompose(camMotions);
}
/* function computes R&t from tstar, and plane normal(n) using
R = H * inv(I + tstar*transpose(n) );
t = R * tstar;
returns true if computed R&t is a valid solution
*/
bool HomographyDecompZhang::findMotionFrom_tstar_n(const cv::Vec3d& tstar, const cv::Vec3d& n, CameraMotion& motion)
{
Matx31d tstar_m = Mat(tstar);
Matx31d n_m = Mat(n);
Matx33d temp = tstar_m * n_m.t();
temp(0, 0) += 1.0;
temp(1, 1) += 1.0;
temp(2, 2) += 1.0;
motion.R = getHnorm() * temp.inv();
motion.t = motion.R * tstar;
motion.n = n;
return passesSameSideOfPlaneConstraint(motion);
}
void HomographyDecompZhang::decompose(std::vector<CameraMotion>& camMotions)
{
Mat W, U, Vt;
SVD::compute(getHnorm(), W, U, Vt);
double lambda1=W.at<double>(0);
double lambda3=W.at<double>(2);
double lambda1m3 = (lambda1-lambda3);
double lambda1m3_2 = lambda1m3*lambda1m3;
double lambda1t3 = lambda1*lambda3;
double t1 = 1.0/(2.0*lambda1t3);
double t2 = sqrt(1.0+4.0*lambda1t3/lambda1m3_2);
double t12 = t1*t2;
double e1 = -t1 + t12; //t1*(-1.0f + t2 );
double e3 = -t1 - t12; //t1*(-1.0f - t2);
double e1_2 = e1*e1;
double e3_2 = e3*e3;
double nv1p = sqrt(e1_2*lambda1m3_2 + 2*e1*(lambda1t3-1) + 1.0);
double nv3p = sqrt(e3_2*lambda1m3_2 + 2*e3*(lambda1t3-1) + 1.0);
double v1p[3], v3p[3];
v1p[0]=Vt.at<double>(0)*nv1p, v1p[1]=Vt.at<double>(1)*nv1p, v1p[2]=Vt.at<double>(2)*nv1p;
v3p[0]=Vt.at<double>(6)*nv3p, v3p[1]=Vt.at<double>(7)*nv3p, v3p[2]=Vt.at<double>(8)*nv3p;
/*The eight solutions are
(A): tstar = +- (v1p - v3p)/(e1 -e3), n = +- (e1*v3p - e3*v1p)/(e1-e3)
(B): tstar = +- (v1p + v3p)/(e1 -e3), n = +- (e1*v3p + e3*v1p)/(e1-e3)
*/
double v1pmv3p[3], v1ppv3p[3];
double e1v3me3v1[3], e1v3pe3v1[3];
double inv_e1me3 = 1.0/(e1-e3);
for(int kk=0;kk<3;++kk){
v1pmv3p[kk] = v1p[kk]-v3p[kk];
v1ppv3p[kk] = v1p[kk]+v3p[kk];
}
for(int kk=0; kk<3; ++kk){
double e1v3 = e1*v3p[kk];
double e3v1=e3*v1p[kk];
e1v3me3v1[kk] = e1v3-e3v1;
e1v3pe3v1[kk] = e1v3+e3v1;
}
Vec3d tstar_p, tstar_n;
Vec3d n_p, n_n;
///Solution group A
for(int kk=0; kk<3; ++kk) {
tstar_p[kk] = v1pmv3p[kk]*inv_e1me3;
tstar_n[kk] = -tstar_p[kk];
n_p[kk] = e1v3me3v1[kk]*inv_e1me3;
n_n[kk] = -n_p[kk];
}
CameraMotion cmotion;
//(A) Four different combinations for solution A
// (i) (+, +)
if (findMotionFrom_tstar_n(tstar_p, n_p, cmotion))
camMotions.push_back(cmotion);
// (ii) (+, -)
if (findMotionFrom_tstar_n(tstar_p, n_n, cmotion))
camMotions.push_back(cmotion);
// (iii) (-, +)
if (findMotionFrom_tstar_n(tstar_n, n_p, cmotion))
camMotions.push_back(cmotion);
// (iv) (-, -)
if (findMotionFrom_tstar_n(tstar_n, n_n, cmotion))
camMotions.push_back(cmotion);
//////////////////////////////////////////////////////////////////
///Solution group B
for(int kk=0;kk<3;++kk){
tstar_p[kk] = v1ppv3p[kk]*inv_e1me3;
tstar_n[kk] = -tstar_p[kk];
n_p[kk] = e1v3pe3v1[kk]*inv_e1me3;
n_n[kk] = -n_p[kk];
}
//(B) Four different combinations for solution B
// (i) (+, +)
if (findMotionFrom_tstar_n(tstar_p, n_p, cmotion))
camMotions.push_back(cmotion);
// (ii) (+, -)
if (findMotionFrom_tstar_n(tstar_p, n_n, cmotion))
camMotions.push_back(cmotion);
// (iii) (-, +)
if (findMotionFrom_tstar_n(tstar_n, n_p, cmotion))
camMotions.push_back(cmotion);
// (iv) (-, -)
if (findMotionFrom_tstar_n(tstar_n, n_n, cmotion))
camMotions.push_back(cmotion);
}
double HomographyDecompInria::oppositeOfMinor(const Matx33d& M, const int row, const int col)
{
int x1 = col == 0 ? 1 : 0;
int x2 = col == 2 ? 1 : 2;
int y1 = row == 0 ? 1 : 0;
int y2 = row == 2 ? 1 : 2;
return (M(y1, x2) * M(y2, x1) - M(y1, x1) * M(y2, x2));
}
//computes R = H( I - (2/v)*te_star*ne_t )
void HomographyDecompInria::findRmatFrom_tstar_n(const cv::Vec3d& tstar, const cv::Vec3d& n, const double v, cv::Matx33d& R)
{
Matx31d tstar_m = Matx31d(tstar);
Matx31d n_m = Matx31d(n);
Matx33d I(1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0);
R = getHnorm() * (I - (2/v) * tstar_m * n_m.t() );
}
void HomographyDecompInria::decompose(std::vector<CameraMotion>& camMotions)
{
const double epsilon = 0.001;
Matx33d S;
//S = H'H - I
S = getHnorm().t() * getHnorm();
S(0, 0) -= 1.0;
S(1, 1) -= 1.0;
S(2, 2) -= 1.0;
//check if H is rotation matrix
if( norm(S, NORM_INF) < epsilon) {
CameraMotion motion;
motion.R = Matx33d(getHnorm());
motion.t = Vec3d(0, 0, 0);
motion.n = Vec3d(0, 0, 0);
camMotions.push_back(motion);
return;
}
//! Compute nvectors
Vec3d npa, npb;
double M00 = oppositeOfMinor(S, 0, 0);
double M11 = oppositeOfMinor(S, 1, 1);
double M22 = oppositeOfMinor(S, 2, 2);
double rtM00 = sqrt(M00);
double rtM11 = sqrt(M11);
double rtM22 = sqrt(M22);
double M01 = oppositeOfMinor(S, 0, 1);
double M12 = oppositeOfMinor(S, 1, 2);
double M02 = oppositeOfMinor(S, 0, 2);
int e12 = signd(M12);
int e02 = signd(M02);
int e01 = signd(M01);
double nS00 = abs(S(0, 0));
double nS11 = abs(S(1, 1));
double nS22 = abs(S(2, 2));
//find max( |Sii| ), i=0, 1, 2
int indx = 0;
if(nS00 < nS11){
indx = 1;
if( nS11 < nS22 )
indx = 2;
}
else {
if(nS00 < nS22 )
indx = 2;
}
switch (indx) {
case 0:
npa[0] = S(0, 0), npb[0] = S(0, 0);
npa[1] = S(0, 1) + rtM22, npb[1] = S(0, 1) - rtM22;
npa[2] = S(0, 2) + e12 * rtM11, npb[2] = S(0, 2) - e12 * rtM11;
break;
case 1:
npa[0] = S(0, 1) + rtM22, npb[0] = S(0, 1) - rtM22;
npa[1] = S(1, 1), npb[1] = S(1, 1);
npa[2] = S(1, 2) - e02 * rtM00, npb[2] = S(1, 2) + e02 * rtM00;
break;
case 2:
npa[0] = S(0, 2) + e01 * rtM11, npb[0] = S(0, 2) - e01 * rtM11;
npa[1] = S(1, 2) + rtM00, npb[1] = S(1, 2) - rtM00;
npa[2] = S(2, 2), npb[2] = S(2, 2);
break;
default:
break;
}
double traceS = S(0, 0) + S(1, 1) + S(2, 2);
double v = 2.0 * sqrt(1 + traceS - M00 - M11 - M22);
double ESii = signd(S(indx, indx));
double r_2 = 2 + traceS + v;
double nt_2 = 2 + traceS - v;
double r = sqrt(r_2);
double n_t = sqrt(nt_2);
Vec3d na = npa / norm(npa);
Vec3d nb = npb / norm(npb);
double half_nt = 0.5 * n_t;
double esii_t_r = ESii * r;
Vec3d ta_star = half_nt * (esii_t_r * nb - n_t * na);
Vec3d tb_star = half_nt * (esii_t_r * na - n_t * nb);
camMotions.resize(4);
Matx33d Ra, Rb;
Vec3d ta, tb;
//Ra, ta, na
findRmatFrom_tstar_n(ta_star, na, v, Ra);
ta = Ra * ta_star;
camMotions[0].R = Ra;
camMotions[0].t = ta;
camMotions[0].n = na;
//Ra, -ta, -na
camMotions[1].R = Ra;
camMotions[1].t = -ta;
camMotions[1].n = -na;
//Rb, tb, nb
findRmatFrom_tstar_n(tb_star, nb, v, Rb);
tb = Rb * tb_star;
camMotions[2].R = Rb;
camMotions[2].t = tb;
camMotions[2].n = nb;
//Rb, -tb, -nb
camMotions[3].R = Rb;
camMotions[3].t = -tb;
camMotions[3].n = -nb;
}
} //namespace HomographyDecomposition
// function decomposes image-to-image homography to rotation and translation matrices
int decomposeHomographyMat(InputArray _H,
InputArray _K,
OutputArrayOfArrays _rotations,
OutputArrayOfArrays _translations,
OutputArrayOfArrays _normals)
{
using namespace std;
using namespace HomographyDecomposition;
Mat H = _H.getMat().reshape(1, 3);
CV_Assert(H.cols == 3 && H.rows == 3);
Mat K = _K.getMat().reshape(1, 3);
CV_Assert(K.cols == 3 && K.rows == 3);
auto_ptr<HomographyDecomp> hdecomp(new HomographyDecompInria);
vector<CameraMotion> motions;
hdecomp->decomposeHomography(H, K, motions);
int nsols = static_cast<int>(motions.size());
int depth = CV_64F; //double precision matrices used in CameraMotion struct
if (_rotations.needed()) {
_rotations.create(nsols, 1, depth);
for (int k = 0; k < nsols; ++k ) {
_rotations.getMatRef(k) = Mat(motions[k].R);
}
}
if (_translations.needed()) {
_translations.create(nsols, 1, depth);
for (int k = 0; k < nsols; ++k ) {
_translations.getMatRef(k) = Mat(motions[k].t);
}
}
if (_normals.needed()) {
_normals.create(nsols, 1, depth);
for (int k = 0; k < nsols; ++k ) {
_normals.getMatRef(k) = Mat(motions[k].n);
}
}
return nsols;
}
} //namespace cv

View File

@ -0,0 +1,138 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// This is a test file for the function decomposeHomography contributed to OpenCV
// by Samson Yilma.
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, Samson Yilma (samson_yilma@yahoo.com), all rights reserved.
//
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d.hpp"
#include <vector>
using namespace cv;
using namespace std;
class CV_HomographyDecompTest: public cvtest::BaseTest {
public:
CV_HomographyDecompTest()
{
buildTestDataSet();
}
protected:
void run(int)
{
vector<Mat> rotations;
vector<Mat> translations;
vector<Mat> normals;
decomposeHomographyMat(_H, _K, rotations, translations, normals);
//there should be at least 1 solution
ASSERT_GT(static_cast<int>(rotations.size()), 0);
ASSERT_GT(static_cast<int>(translations.size()), 0);
ASSERT_GT(static_cast<int>(normals.size()), 0);
ASSERT_EQ(rotations.size(), normals.size());
ASSERT_EQ(translations.size(), normals.size());
ASSERT_TRUE(containsValidMotion(rotations, translations, normals));
decomposeHomographyMat(_H, _K, rotations, noArray(), noArray());
ASSERT_GT(static_cast<int>(rotations.size()), 0);
}
private:
void buildTestDataSet()
{
_K = Matx33d(640, 0.0, 320,
0, 640, 240,
0, 0, 1);
_H = Matx33d(2.649157564634028, 4.583875997496426, 70.694447785121326,
-1.072756858861583, 3.533262150437228, 1513.656999614321649,
0.001303887589576, 0.003042206876298, 1.000000000000000
);
//expected solution for the given homography and intrinsic matrices
_R = Matx33d(0.43307983549125, 0.545749113549648, -0.717356090899523,
-0.85630229674426, 0.497582023798831, -0.138414255706431,
0.281404038139784, 0.67421809131173, 0.682818960388909);
_t = Vec3d(1.826751712278038, 1.264718492450820, 0.195080809998819);
_n = Vec3d(0.244875830334816, 0.480857890778889, 0.841909446789566);
}
bool containsValidMotion(std::vector<Mat>& rotations,
std::vector<Mat>& translations,
std::vector<Mat>& normals
)
{
double max_error = 1.0e-3;
vector<Mat>::iterator riter = rotations.begin();
vector<Mat>::iterator titer = translations.begin();
vector<Mat>::iterator niter = normals.begin();
for (;
riter != rotations.end() && titer != translations.end() && niter != normals.end();
++riter, ++titer, ++niter) {
double rdist = norm(*riter, _R, NORM_INF);
double tdist = norm(*titer, _t, NORM_INF);
double ndist = norm(*niter, _n, NORM_INF);
if ( rdist < max_error
&& tdist < max_error
&& ndist < max_error )
return true;
}
return false;
}
Matx33d _R, _K, _H;
Vec3d _t, _n;
};
TEST(Calib3d_DecomposeHomography, regression) { CV_HomographyDecompTest test; test.safe_run(); }

View File

@ -845,7 +845,6 @@ For convenience, the following types from the OpenCV C API already have such a s
that calls the appropriate release function:
* ``CvCapture``
* :ocv:struct:`CvDTreeSplit`
* :ocv:struct:`CvFileStorage`
* ``CvHaarClassifierCascade``
* :ocv:struct:`CvMat`

View File

@ -636,6 +636,9 @@ protected:
CV_EXPORTS MatAllocator* getOpenCLAllocator();
CV_EXPORTS_W bool isPerformanceCheckBypassed();
#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::isPerformanceCheckBypassed() || (condition))
}}
#endif
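The semantics of the new macro, for reference (a standalone illustration, not part of the patch; the helper name is hypothetical):

// OCL_PERFORMANCE_CHECK(cond) expands to
//     (cv::ocl::isPerformanceCheckBypassed() || (cond)),
// so setting OPENCV_OPENCL_PERF_CHECK_BYPASS=1 in the environment makes the
// check pass unconditionally, while by default the original condition decides.
static bool oclPathEligible(bool dstIsUMat)
{
    return OCL_PERFORMANCE_CHECK(dstIsUMat);
}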

View File

@ -1491,6 +1491,9 @@ static bool ocl_arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
if (!doubleSupport && (depth2 == CV_64F || depth1 == CV_64F))
return false;
if( (oclop == OCL_OP_MUL_SCALE || oclop == OCL_OP_DIV_SCALE) && (depth1 >= CV_32F || depth2 >= CV_32F || ddepth >= CV_32F) )
return false;
int kercn = haveMask || haveScalar ? cn : ocl::predictOptimalVectorWidth(_src1, _src2, _dst);
int scalarcn = kercn == 3 ? 4 : kercn, rowsPerWI = d.isIntel() ? 4 : 1;
@ -1604,7 +1607,7 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
Size sz1 = dims1 <= 2 ? psrc1->size() : Size();
Size sz2 = dims2 <= 2 ? psrc2->size() : Size();
#ifdef HAVE_OPENCL
bool use_opencl = _dst.isUMat() && dims1 <= 2 && dims2 <= 2;
bool use_opencl = OCL_PERFORMANCE_CHECK(_dst.isUMat()) && dims1 <= 2 && dims2 <= 2;
#endif
bool src1Scalar = checkScalar(*psrc1, type2, kind1, kind2);
bool src2Scalar = checkScalar(*psrc2, type1, kind2, kind1);
@ -2979,7 +2982,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op)
haveScalar = true;
}
CV_OCL_RUN(_src1.dims() <= 2 && _src2.dims() <= 2 && _dst.isUMat(),
CV_OCL_RUN(_src1.dims() <= 2 && _src2.dims() <= 2 && OCL_PERFORMANCE_CHECK(_dst.isUMat()),
ocl_compare(_src1, _src2, _dst, op, haveScalar))
int kind1 = _src1.kind(), kind2 = _src2.kind();
@ -3497,7 +3500,7 @@ void cv::inRange(InputArray _src, InputArray _lowerb,
InputArray _upperb, OutputArray _dst)
{
CV_OCL_RUN(_src.dims() <= 2 && _lowerb.dims() <= 2 &&
_upperb.dims() <= 2 && _dst.isUMat(),
_upperb.dims() <= 2 && OCL_PERFORMANCE_CHECK(_dst.isUMat()),
ocl_inRange(_src, _lowerb, _upperb, _dst))
int skind = _src.kind(), lkind = _lowerb.kind(), ukind = _upperb.kind();

View File

@ -1541,7 +1541,7 @@ static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha
kercn = ocl::predictOptimalVectorWidth(_src, _dst), rowsPerWI = d.isIntel() ? 4 : 1;
bool doubleSupport = d.doubleFPConfig() > 0;
if (!doubleSupport && depth == CV_64F)
if (depth == CV_32F || depth == CV_64F)
return false;
char cvt[2][50];

View File

@ -432,7 +432,7 @@ Mat& Mat::setTo(InputArray _value, InputArray _mask)
IppStatus status = (IppStatus)-1;
IppiSize roisize = { cols, rows };
int mstep = (int)mask.step, dstep = (int)step;
int mstep = (int)mask.step[0], dstep = (int)step[0];
if (isContinuous() && mask.isContinuous())
{
@ -616,7 +616,7 @@ static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
{
CV_Assert(flipCode >= -1 && flipCode <= 1);
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);;
flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
if (cn > 4)
return false;
@ -631,7 +631,7 @@ static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
ocl::Device dev = ocl::Device::getDefault();
int pxPerWIy = (dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU)) ? 4 : 1;
kercn = std::max(kercn, cn);
kercn = (cn!=3 || flipType == FLIP_ROWS) ? std::max(kercn, cn) : cn;
ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
format( "-D T=%s -D T1=%s -D cn=%d -D PIX_PER_WI_Y=%d -D kercn=%d",
@ -762,7 +762,7 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode )
flipHoriz( dst.data, dst.step, dst.data, dst.step, dst.size(), esz );
}
#ifdef HAVE_OPENCL
/*#ifdef HAVE_OPENCL
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
@ -790,7 +790,7 @@ static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
return k.run(2, globalsize, NULL, false);
}
#endif
#endif*/
void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
@ -800,8 +800,8 @@ void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
Size ssize = _src.size();
_dst.create(ssize.height*ny, ssize.width*nx, _src.type());
CV_OCL_RUN(_dst.isUMat(),
ocl_repeat(_src, ny, nx, _dst))
/*CV_OCL_RUN(_dst.isUMat(),
ocl_repeat(_src, ny, nx, _dst))*/
Mat src = _src.getMat(), dst = _dst.getMat();
Size dsize = dst.size();

View File

@ -1557,13 +1557,17 @@ static void _SVDcompute( InputArray _aarr, OutputArray _w,
{
if( !at )
{
transpose(temp_u, _u);
temp_v.copyTo(_vt);
if( _u.needed() )
transpose(temp_u, _u);
if( _vt.needed() )
temp_v.copyTo(_vt);
}
else
{
transpose(temp_v, _u);
temp_u.copyTo(_vt);
if( _u.needed() )
transpose(temp_v, _u);
if( _vt.needed() )
temp_u.copyTo(_vt);
}
}
}

View File

@ -3336,7 +3336,7 @@ static inline void reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& ds
stype == CV_32FC3 ? (ippiSumHint)ippiSum_32f_C3R :
stype == CV_32FC4 ? (ippiSumHint)ippiSum_32f_C4R : 0;
func =
sdepth == CV_8U ? (cv::ReduceFunc)cv::reduceC_<uchar, double, cv::OpAdd<double> > :
sdepth == CV_8U ? (cv::ReduceFunc)cv::reduceC_<uchar, double, cv::OpAdd<double> > :
sdepth == CV_16U ? (cv::ReduceFunc)cv::reduceC_<ushort, double, cv::OpAdd<double> > :
sdepth == CV_16S ? (cv::ReduceFunc)cv::reduceC_<short, double, cv::OpAdd<double> > :
sdepth == CV_32F ? (cv::ReduceFunc)cv::reduceC_<float, double, cv::OpAdd<double> > : 0;
@ -3459,6 +3459,9 @@ static bool ocl_reduce(InputArray _src, OutputArray _dst,
if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
return false;
if ((op == CV_REDUCE_SUM && sdepth == CV_32F) || op == CV_REDUCE_MIN || op == CV_REDUCE_MAX)
return false;
if (op == CV_REDUCE_AVG)
{
if (sdepth < CV_32S && ddepth < CV_32S)

View File

@ -57,6 +57,28 @@
# endif
#endif
// TODO Move to some common place
static bool getBoolParameter(const char* name, bool defaultValue)
{
const char* envValue = getenv(name);
if (envValue == NULL)
{
return defaultValue;
}
cv::String value = envValue;
if (value == "1" || value == "True" || value == "true" || value == "TRUE")
{
return true;
}
if (value == "0" || value == "False" || value == "false" || value == "FALSE")
{
return false;
}
CV_ErrorNoReturn(cv::Error::StsBadArg, cv::format("Invalid value for %s parameter: %s", name, value.c_str()));
}
// TODO Move to some common place
static size_t getConfigurationParameterForSize(const char* name, size_t defaultValue)
{
@ -1302,10 +1324,22 @@ OCL_FUNC(cl_int, clReleaseEvent, (cl_event event), (event))
#endif
static bool isRaiseError()
{
static bool initialized = false;
static bool value = false;
if (!initialized)
{
value = getBoolParameter("OPENCV_OPENCL_RAISE_ERROR", false);
initialized = true;
}
return value;
}
#ifdef _DEBUG
#define CV_OclDbgAssert CV_DbgAssert
#else
#define CV_OclDbgAssert(expr) (void)(expr)
#define CV_OclDbgAssert(expr) do { if (isRaiseError()) { CV_Assert(expr); } else { (void)(expr); } } while ((void)0, 0)
#endif
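// A minimal self-contained sketch of the lazily-initialized environment-flag
// pattern used by isRaiseError() above (the function name here is
// illustrative, not part of the patch):
//
//     #include <cstdlib>
//     #include <cstring>
//     static bool envFlagOnce(const char* name)
//     {
//         static bool initialized = false;
//         static bool value = false;
//         if (!initialized)
//         {
//             const char* env = std::getenv(name);
//             value = (env != NULL) && std::strcmp(env, "1") == 0;
//             initialized = true;
//         }
//         return value;
//     }
//
// e.g. running with OPENCV_OPENCL_RAISE_ERROR=1 makes CV_OclDbgAssert(expr)
// raise a cv::Exception on a failed OpenCL status check instead of
// discarding it.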
namespace cv { namespace ocl {
@ -4711,4 +4745,16 @@ void* Image2D::ptr() const
return p ? p->handle : 0;
}
bool isPerformanceCheckBypassed()
{
static bool initialized = false;
static bool value = false;
if (!initialized)
{
value = getBoolParameter("OPENCV_OPENCL_PERF_CHECK_BYPASS", false);
initialized = true;
}
return value;
}
}}

View File

@ -59,7 +59,7 @@ __kernel void meanStdDev(__global const uchar * srcptr, int src_step, int src_of
for (int grain = groups * WGS; id < total; id += grain)
{
#ifdef HAVE_MASK
#ifdef HAVE_SRC_CONT
#ifdef HAVE_MASK_CONT
int mask_index = id;
#else
int mask_index = mad24(id / cols, mask_step, id % cols);
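// mad24(a, b, c) is OpenCL's 24-bit a*b + c; for a non-continuous mask the
// flat id is split into (id / cols, id % cols) and re-based on mask_step,
// which is why the guard above must test HAVE_MASK_CONT rather than
// HAVE_SRC_CONT.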

View File

@ -209,7 +209,7 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off
#if kercn == 1
#ifdef NEED_MINVAL
#if NEED_MINLOC
#ifdef NEED_MINLOC
if (minval > temp)
{
minval = temp;
@ -326,7 +326,7 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off
int lid2 = lsize + lid;
#ifdef NEED_MINVAL
#ifdef NEED_MAXLOC
#ifdef NEED_MINLOC
if (localmem_min[lid] >= localmem_min[lid2])
{
if (localmem_min[lid] == localmem_min[lid2])

View File

@ -568,7 +568,7 @@ cv::Scalar cv::sum( InputArray _src )
{
#ifdef HAVE_OPENCL
Scalar _res;
CV_OCL_RUN_(_src.isUMat() && _src.dims() <= 2,
CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2,
ocl_sum(_src, _res, OCL_OP_SUM),
_res)
#endif
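// Hedged note: OCL_PERFORMANCE_CHECK presumably wraps the heuristic so that
// OPENCV_OPENCL_PERF_CHECK_BYPASS=1 (read via isPerformanceCheckBypassed()
// above) forces the OpenCL branch even when the condition alone would
// reject it, i.e. something along the lines of
//     #define OCL_PERFORMANCE_CHECK(cond) (isPerformanceCheckBypassed() || (cond))
// The exact expansion is not shown in this diff.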
@ -719,7 +719,7 @@ int cv::countNonZero( InputArray _src )
#ifdef HAVE_OPENCL
int res = -1;
CV_OCL_RUN_(_src.isUMat() && _src.dims() <= 2,
CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2,
ocl_countNonZero(_src, res),
res)
#endif
@ -918,7 +918,8 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv
{
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
isContinuous = _src.isContinuous();
isContinuous = _src.isContinuous(),
isMaskContinuous = _mask.isContinuous();
const ocl::Device &defDev = ocl::Device::getDefault();
int groups = defDev.maxComputeUnits();
if (defDev.isIntel())
@ -943,13 +944,14 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv
char cvt[2][40];
String opts = format("-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D sqddepth=%d"
" -D sqdstT=%s -D sqdstT1=%s -D convertToSDT=%s -D cn=%d%s"
" -D sqdstT=%s -D sqdstT1=%s -D convertToSDT=%s -D cn=%d%s%s"
" -D convertToDT=%s -D WGS=%d -D WGS2_ALIGNED=%d%s%s",
ocl::typeToStr(type), ocl::typeToStr(depth),
ocl::typeToStr(dtype), ocl::typeToStr(ddepth), sqddepth,
ocl::typeToStr(sqdtype), ocl::typeToStr(sqddepth),
ocl::convertTypeStr(depth, sqddepth, cn, cvt[0]),
cn, isContinuous ? " -D HAVE_SRC_CONT" : "",
isMaskContinuous ? " -D HAVE_MASK_CONT" : "",
ocl::convertTypeStr(depth, ddepth, cn, cvt[1]),
(int)wgs, wgs2_aligned, haveMask ? " -D HAVE_MASK" : "",
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
@ -1025,7 +1027,7 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv
void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask )
{
CV_OCL_RUN(_src.isUMat() && _src.dims() <= 2,
CV_OCL_RUN(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2,
ocl_meanStdDev(_src, _mean, _sdv, _mask))
Mat src = _src.getMat(), mask = _mask.getMat();
@ -1571,7 +1573,7 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
CV_Assert( (cn == 1 && (_mask.empty() || _mask.type() == CV_8U)) ||
(cn > 1 && _mask.empty() && !minIdx && !maxIdx) );
CV_OCL_RUN(_src.isUMat() && _src.dims() <= 2 && (_mask.empty() || _src.size() == _mask.size()),
CV_OCL_RUN(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2 && (_mask.empty() || _src.size() == _mask.size()),
ocl_minMaxIdx(_src, minVal, maxVal, minIdx, maxIdx, _mask))
Mat src = _src.getMat(), mask = _mask.getMat();
@ -2234,7 +2236,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
#ifdef HAVE_OPENCL
double _result = 0;
CV_OCL_RUN_(_src.isUMat() && _src.dims() <= 2,
CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2,
ocl_norm(_src, normType, _mask, _result),
_result)
#endif
@ -2283,7 +2285,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
setIppErrorStatus();
}
typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *);
/*typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *);
ippiMaskNormFuncC3 ippFuncC3 =
normType == NORM_INF ?
(type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_8u_C3CMR :
@ -2318,7 +2320,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
}
setIppErrorStatus();
}
}*/
}
else
{
@ -2594,7 +2596,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
#ifdef HAVE_OPENCL
double _result = 0;
CV_OCL_RUN_(_src1.isUMat(),
CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src1.isUMat()),
ocl_norm(_src1, _src2, normType, _mask, _result),
_result)
#endif
@ -2724,7 +2726,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
0) :
normType == NORM_L1 ?
(type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8u_C1MR :
type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8s_C1MR :
//type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8s_C1MR :
type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_16u_C1MR :
type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_32f_C1MR :
0) :
@ -2741,7 +2743,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
setIppErrorStatus();
}
typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC3)(const void *, int, const void *, int, const void *, int, IppiSize, int, Ipp64f *);
/*typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC3)(const void *, int, const void *, int, const void *, int, IppiSize, int, Ipp64f *);
ippiMaskNormDiffFuncC3 ippFuncC3 =
normType == NORM_INF ?
(type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_8u_C3CMR :
@ -2776,7 +2778,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm;
}
setIppErrorStatus();
}
}*/
}
else
{

View File

@ -157,6 +157,7 @@ PARAM_TEST_CASE(ArithmTestBase, MatDepth, Channels, bool)
Border maskBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
randomSubMat(mask, mask_roi, roiSize, maskBorder, CV_8UC1, 0, 2);
cv::threshold(mask, mask, 0.5, 255., CV_8UC1);
*mask.ptr(0) = 255; // ensure the mask is not entirely zero
val = cv::Scalar(rng.uniform(-100.0, 100.0), rng.uniform(-100.0, 100.0),
rng.uniform(-100.0, 100.0), rng.uniform(-100.0, 100.0));
@ -829,7 +830,7 @@ OCL_TEST_P(Pow, Mat)
{
static const double pows[] = { -4, -1, -2.5, 0, 1, 2, 3.7, 4 };
for (int j = 0; j < test_loop_times; j++)
for (int j = 0; j < 1/*test_loop_times*/; j++)
for (int k = 0, size = sizeof(pows) / sizeof(double); k < size; ++k)
{
SCOPED_TRACE(pows[k]);
@ -1203,7 +1204,7 @@ OCL_TEST_P(MinMaxIdx_Mask, Mat)
static bool relativeError(double actual, double expected, double eps)
{
return std::abs(actual - expected) / actual < eps;
return std::abs(actual - expected) < eps*(1 + std::abs(actual));
}
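// Worked comparison of the two forms (values illustrative): with
// actual = 0, expected = 5e-7, eps = 1e-6, the old expression divides by
// zero, while the new one checks 5e-7 < 1e-6 * (1 + 0) and passes. For
// large |actual| the eps * (1 + |actual|) bound behaves like a relative
// tolerance, and near zero it degrades gracefully into an absolute one.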
typedef ArithmTestBase Norm;
@ -1230,7 +1231,7 @@ OCL_TEST_P(Norm, NORM_INF_1arg_mask)
OCL_OFF(const double cpuRes = cv::norm(src1_roi, NORM_INF, mask_roi));
OCL_ON(const double gpuRes = cv::norm(usrc1_roi, NORM_INF, umask_roi));
EXPECT_NEAR(cpuRes, gpuRes, 0.1);
EXPECT_NEAR(cpuRes, gpuRes, 0.2);
}
}
@ -1302,7 +1303,7 @@ OCL_TEST_P(Norm, NORM_INF_2args)
OCL_OFF(const double cpuRes = cv::norm(src1_roi, src2_roi, type));
OCL_ON(const double gpuRes = cv::norm(usrc1_roi, usrc2_roi, type));
EXPECT_NEAR(cpuRes, gpuRes, 0.1);
EXPECT_NEAR(cpuRes, gpuRes, 0.2);
}
}
@ -1419,7 +1420,7 @@ OCL_TEST_P(UMatDot, Mat)
OCL_OFF(const double cpuRes = src1_roi.dot(src2_roi));
OCL_ON(const double gpuRes = usrc1_roi.dot(usrc2_roi));
EXPECT_PRED3(relativeError, cpuRes, gpuRes, 1e-6);
EXPECT_PRED3(relativeError, cpuRes, gpuRes, 1e-5);
}
}
@ -1749,7 +1750,7 @@ OCL_TEST_P(ReduceAvg, Mat)
OCL_OFF(cv::reduce(src_roi, dst_roi, dim, CV_REDUCE_AVG, dtype));
OCL_ON(cv::reduce(usrc_roi, udst_roi, dim, CV_REDUCE_AVG, dtype));
double eps = ddepth <= CV_32S ? 1 : 5e-6;
double eps = ddepth <= CV_32S ? 1 : 6e-6;
OCL_EXPECT_MATS_NEAR(dst, eps);
}
}

View File

@ -105,6 +105,7 @@ PARAM_TEST_CASE(Merge, MatDepth, int, bool)
UMAT_UPLOAD_INPUT_PARAMETER(src3);
UMAT_UPLOAD_INPUT_PARAMETER(src4);
src_roi.clear(); usrc_roi.clear(); // for test_loop_times > 1
src_roi.push_back(src1_roi), usrc_roi.push_back(usrc1_roi);
if (nsrc >= 2)
src_roi.push_back(src2_roi), usrc_roi.push_back(usrc2_roi);

View File

@ -96,7 +96,7 @@ OCL_TEST_P(ConvertTo, Accuracy)
OCL_OFF(src_roi.convertTo(dst_roi, dstType, alpha, beta));
OCL_ON(usrc_roi.convertTo(udst_roi, dstType, alpha, beta));
double eps = src_depth >= CV_32F || CV_MAT_DEPTH(dstType) >= CV_32F ? 1e-4 : 1;
double eps = CV_MAT_DEPTH(dstType) >= CV_32F ? 2e-4 : 1;
OCL_EXPECT_MATS_NEAR(dst, eps);
}
}
@ -121,7 +121,7 @@ PARAM_TEST_CASE(CopyTo, MatDepth, Channels, bool, bool)
use_mask = GET_PARAM(3);
}
void generateTestData()
void generateTestData(bool one_cn_mask = false)
{
const int type = CV_MAKE_TYPE(depth, cn);
@ -132,9 +132,11 @@ PARAM_TEST_CASE(CopyTo, MatDepth, Channels, bool, bool)
if (use_mask)
{
Border maskBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
int mask_cn = randomDouble(0.0, 2.0) > 1.0 ? cn : 1;
int mask_cn = 1;
if (!one_cn_mask && randomDouble(0.0, 2.0) > 1.0)
mask_cn = cn;
randomSubMat(mask, mask_roi, roiSize, maskBorder, CV_8UC(mask_cn), 0, 2);
cv::threshold(mask, mask, 0.5, 255., CV_8UC1);
cv::threshold(mask, mask, 0.5, 255., THRESH_BINARY);
}
Border dstBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
@ -177,7 +179,7 @@ OCL_TEST_P(SetTo, Accuracy)
{
for (int j = 0; j < test_loop_times; j++)
{
generateTestData();
generateTestData(true); // see modules/core/src/umatrix.cpp Ln:791 => CV_Assert( mask.size() == size() && mask.type() == CV_8UC1 );
if (use_mask)
{

View File

@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations)
ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_legacy opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)
ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)

View File

@ -42,10 +42,6 @@
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CUDALEGACY
# include "opencv2/cudalegacy.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
@ -72,18 +68,6 @@ using namespace perf;
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
#ifdef HAVE_OPENCV_CUDALEGACY
namespace cv
{
template<> void DefaultDeleter<CvBGStatModel>::operator ()(CvBGStatModel* obj) const
{
cvReleaseBGStatModel(&obj);
}
}
#endif
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
@ -150,48 +134,7 @@ PERF_TEST_P(Video, FGDStatModel,
}
else
{
#ifdef HAVE_OPENCV_CUDALEGACY
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
startTimer();
if(!next())
break;
cvUpdateBGStatModel(&ipl_frame, model);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
cvUpdateBGStatModel(&ipl_frame, model);
}
const cv::Mat background = cv::cvarrToMat(model->background);
const cv::Mat foreground = cv::cvarrToMat(model->foreground);
CPU_SANITY_CHECK(background);
CPU_SANITY_CHECK(foreground);
#else
FAIL_NO_CPU();
#endif
}
}

View File

@ -42,10 +42,6 @@
#include "test_precomp.hpp"
#ifdef HAVE_OPENCV_CUDALEGACY
# include "opencv2/cudalegacy.hpp"
#endif
#ifdef HAVE_CUDA
using namespace cvtest;
@ -63,80 +59,6 @@ using namespace cvtest;
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined(HAVE_OPENCV_CUDALEGACY)
namespace cv
{
template<> void DefaultDeleter<CvBGStatModel>::operator ()(CvBGStatModel* obj) const
{
cvReleaseBGStatModel(&obj);
}
}
PARAM_TEST_CASE(FGDStatModel, cv::cuda::DeviceInfo, std::string)
{
cv::cuda::DeviceInfo devInfo;
std::string inputFile;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::cuda::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
}
};
CUDA_TEST_P(FGDStatModel, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
cv::cuda::GpuMat d_frame(frame);
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
cv::cuda::GpuMat d_foreground, d_background;
std::vector< std::vector<cv::Point> > foreground_regions;
d_fgd->apply(d_frame, d_foreground);
for (int i = 0; i < 5; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
d_frame.upload(frame);
d_fgd->apply(d_frame, d_foreground);
d_fgd->getBackgroundImage(d_background);
d_fgd->getForegroundRegions(foreground_regions);
int count = (int) foreground_regions.size();
cv::Mat gold_background = cv::cvarrToMat(model->background);
cv::Mat gold_foreground = cv::cvarrToMat(model->foreground);
ASSERT_MAT_NEAR(gold_background, d_background, 1.0);
ASSERT_MAT_NEAR(gold_foreground, d_foreground, 0.0);
ASSERT_EQ(gold_count, count);
}
}
INSTANTIATE_TEST_CASE_P(CUDA_BgSegm, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"))));
#endif
//////////////////////////////////////////////////////
// MOG

View File

@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Optical Flow")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations)
ocv_define_module(cudaoptflow opencv_video opencv_legacy opencv_cudaarithm opencv_cudawarping opencv_cudaimgproc OPTIONAL opencv_cudalegacy)
ocv_define_module(cudaoptflow opencv_video opencv_cudaarithm opencv_cudawarping opencv_cudaimgproc OPTIONAL opencv_cudalegacy)

View File

@ -41,7 +41,6 @@
//M*/
#include "perf_precomp.hpp"
#include "opencv2/legacy.hpp"
using namespace std;
using namespace testing;
@ -389,24 +388,6 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
//////////////////////////////////////////////////////
// OpticalFlowBM
void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
cv::Mat& velx, cv::Mat& vely)
{
cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
velx.create(sz, CV_32FC1);
vely.create(sz, CV_32FC1);
CvMat cvprev = prev;
CvMat cvcurr = curr;
CvMat cvvelx = velx;
CvMat cvvely = vely;
cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
}
PERF_TEST_P(ImagePair, OpticalFlowBM,
Values<pair_string>(make_pair("gpu/opticalflow/frame0.png", "gpu/opticalflow/frame1.png")))
{
@ -435,12 +416,7 @@ PERF_TEST_P(ImagePair, OpticalFlowBM,
}
else
{
cv::Mat u, v;
TEST_CYCLE() calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, u, v);
CPU_SANITY_CHECK(u);
CPU_SANITY_CHECK(v);
FAIL_NO_CPU();
}
}

View File

@ -41,7 +41,6 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/legacy.hpp"
#ifdef HAVE_CUDA
@ -370,65 +369,6 @@ INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowDual_TVL1, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// OpticalFlowBM
namespace
{
void calcOpticalFlowBM(const cv::Mat& prev, const cv::Mat& curr,
cv::Size bSize, cv::Size shiftSize, cv::Size maxRange, int usePrevious,
cv::Mat& velx, cv::Mat& vely)
{
cv::Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
velx.create(sz, CV_32FC1);
vely.create(sz, CV_32FC1);
CvMat cvprev = prev;
CvMat cvcurr = curr;
CvMat cvvelx = velx;
CvMat cvvely = vely;
cvCalcOpticalFlowBM(&cvprev, &cvcurr, bSize, shiftSize, maxRange, usePrevious, &cvvelx, &cvvely);
}
}
struct OpticalFlowBM : testing::TestWithParam<cv::cuda::DeviceInfo>
{
};
CUDA_TEST_P(OpticalFlowBM, Accuracy)
{
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::resize(frame0, frame0, cv::Size(), 0.5, 0.5);
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
cv::resize(frame1, frame1, cv::Size(), 0.5, 0.5);
cv::Size block_size(8, 8);
cv::Size shift_size(1, 1);
cv::Size max_range(8, 8);
cv::cuda::GpuMat d_velx, d_vely, buf;
cv::cuda::calcOpticalFlowBM(loadMat(frame0), loadMat(frame1),
block_size, shift_size, max_range, false,
d_velx, d_vely, buf);
cv::Mat velx, vely;
calcOpticalFlowBM(frame0, frame1, block_size, shift_size, max_range, false, velx, vely);
EXPECT_MAT_NEAR(velx, d_velx, 0);
EXPECT_MAT_NEAR(vely, d_vely, 0);
}
INSTANTIATE_TEST_CASE_P(CUDA_OptFlow, OpticalFlowBM, ALL_DEVICES);
//////////////////////////////////////////////////////
// FastOpticalFlowBM

View File

@ -2038,6 +2038,10 @@ struct Luv2RGB_f
float G = X*C3 + Y*C4 + Z*C5;
float B = X*C6 + Y*C7 + Z*C8;
R = std::min(std::max(R, 0.f), 1.f);
G = std::min(std::max(G, 0.f), 1.f);
B = std::min(std::max(B, 0.f), 1.f);
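// Clamping to [0, 1] before the gamma lookup keeps R*gscale inside the
// spline table's domain; slightly out-of-gamut XYZ->RGB results could
// otherwise index past the GAMMA_TAB_SIZE entries.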
if( gammaTab )
{
R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE);

View File

@ -66,6 +66,11 @@ public:
return 0;
}
int bayer2RGBA(const T*, int, T*, int, int) const
{
return 0;
}
int bayer2RGB_EA(const T*, int, T*, int, int) const
{
return 0;
@ -218,6 +223,11 @@ public:
return (int)(bayer - (bayer_end - width));
}
int bayer2RGBA(const uchar*, int, uchar*, int, int) const
{
return 0;
}
int bayer2RGB_EA(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
{
if (!use_simd)
@ -323,6 +333,165 @@ public:
bool use_simd;
};
#elif CV_NEON
class SIMDBayerInterpolator_8u
{
public:
SIMDBayerInterpolator_8u()
{
}
int bayer2Gray(const uchar* bayer, int bayer_step, uchar* dst,
int width, int bcoeff, int gcoeff, int rcoeff) const
{
/*
B G B G | B G B G | B G B G | B G B G
G R G R | G R G R | G R G R | G R G R
B G B G | B G B G | B G B G | B G B G
*/
uint16x8_t masklo = vdupq_n_u16(255);
const uchar* bayer_end = bayer + width;
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 )
{
uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));
uint16x8_t b1_ = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
uint16x8_t b1 = vextq_u16(b1_, b1_, 1);
uint16x8_t b0 = vaddq_u16(b1_, b1);
// b0 = b0 b2 b4 ...
// b1 = b1 b3 b5 ...
uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
uint16x8_t g1 = vandq_u16(r1, masklo);
g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
g1 = vshlq_n_u16(vextq_u16(g1, g1, 1), 2);
// g0 = g0 g2 g4 ...
// g1 = g1 g3 g5 ...
r0 = vshrq_n_u16(r1, 8);
r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));
r0 = vshlq_n_u16(r0, 2);
// r0 = r0 r2 r4 ...
// r1 = r1 r3 r5 ...
b0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(b0), (short)(rcoeff*2)));
b1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(b1), (short)(rcoeff*4)));
g0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(g0), (short)(gcoeff*2)));
g1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(g1), (short)(gcoeff*2)));
r0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(r0), (short)(bcoeff*2)));
r1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(r1), (short)(bcoeff*4)));
g0 = vaddq_u16(vaddq_u16(g0, b0), r0);
g1 = vaddq_u16(vaddq_u16(g1, b1), r1);
uint8x8x2_t p = vzip_u8(vrshrn_n_u16(g0, 2), vrshrn_n_u16(g1, 2));
vst1_u8(dst, p.val[0]);
vst1_u8(dst + 8, p.val[1]);
}
return (int)(bayer - (bayer_end - width));
}
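// vqdmulhq_n_s16(v, c) computes (2 * v * c) >> 16 with saturation; b0
// accumulates four samples while b1 accumulates two, so pairing them with
// (short)(coeff*2) and (short)(coeff*4) respectively equalizes the scaling
// in a single fixed-point multiply per lane.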
int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
{
/*
B G B G | B G B G | B G B G | B G B G
G R G R | G R G R | G R G R | G R G R
B G B G | B G B G | B G B G | B G B G
*/
uint16x8_t masklo = vdupq_n_u16(255);
uint8x16x3_t pix;
const uchar* bayer_end = bayer + width;
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 42 )
{
uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));
uint16x8_t b1 = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
uint16x8_t nextb1 = vextq_u16(b1, b1, 1);
uint16x8_t b0 = vaddq_u16(b1, nextb1);
// b0 b1 b2 ...
uint8x8x2_t bb = vzip_u8(vrshrn_n_u16(b0, 2), vrshrn_n_u16(nextb1, 1));
pix.val[1-blue] = vcombine_u8(bb.val[0], bb.val[1]);
uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
uint16x8_t g1 = vandq_u16(r1, masklo);
g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
g1 = vextq_u16(g1, g1, 1);
// g0 g1 g2 ...
uint8x8x2_t gg = vzip_u8(vrshrn_n_u16(g0, 2), vmovn_u16(g1));
pix.val[1] = vcombine_u8(gg.val[0], gg.val[1]);
r0 = vshrq_n_u16(r1, 8);
r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));
// r0 r1 r2 ...
uint8x8x2_t rr = vzip_u8(vmovn_u16(r0), vrshrn_n_u16(r1, 1));
pix.val[1+blue] = vcombine_u8(rr.val[0], rr.val[1]);
vst3q_u8(dst-1, pix);
}
return (int)(bayer - (bayer_end - width));
}
int bayer2RGBA(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
{
/*
B G B G | B G B G | B G B G | B G B G
G R G R | G R G R | G R G R | G R G R
B G B G | B G B G | B G B G | B G B G
*/
uint16x8_t masklo = vdupq_n_u16(255);
uint8x16x4_t pix;
const uchar* bayer_end = bayer + width;
pix.val[3] = vdupq_n_u8(255);
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 56 )
{
uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));
uint16x8_t b1 = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
uint16x8_t nextb1 = vextq_u16(b1, b1, 1);
uint16x8_t b0 = vaddq_u16(b1, nextb1);
// b0 b1 b2 ...
uint8x8x2_t bb = vzip_u8(vrshrn_n_u16(b0, 2), vrshrn_n_u16(nextb1, 1));
pix.val[1-blue] = vcombine_u8(bb.val[0], bb.val[1]);
uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
uint16x8_t g1 = vandq_u16(r1, masklo);
g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
g1 = vextq_u16(g1, g1, 1);
// g0 g1 g2 ...
uint8x8x2_t gg = vzip_u8(vrshrn_n_u16(g0, 2), vmovn_u16(g1));
pix.val[1] = vcombine_u8(gg.val[0], gg.val[1]);
r0 = vshrq_n_u16(r1, 8);
r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));
// r0 r1 r2 ...
uint8x8x2_t rr = vzip_u8(vmovn_u16(r0), vrshrn_n_u16(r1, 1));
pix.val[1+blue] = vcombine_u8(rr.val[0], rr.val[1]);
vst4q_u8(dst-1, pix);
}
return (int)(bayer - (bayer_end - width));
}
int bayer2RGB_EA(const uchar*, int, uchar*, int, int) const
{
return 0;
}
};
#else
typedef SIMDBayerStubInterpolator_<uchar> SIMDBayerInterpolator_8u;
#endif
@ -559,7 +728,9 @@ public:
}
// simd optimization: dcn == 4 goes through bayer2RGBA, dcn == 3 through bayer2RGB
int delta = dcn == 4 ? 0 : vecOp.bayer2RGB(bayer, bayer_step, dst, size.width, blue);
int delta = dcn == 4 ?
vecOp.bayer2RGBA(bayer, bayer_step, dst, size.width, blue) :
vecOp.bayer2RGB(bayer, bayer_step, dst, size.width, blue);
bayer += delta;
dst += delta*dcn;

View File

@ -1221,7 +1221,7 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne
IPP_MORPH_CASE(CV_32FC3, 32f_C3R, 32f);
IPP_MORPH_CASE(CV_32FC4, 32f_C4R, 32f);
default:
return false;
;
}
#undef IPP_MORPH_CASE
@ -1253,14 +1253,11 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne
IPP_MORPH_CASE(CV_32FC3, 32f_C3R, 32f);
IPP_MORPH_CASE(CV_32FC4, 32f_C4R, 32f);
default:
return false;
;
}
#undef IPP_MORPH_CASE
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ >= 8
return false; /// It disables false positive warning in GCC 4.8 and further
#endif
}
return false;
}
static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
@ -1339,22 +1336,190 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
#ifdef HAVE_OPENCL
#define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
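// e.g. ROUNDUP(37, 16) = (37 + 15) - ((37 + 15) % 16) = 52 - 4 = 48,
// the smallest multiple of 16 that is >= 37.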
static bool ocl_morphSmall( InputArray _src, OutputArray _dst, InputArray _kernel, Point anchor, int borderType,
int op, int actual_op = -1, InputArray _extraMat = noArray())
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
if (cn > 4 || (!doubleSupport && depth == CV_64F) ||
_src.offset() % esz != 0 || _src.step() % esz != 0)
return false;
bool haveExtraMat = !_extraMat.empty();
CV_Assert(actual_op <= 3 || haveExtraMat);
Size ksize = _kernel.size();
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
Size size = _src.size(), wholeSize;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType &= ~BORDER_ISOLATED;
int wdepth = depth, wtype = type;
if (depth == CV_8U)
{
wdepth = CV_32S;
wtype = CV_MAKETYPE(wdepth, cn);
}
char cvt[2][40];
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE",
"BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
size_t globalsize[2] = { size.width, size.height };
UMat src = _src.getUMat();
if (!isolated)
{
Point ofs;
src.locateROI(wholeSize, ofs);
}
int h = isolated ? size.height : wholeSize.height;
int w = isolated ? size.width : wholeSize.width;
if (w < ksize.width || h < ksize.height)
return false;
// Figure out what vector size to use for loading the pixels.
int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
int pxLoadVecSize = cn * pxLoadNumPixels;
// Figure out how many pixels per work item to compute in X and Y
// directions. Too many and we run out of registers.
int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
{
pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
{
pxPerWorkItemX = size.width % 2 ? 1 : 2;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
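// The nested ternaries pick the largest of {8, 4, 2, 1} (or {2, 1} for Y)
// that evenly divides the ROI size, so every work item covers a whole
// number of pixels and no tail handling is needed.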
globalsize[0] = size.width / pxPerWorkItemX;
globalsize[1] = size.height / pxPerWorkItemY;
// Need some padding in the private array for pixels
int privDataWidth = ROUNDUP(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
// Make the global size a nice round number so the runtime can pick
// from reasonable choices for the workgroup size
const int wgRound = 256;
globalsize[0] = ROUNDUP(globalsize[0], wgRound);
if (actual_op < 0)
actual_op = op;
// build processing
String processing;
Mat kernel8u;
_kernel.getMat().convertTo(kernel8u, CV_8U);
for (int y = 0; y < kernel8u.rows; ++y)
for (int x = 0; x < kernel8u.cols; ++x)
if (kernel8u.at<uchar>(y, x) != 0)
processing += format("PROCESS(%d,%d)", y, x);
static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
String opts = format("-D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d -D DEPTH_%d "
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=srcT -D dstT1=srcT1 -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s -D PROCESS_ELEM_=%s -D %s%s",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels, depth,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
ocl::typeToStr(type), ocl::typeToStr(depth),
haveExtraMat ? ocl::typeToStr(wtype) : "srcT", // to prevent overflow - WT
haveExtraMat ? ocl::typeToStr(wdepth) : "srcT1", // to prevent overflow - WT1
haveExtraMat ? ocl::convertTypeStr(depth, wdepth, cn, cvt[0]) : "noconvert", // to prevent overflow - src to WT
haveExtraMat ? ocl::convertTypeStr(wdepth, depth, cn, cvt[1]) : "noconvert", // to prevent overflow - WT to dst
processing.c_str(), op2str[op],
actual_op == op ? "" : cv::format(" -D %s", op2str[actual_op]).c_str());
ocl::Kernel kernel("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, opts);
if (kernel.empty())
return false;
_dst.create(size, type);
UMat dst = _dst.getUMat();
UMat source;
if(src.u != dst.u)
source = src;
else
{
Point ofs;
int cols = src.cols, rows = src.rows;
src.locateROI(wholeSize, ofs);
src.adjustROI(ofs.y, wholeSize.height - rows - ofs.y, ofs.x, wholeSize.width - cols - ofs.x);
src.copyTo(source);
src.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
source.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
source.locateROI(wholeSize, ofs);
}
UMat extraMat = _extraMat.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(source));
idxArg = kernel.set(idxArg, (int)source.step);
int srcOffsetX = (int)((source.offset % source.step) / source.elemSize());
int srcOffsetY = (int)(source.offset / source.step);
int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
idxArg = kernel.set(idxArg, srcOffsetX);
idxArg = kernel.set(idxArg, srcOffsetY);
idxArg = kernel.set(idxArg, srcEndX);
idxArg = kernel.set(idxArg, srcEndY);
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
if (haveExtraMat)
{
idxArg = kernel.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(extraMat));
}
return kernel.run(2, globalsize, NULL, false);
}
static bool ocl_morphOp(InputArray _src, OutputArray _dst, InputArray _kernel,
Point anchor, int iterations, int op, int borderType,
const Scalar &, int actual_op = -1, InputArray _extraMat = noArray())
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
int type = _src.type(), depth = CV_MAT_DEPTH(type),
cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
Mat kernel = _kernel.getMat();
Size ksize = kernel.data ? kernel.size() : Size(3, 3), ssize = _src.size();
bool doubleSupport = dev.doubleFPConfig() > 0;
if ((depth == CV_64F && !doubleSupport) || borderType != BORDER_CONSTANT)
return false;
Mat kernel = _kernel.getMat();
bool haveExtraMat = !_extraMat.empty();
Size ksize = kernel.data ? kernel.size() : Size(3, 3), ssize = _src.size();
CV_Assert(actual_op <= 3 || haveExtraMat);
// try to use OpenCL kernel adopted for small morph kernel
if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
(ksize.width == 5 && ksize.height == 5 && cn == 1)) &&
(iterations == 1))
{
if (ocl_morphSmall(_src, _dst, _kernel, anchor, borderType, op, actual_op, _extraMat))
return true;
}
if (iterations == 0 || kernel.rows*kernel.cols == 1)
{
_src.copyTo(_dst);

View File

@ -441,18 +441,18 @@ __kernel void YCrCb2RGB(__global const uchar* src, int src_step, int src_offset,
__global DATA_TYPE * dstptr = (__global DATA_TYPE*)(dst + dst_index);
DATA_TYPE_4 src_pix = vload4(0, srcptr);
DATA_TYPE y = src_pix.x, cr = src_pix.y, cb = src_pix.z;
DATA_TYPE yp = src_pix.x, cr = src_pix.y, cb = src_pix.z;
#ifdef DEPTH_5
__constant float * coeff = c_YCrCb2RGBCoeffs_f;
float r = fma(coeff[0], cr - HALF_MAX, y);
float g = fma(coeff[1], cr - HALF_MAX, fma(coeff[2], cb - HALF_MAX, y));
float b = fma(coeff[3], cb - HALF_MAX, y);
float r = fma(coeff[0], cr - HALF_MAX, yp);
float g = fma(coeff[1], cr - HALF_MAX, fma(coeff[2], cb - HALF_MAX, yp));
float b = fma(coeff[3], cb - HALF_MAX, yp);
#else
__constant int * coeff = c_YCrCb2RGBCoeffs_i;
int r = y + CV_DESCALE(coeff[0] * (cr - HALF_MAX), yuv_shift);
int g = y + CV_DESCALE(mad24(coeff[1], cr - HALF_MAX, coeff[2] * (cb - HALF_MAX)), yuv_shift);
int b = y + CV_DESCALE(coeff[3] * (cb - HALF_MAX), yuv_shift);
int r = yp + CV_DESCALE(coeff[0] * (cr - HALF_MAX), yuv_shift);
int g = yp + CV_DESCALE(mad24(coeff[1], cr - HALF_MAX, coeff[2] * (cb - HALF_MAX)), yuv_shift);
int b = yp + CV_DESCALE(coeff[3] * (cb - HALF_MAX), yuv_shift);
#endif
dstptr[(bidx^2)] = SAT_CAST(r);
@ -1796,6 +1796,10 @@ __kernel void Luv2BGR(__global const uchar * srcptr, int src_step, int src_offse
float G = fma(X, coeffs[3], fma(Y, coeffs[4], Z * coeffs[5]));
float B = fma(X, coeffs[6], fma(Y, coeffs[7], Z * coeffs[8]));
R = clamp(R, 0.f, 1.f);
G = clamp(G, 0.f, 1.f);
B = clamp(B, 0.f, 1.f);
#ifdef SRGB
R = splineInterpolate(R*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);
G = splineInterpolate(G*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);
@ -1853,6 +1857,10 @@ __kernel void Luv2BGR(__global const uchar * src, int src_step, int src_offset,
float G = fma(X, coeffs[3], fma(Y, coeffs[4], Z * coeffs[5]));
float B = fma(X, coeffs[6], fma(Y, coeffs[7], Z * coeffs[8]));
R = clamp(R, 0.f, 1.f);
G = clamp(G, 0.f, 1.f);
B = clamp(B, 0.f, 1.f);
#ifdef SRGB
R = splineInterpolate(R*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);
G = splineInterpolate(G*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);

View File

@ -153,35 +153,10 @@ inline bool isBorder(const struct RectCoords bounds, int2 coord, int numPixels)
}
#endif
inline WT getBorderPixel(const struct RectCoords bounds, int2 coord,
__global const uchar * srcptr, int srcstep)
{
#ifdef BORDER_CONSTANT
return (WT)(0);
#else
int selected_col = coord.x;
int selected_row = coord.y;
EXTRAPOLATE(selected_col, selected_row,
bounds.x1, bounds.y1,
bounds.x2, bounds.y2);
__global const uchar* ptr = srcptr + mad24(selected_row, srcstep, selected_col * SRCSIZE);
return convertToWT(loadpix(ptr));
#endif
}
inline WT readSrcPixelSingle(int2 pos, __global const uchar * srcptr,
int srcstep, const struct RectCoords srcCoords)
{
if (!isBorder(srcCoords, pos, 1))
{
__global const uchar * ptr = srcptr + mad24(pos.y, srcstep, pos.x * SRCSIZE);
return convertToWT(loadpix(ptr));
}
else
return getBorderPixel(srcCoords, pos, srcptr, srcstep);
}
#define float1 float
#define uchar1 uchar
#define int1 int
#define uint1 uint
#define __CAT(x, y) x##y
#define CAT(x, y) __CAT(x, y)
@ -191,7 +166,7 @@ inline WT readSrcPixelSingle(int2 pos, __global const uchar * srcptr,
#define PX_LOAD_FLOAT_VEC_TYPE CAT(WT1, PX_LOAD_VEC_SIZE)
#define PX_LOAD_FLOAT_VEC_CONV CAT(convert_, PX_LOAD_FLOAT_VEC_TYPE)
#define PX_LOAD CAT(vload, PX_LOAD_VEC_SIZE)
#define float1 float
inline PX_LOAD_FLOAT_VEC_TYPE readSrcPixelGroup(int2 pos, __global const uchar * srcptr,
int srcstep, const struct RectCoords srcCoords)
@ -218,12 +193,150 @@ inline PX_LOAD_FLOAT_VEC_TYPE readSrcPixelGroup(int2 pos, __global const uchar *
#define LOOP(N, VAR, STMT) CAT(LOOP, N)((VAR), (STMT))
__kernel void boxFilterSmall(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,
__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols
#ifdef NORMALIZE
, float alpha
#ifdef OP_BOX_FILTER
#define PROCESS_ELEM \
WT total_sum = (WT)(0); \
int sy = 0; \
LOOP(KERNEL_SIZE_Y, sy, \
{ \
int sx = 0; \
LOOP(KERNEL_SIZE_X, sx, \
{ \
total_sum += privateData[py + sy][px + sx]; \
}); \
})
#elif defined OP_FILTER2D
#define DIG(a) a,
__constant WT1 kernelData[] = { COEFF };
#define PROCESS_ELEM \
WT total_sum = 0; \
int sy = 0; \
int kernelIndex = 0; \
LOOP(KERNEL_SIZE_Y, sy, \
{ \
int sx = 0; \
LOOP(KERNEL_SIZE_X, sx, \
{ \
total_sum = fma(kernelData[kernelIndex++], privateData[py + sy][px + sx], total_sum); \
}); \
})
#elif defined OP_ERODE || defined OP_DILATE
#ifdef DEPTH_0
#define MIN_VAL 0
#define MAX_VAL UCHAR_MAX
#elif defined DEPTH_1
#define MIN_VAL SCHAR_MIN
#define MAX_VAL SCHAR_MAX
#elif defined DEPTH_2
#define MIN_VAL 0
#define MAX_VAL USHRT_MAX
#elif defined DEPTH_3
#define MIN_VAL SHRT_MIN
#define MAX_VAL SHRT_MAX
#elif defined DEPTH_4
#define MIN_VAL INT_MIN
#define MAX_VAL INT_MAX
#elif defined DEPTH_5
#define MIN_VAL (-FLT_MAX)
#define MAX_VAL FLT_MAX
#elif defined DEPTH_6
#define MIN_VAL (-DBL_MAX)
#define MAX_VAL DBL_MAX
#endif
)
#ifdef OP_ERODE
#define VAL (WT)MAX_VAL
#elif defined OP_DILATE
#define VAL (WT)MIN_VAL
#else
#error "Unknown operation"
#endif
#define convert_float1 convert_float
#define convert_uchar1 convert_uchar
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#ifdef OP_ERODE
#if defined INTEL_DEVICE && defined DEPTH_0
// workaround for bug in Intel HD graphics drivers (10.18.10.3496 or older)
#define WA_CONVERT_1 CAT(convert_uint, cn)
#define WA_CONVERT_2 CAT(convert_, srcT)
#define MORPH_OP(A, B) WA_CONVERT_2(min(WA_CONVERT_1(A), WA_CONVERT_1(B)))
#else
#define MORPH_OP(A, B) min((A), (B))
#endif
#endif
#ifdef OP_DILATE
#define MORPH_OP(A, B) max((A), (B))
#endif
#define PROCESS(_y, _x) \
total_sum = convertToWT(MORPH_OP(convertToWT(total_sum), convertToWT(privateData[py + _y][px + _x])));
#define PROCESS_ELEM \
WT total_sum = convertToWT(VAL); \
PROCESS_ELEM_
#else
#error "No processing is specified"
#endif
#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT
#define EXTRA_PARAMS , __global const uchar * matptr, int mat_step, int mat_offset
#else
#define EXTRA_PARAMS
#endif
inline WT getBorderPixel(const struct RectCoords bounds, int2 coord,
__global const uchar * srcptr, int srcstep)
{
#ifdef BORDER_CONSTANT
#ifdef OP_ERODE
return (WT)(MAX_VAL);
#elif defined OP_DILATE
return (WT)(MIN_VAL);
#else
return (WT)(0);
#endif
#else
int selected_col = coord.x;
int selected_row = coord.y;
EXTRAPOLATE(selected_col, selected_row,
bounds.x1, bounds.y1,
bounds.x2, bounds.y2);
__global const uchar* ptr = srcptr + mad24(selected_row, srcstep, selected_col * SRCSIZE);
return convertToWT(loadpix(ptr));
#endif
}
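// Under BORDER_CONSTANT the padding must be the identity element of the
// reduction: MAX_VAL for erode (a running min) and MIN_VAL for dilate
// (a running max), so border pixels can never win the comparison.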
inline WT readSrcPixelSingle(int2 pos, __global const uchar * srcptr,
int srcstep, const struct RectCoords srcCoords)
{
if (!isBorder(srcCoords, pos, 1))
{
__global const uchar * ptr = srcptr + mad24(pos.y, srcstep, pos.x * SRCSIZE);
return convertToWT(loadpix(ptr));
}
else
return getBorderPixel(srcCoords, pos, srcptr, srcstep);
}
__kernel void filterSmall(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,
__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols
#ifdef NORMALIZE
, float alpha
#endif
EXTRA_PARAMS )
{
// for non-isolated border: offsetX, offsetY, wholeX, wholeY
const struct RectCoords srcCoords = { srcOffsetX, srcOffsetY, srcEndX, srcEndY };
@ -282,24 +395,27 @@ __kernel void boxFilterSmall(__global const uchar * srcptr, int src_step, int sr
LOOP(PX_PER_WI_X, px,
{
int x = startX + px;
int sy = 0;
int kernelIndex = 0;
WT total_sum = (WT)(0);
LOOP(KERNEL_SIZE_Y, sy,
{
int sx = 0;
LOOP(KERNEL_SIZE_X, sx,
{
total_sum += privateData[py + sy][px + sx];
});
});
__global dstT * dstPtr = (__global dstT *)(dstptr + mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset)));
PROCESS_ELEM;
int dst_index = mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset));
__global dstT * dstPtr = (__global dstT *)(dstptr + dst_index);
#ifdef NORMALIZE
total_sum *= (WT)(alpha);
#endif
#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT
// for these operations SRCSIZE == DSTSIZE
int mat_index = mad24(y, mat_step, mad24(x, SRCSIZE, mat_offset));
WT value = convertToWT(loadpix(matptr + mat_index));
#ifdef OP_GRADIENT
storepix(convertToDstT(convertToWT(total_sum) - convertToWT(value)), dstPtr );
#elif defined OP_TOPHAT
storepix(convertToDstT(convertToWT(value) - convertToWT(total_sum)), dstPtr );
#elif defined OP_BLACKHAT
storepix(convertToDstT(convertToWT(total_sum) - convertToWT(value)), dstPtr );
#endif
#else // erode or dilate, or open-close
storepix(convertToDstT(total_sum), dstPtr);
#endif
});
});
}

View File

@ -132,8 +132,11 @@ kernel void integral_sum_rows(__global const uchar *buf_ptr, int buf_step, int b
}
dst_sq_offset += dst_sq_step;
dst_sq = (__global sumSQT *)(dst_sq_ptr + mad24(x, dst_sq_step, dst_sq_offset));
dst_sq[0] = 0;
if (x < rows - 1)
{
dst_sq = (__global sumSQT *)(dst_sq_ptr + mad24(x, dst_sq_step, dst_sq_offset));
dst_sq[0] = 0;
}
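// Hedged note: the new bounds check mirrors how the last work item is
// handled for the plain sum; an unconditional dst_sq store here could land
// one row past the end of the squared-sum buffer.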
int buf_sq_index = mad24((int)sizeof(sumSQT), x, buf_sq_offset);
sumSQT accum_sq = 0;

View File

@ -89,19 +89,56 @@
#define MAD(x,y,z) mad((x),(y),(z))
#endif
#define LOAD_LOCAL(col_gl, col_lcl) \
sum0 = co3* SRC(col_gl, EXTRAPOLATE_(src_y - 2, src_rows)); \
sum0 = MAD(co2, SRC(col_gl, EXTRAPOLATE_(src_y - 1, src_rows)), sum0); \
temp = SRC(col_gl, EXTRAPOLATE_(src_y, src_rows)); \
sum0 = MAD(co1, temp, sum0); \
sum1 = co3 * temp; \
temp = SRC(col_gl, EXTRAPOLATE_(src_y + 1, src_rows)); \
sum0 = MAD(co2, temp, sum0); \
sum1 = MAD(co2, temp, sum1); \
temp = SRC(col_gl, EXTRAPOLATE_(src_y + 2, src_rows)); \
sum0 = MAD(co3, temp, sum0); \
sum1 = MAD(co1, temp, sum1); \
smem[0][col_lcl] = sum0; \
sum1 = MAD(co2, SRC(col_gl, EXTRAPOLATE_(src_y + 3, src_rows)), sum1); \
sum1 = MAD(co3, SRC(col_gl, EXTRAPOLATE_(src_y + 4, src_rows)), sum1); \
smem[1][col_lcl] = sum1;
#if kercn == 4
#define LOAD_LOCAL4(col_gl, col_lcl) \
sum40 = co3* SRC4(col_gl, EXTRAPOLATE_(src_y - 2, src_rows)); \
sum40 = MAD(co2, SRC4(col_gl, EXTRAPOLATE_(src_y - 1, src_rows)), sum40); \
temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y, src_rows)); \
sum40 = MAD(co1, temp4, sum40); \
sum41 = co3 * temp4; \
temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y + 1, src_rows)); \
sum40 = MAD(co2, temp4, sum40); \
sum41 = MAD(co2, temp4, sum41); \
temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y + 2, src_rows)); \
sum40 = MAD(co3, temp4, sum40); \
sum41 = MAD(co1, temp4, sum41); \
vstore4(sum40, col_lcl, (__local float*) &smem[0][2]); \
sum41 = MAD(co2, SRC4(col_gl, EXTRAPOLATE_(src_y + 3, src_rows)), sum41); \
sum41 = MAD(co3, SRC4(col_gl, EXTRAPOLATE_(src_y + 4, src_rows)), sum41); \
vstore4(sum41, col_lcl, (__local float*) &smem[1][2]);
#endif
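// Each work item now produces two destination rows per pass: sum0 filters
// source rows src_y-2..src_y+2 and sum1 filters src_y..src_y+4, sharing
// three of the five taps, which is why the host side launches only
// (dst.rows + 1) / 2 work groups in Y (see ocl_pyrDown below).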
#define noconvert
__kernel void pyrDown(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,
__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)
{
const int x = get_global_id(0)*kercn;
const int y = get_group_id(1);
const int y = 2*get_global_id(1);
__local FT smem[LOCAL_SIZE + 4];
__local FT smem[2][LOCAL_SIZE + 4];
__global uchar * dstData = dst + dst_offset;
__global const uchar * srcData = src + src_offset;
FT sum;
FT sum0, sum1, temp;
FT co1 = 0.375f;
FT co2 = 0.25f;
FT co3 = 0.0625f;
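// co1..co3 are the classic 5-tap pyramid kernel (1 4 6 4 1) / 16:
// 6/16 = 0.375, 4/16 = 0.25, 1/16 = 0.0625.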
@ -109,134 +146,68 @@ __kernel void pyrDown(__global const uchar * src, int src_step, int src_offset,
const int src_y = 2*y;
int col;
if (src_y >= 2 && src_y < src_rows - 2)
if (src_y >= 2 && src_y < src_rows - 4)
{
#define EXTRAPOLATE_(val, maxVal) val
#if kercn == 1
col = EXTRAPOLATE(x, src_cols);
sum = co3* SRC(col, src_y - 2);
sum = MAD(co2, SRC(col, src_y - 1), sum);
sum = MAD(co1, SRC(col, src_y ), sum);
sum = MAD(co2, SRC(col, src_y + 1), sum);
sum = MAD(co3, SRC(col, src_y + 2), sum);
smem[2 + get_local_id(0)] = sum;
LOAD_LOCAL(col, 2 + get_local_id(0))
#else
if (x < src_cols-4)
{
float4 sum4;
sum4 = co3* SRC4(x, src_y - 2);
sum4 = MAD(co2, SRC4(x, src_y - 1), sum4);
sum4 = MAD(co1, SRC4(x, src_y ), sum4);
sum4 = MAD(co2, SRC4(x, src_y + 1), sum4);
sum4 = MAD(co3, SRC4(x, src_y + 2), sum4);
vstore4(sum4, get_local_id(0), (__local float*) &smem[2]);
float4 sum40, sum41, temp4;
LOAD_LOCAL4(x, get_local_id(0))
}
else
{
for (int i=0; i<4; i++)
{
col = EXTRAPOLATE(x+i, src_cols);
sum = co3* SRC(col, src_y - 2);
sum = MAD(co2, SRC(col, src_y - 1), sum);
sum = MAD(co1, SRC(col, src_y ), sum);
sum = MAD(co2, SRC(col, src_y + 1), sum);
sum = MAD(co3, SRC(col, src_y + 2), sum);
smem[2 + 4*get_local_id(0)+i] = sum;
LOAD_LOCAL(col, 2 + 4 * get_local_id(0) + i)
}
}
#endif
if (get_local_id(0) < 2)
{
col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);
sum = co3* SRC(col, src_y - 2);
sum = MAD(co2, SRC(col, src_y - 1), sum);
sum = MAD(co1, SRC(col, src_y ), sum);
sum = MAD(co2, SRC(col, src_y + 1), sum);
sum = MAD(co3, SRC(col, src_y + 2), sum);
smem[get_local_id(0)] = sum;
LOAD_LOCAL(col, get_local_id(0))
}
if (get_local_id(0) > 1 && get_local_id(0) < 4)
else if (get_local_id(0) < 4)
{
col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);
sum = co3* SRC(col, src_y - 2);
sum = MAD(co2, SRC(col, src_y - 1), sum);
sum = MAD(co1, SRC(col, src_y ), sum);
sum = MAD(co2, SRC(col, src_y + 1), sum);
sum = MAD(co3, SRC(col, src_y + 2), sum);
smem[LOCAL_SIZE + get_local_id(0)] = sum;
LOAD_LOCAL(col, LOCAL_SIZE + get_local_id(0))
}
}
else // need extrapolate y
{
#define EXTRAPOLATE_(val, maxVal) EXTRAPOLATE(val, maxVal)
#if kercn == 1
col = EXTRAPOLATE(x, src_cols);
sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows));
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum);
sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum);
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum);
sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum);
smem[2 + get_local_id(0)] = sum;
LOAD_LOCAL(col, 2 + get_local_id(0))
#else
if (x < src_cols-4)
{
float4 sum4;
sum4 = co3* SRC4(x, EXTRAPOLATE(src_y - 2, src_rows));
sum4 = MAD(co2, SRC4(x, EXTRAPOLATE(src_y - 1, src_rows)), sum4);
sum4 = MAD(co1, SRC4(x, EXTRAPOLATE(src_y , src_rows)), sum4);
sum4 = MAD(co2, SRC4(x, EXTRAPOLATE(src_y + 1, src_rows)), sum4);
sum4 = MAD(co3, SRC4(x, EXTRAPOLATE(src_y + 2, src_rows)), sum4);
vstore4(sum4, get_local_id(0), (__local float*) &smem[2]);
float4 sum40, sum41, temp4;
LOAD_LOCAL4(x, get_local_id(0))
}
else
{
for (int i=0; i<4; i++)
{
col = EXTRAPOLATE(x+i, src_cols);
sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows));
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum);
sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum);
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum);
sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum);
smem[2 + 4*get_local_id(0)+i] = sum;
LOAD_LOCAL(col, 2 + 4*get_local_id(0) + i)
}
}
#endif
if (get_local_id(0) < 2)
{
col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);
sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows));
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum);
sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum);
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum);
sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum);
smem[get_local_id(0)] = sum;
LOAD_LOCAL(col, get_local_id(0))
}
if (get_local_id(0) > 1 && get_local_id(0) < 4)
else if (get_local_id(0) < 4)
{
col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);
sum = co3* SRC(col, EXTRAPOLATE(src_y - 2, src_rows));
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y - 1, src_rows)), sum);
sum = MAD(co1, SRC(col, EXTRAPOLATE(src_y , src_rows)), sum);
sum = MAD(co2, SRC(col, EXTRAPOLATE(src_y + 1, src_rows)), sum);
sum = MAD(co3, SRC(col, EXTRAPOLATE(src_y + 2, src_rows)), sum);
smem[LOCAL_SIZE + get_local_id(0)] = sum;
LOAD_LOCAL(col, LOCAL_SIZE + get_local_id(0))
}
}
@ -247,50 +218,68 @@ __kernel void pyrDown(__global const uchar * src, int src_step, int src_offset,
{
const int tid2 = get_local_id(0) * 2;
sum = 0.f;
#if cn == 1
#if fdepth <= 5
sum = sum + dot(vload4(0, (__local float*) (&smem)+tid2), (float4)(co3, co2, co1, co2));
#else
sum = sum + dot(vload4(0, (__local double*) (&smem)+tid2), (double4)(co3, co2, co1, co2));
#endif
#else
sum = MAD(co3, smem[2 + tid2 - 2], sum);
sum = MAD(co2, smem[2 + tid2 - 1], sum);
sum = MAD(co1, smem[2 + tid2 ], sum);
sum = MAD(co2, smem[2 + tid2 + 1], sum);
#endif
sum = MAD(co3, smem[2 + tid2 + 2], sum);
const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;
if (dst_x < dst_cols)
storepix(convertToT(sum), dstData + y * dst_step + dst_x * PIXSIZE);
{
for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)
{
#if cn == 1
#if fdepth <= 5
FT sum = dot(vload4(0, (__local float*) (&smem) + tid2 + (yin - y) * (LOCAL_SIZE + 4)), (float4)(co3, co2, co1, co2));
#else
FT sum = dot(vload4(0, (__local double*) (&smem) + tid2 + (yin - y) * (LOCAL_SIZE + 4)), (double4)(co3, co2, co1, co2));
#endif
#else
FT sum = co3 * smem[yin - y][2 + tid2 - 2];
sum = MAD(co2, smem[yin - y][2 + tid2 - 1], sum);
sum = MAD(co1, smem[yin - y][2 + tid2 ], sum);
sum = MAD(co2, smem[yin - y][2 + tid2 + 1], sum);
#endif
sum = MAD(co3, smem[yin - y][2 + tid2 + 2], sum);
storepix(convertToT(sum), dstData + yin * dst_step + dst_x * PIXSIZE);
}
}
}
#else
int tid4 = get_local_id(0) * 4;
sum = co3* smem[2 + tid4 + 2];
sum = MAD(co3, smem[2 + tid4 - 2], sum);
sum = MAD(co2, smem[2 + tid4 - 1], sum);
sum = MAD(co1, smem[2 + tid4 ], sum);
sum = MAD(co2, smem[2 + tid4 + 1], sum);
int dst_x = (get_group_id(0) * LOCAL_SIZE + tid4) / 2;
if (dst_x < dst_cols - 1)
{
for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)
{
if (dst_x < dst_cols)
storepix(convertToT(sum), dstData + mad24(y, dst_step, dst_x * PIXSIZE));
FT sum = co3* smem[yin - y][2 + tid4 + 2];
sum = MAD(co3, smem[yin - y][2 + tid4 - 2], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 - 1], sum);
sum = MAD(co1, smem[yin - y][2 + tid4 ], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);
storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));
tid4 += 2;
dst_x += 1;
dst_x ++;
sum = co3* smem[yin - y][2 + tid4 + 4];
sum = MAD(co3, smem[yin - y][2 + tid4 ], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);
sum = MAD(co1, smem[yin - y][2 + tid4 + 2], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 + 3], sum);
storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));
dst_x --;
}
sum = co3* smem[2 + tid4 + 2];
sum = MAD(co3, smem[2 + tid4 - 2], sum);
sum = MAD(co2, smem[2 + tid4 - 1], sum);
sum = MAD(co1, smem[2 + tid4 ], sum);
sum = MAD(co2, smem[2 + tid4 + 1], sum);
}
else if (dst_x < dst_cols)
{
for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)
{
FT sum = co3* smem[yin - y][2 + tid4 + 2];
sum = MAD(co3, smem[yin - y][2 + tid4 - 2], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 - 1], sum);
sum = MAD(co1, smem[yin - y][2 + tid4 ], sum);
sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);
if (dst_x < dst_cols)
storepix(convertToT(sum), dstData + mad24(y, dst_step, dst_x * PIXSIZE));
storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));
}
}
#endif
}

View File

@ -445,7 +445,7 @@ static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, in
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
size_t localThreads[2] = { local_size/kercn, 1 };
size_t globalThreads[2] = { (src.cols + (kercn-1))/kercn, dst.rows };
size_t globalThreads[2] = { (src.cols + (kercn-1))/kercn, (dst.rows + 1) / 2 };
return k.run(2, globalThreads, localThreads, false);
}

View File

@ -720,7 +720,7 @@ static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s%s%s",
"-D convertToWT=%s -D convertToDstT=%s%s%s -D OP_BOX_FILTER",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
@ -734,7 +734,7 @@ static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
if (!kernel.create("boxFilterSmall", cv::ocl::imgproc::boxFilterSmall_oclsrc, build_options))
if (!kernel.create("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, build_options))
return false;
}
else

View File

@ -117,7 +117,7 @@ OCL_TEST_P(BlendLinear, Accuracy)
OCL_OFF(cv::blendLinear(src1_roi, src2_roi, weights1_roi, weights2_roi, dst_roi));
OCL_ON(cv::blendLinear(usrc1_roi, usrc2_roi, uweights1_roi, uweights2_roi, udst_roi));
Near(depth <= CV_32S ? 1.0 : 0.2);
Near(depth <= CV_32S ? 1.0 : 0.5);
}
}

View File

@ -109,7 +109,7 @@ OCL_TEST_P(BoxFilter, Mat)
OCL_OFF(cv::boxFilter(src_roi, dst_roi, -1, ksize, anchor, normalize, borderType));
OCL_ON(cv::boxFilter(usrc_roi, udst_roi, -1, ksize, anchor, normalize, borderType));
Near(depth <= CV_32S ? 1 : 1e-3);
Near(depth <= CV_32S ? 1 : 3e-3);
}
}

View File

@ -302,14 +302,14 @@ OCL_TEST_P(CvtColor8u32f, Lab2LRGBA) { performTest(3, 4, CVTCODE(Lab2LRGB), dept
// RGB -> Luv
OCL_TEST_P(CvtColor8u32f, BGR2Luv) { performTest(3, 3, CVTCODE(BGR2Luv), depth == CV_8U ? 1 : 1e-2); }
OCL_TEST_P(CvtColor8u32f, RGB2Luv) { performTest(3, 3, CVTCODE(RGB2Luv), depth == CV_8U ? 1 : 1e-2); }
OCL_TEST_P(CvtColor8u32f, LBGR2Luv) { performTest(3, 3, CVTCODE(LBGR2Luv), depth == CV_8U ? 1 : 4e-3); }
OCL_TEST_P(CvtColor8u32f, LRGB2Luv) { performTest(3, 3, CVTCODE(LRGB2Luv), depth == CV_8U ? 1 : 5e-3); }
OCL_TEST_P(CvtColor8u32f, BGRA2Luv) { performTest(4, 3, CVTCODE(BGR2Luv), depth == CV_8U ? 1 : 8e-3); }
OCL_TEST_P(CvtColor8u32f, RGBA2Luv) { performTest(4, 3, CVTCODE(RGB2Luv), depth == CV_8U ? 1 : 9e-3); }
OCL_TEST_P(CvtColor8u32f, LBGRA2Luv) { performTest(4, 3, CVTCODE(LBGR2Luv), depth == CV_8U ? 1 : 5e-3); }
OCL_TEST_P(CvtColor8u32f, LRGBA2Luv) { performTest(4, 3, CVTCODE(LRGB2Luv), depth == CV_8U ? 1 : 5e-3); }
OCL_TEST_P(CvtColor8u32f, BGR2Luv) { performTest(3, 3, CVTCODE(BGR2Luv), depth == CV_8U ? 1 : 1.5e-2); }
OCL_TEST_P(CvtColor8u32f, RGB2Luv) { performTest(3, 3, CVTCODE(RGB2Luv), depth == CV_8U ? 1 : 1.5e-2); }
OCL_TEST_P(CvtColor8u32f, LBGR2Luv) { performTest(3, 3, CVTCODE(LBGR2Luv), depth == CV_8U ? 1 : 6e-3); }
OCL_TEST_P(CvtColor8u32f, LRGB2Luv) { performTest(3, 3, CVTCODE(LRGB2Luv), depth == CV_8U ? 1 : 6e-3); }
OCL_TEST_P(CvtColor8u32f, BGRA2Luv) { performTest(4, 3, CVTCODE(BGR2Luv), depth == CV_8U ? 1 : 2e-2); }
OCL_TEST_P(CvtColor8u32f, RGBA2Luv) { performTest(4, 3, CVTCODE(RGB2Luv), depth == CV_8U ? 1 : 2e-2); }
OCL_TEST_P(CvtColor8u32f, LBGRA2Luv) { performTest(4, 3, CVTCODE(LBGR2Luv), depth == CV_8U ? 1 : 6e-3); }
OCL_TEST_P(CvtColor8u32f, LRGBA2Luv) { performTest(4, 3, CVTCODE(LRGB2Luv), depth == CV_8U ? 1 : 6e-3); }
OCL_TEST_P(CvtColor8u32f, Luv2BGR) { performTest(3, 3, CVTCODE(Luv2BGR), depth == CV_8U ? 1 : 7e-5); }
OCL_TEST_P(CvtColor8u32f, Luv2RGB) { performTest(3, 3, CVTCODE(Luv2RGB), depth == CV_8U ? 1 : 7e-5); }

View File

@ -275,14 +275,68 @@ OCL_TEST_P(Dilate, Mat)
/////////////////////////////////////////////////////////////////////////////////////////////////
// MorphologyEx
IMPLEMENT_PARAM_CLASS(MorphOp, int)
PARAM_TEST_CASE(MorphologyEx, MatType,
int, // kernel size
MorphOp, // MORPH_OP
int, // iterations
bool)
{
int type, ksize, op, iterations;
bool useRoi;
typedef FilterTestBase MorphologyEx;
TEST_DECLARE_INPUT_PARAMETER(src);
TEST_DECLARE_OUTPUT_PARAMETER(dst);
virtual void SetUp()
{
type = GET_PARAM(0);
ksize = GET_PARAM(1);
op = GET_PARAM(2);
iterations = GET_PARAM(3);
useRoi = GET_PARAM(4);
}
void random_roi(int minSize = 1)
{
if (minSize == 0)
minSize = ksize;
Size roiSize = randomSize(minSize, MAX_VALUE);
Border srcBorder = randomBorder(0, useRoi ? MAX_VALUE : 0);
randomSubMat(src, src_roi, roiSize, srcBorder, type, 5, 256);
Border dstBorder = randomBorder(0, useRoi ? MAX_VALUE : 0);
randomSubMat(dst, dst_roi, roiSize, dstBorder, type, -60, 70);
UMAT_UPLOAD_INPUT_PARAMETER(src);
UMAT_UPLOAD_OUTPUT_PARAMETER(dst);
}
void Near()
{
int depth = CV_MAT_DEPTH(type);
bool isFP = depth >= CV_32F;
if (isFP)
Near(1e-6, true);
else
Near(1, false);
}
void Near(double threshold, bool relative)
{
if (relative)
OCL_EXPECT_MATS_NEAR_RELATIVE(dst, threshold);
else
OCL_EXPECT_MATS_NEAR(dst, threshold);
}
};
OCL_TEST_P(MorphologyEx, Mat)
{
Size kernelSize(ksize, ksize);
int iterations = (int)param;
int op = size.height;
for (int j = 0; j < test_loop_times; j++)
{
@ -377,12 +431,10 @@ OCL_INSTANTIATE_TEST_CASE_P(Filter, Dilate, Combine(
OCL_INSTANTIATE_TEST_CASE_P(Filter, MorphologyEx, Combine(
Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
Values(3, 5, 7),
Values(Size(0, 2), Size(0, 3), Size(0, 4), Size(0, 5), Size(0, 6)), // used as generator of operations
Values((BorderType)BORDER_CONSTANT),
Values(1.0, 2.0, 3.0),
Bool(),
Values(1))); // not used
Values(3, 5, 7), // kernel size
Values((MorphOp)MORPH_OPEN, (MorphOp)MORPH_CLOSE, (MorphOp)MORPH_GRADIENT, (MorphOp)MORPH_TOPHAT, (MorphOp)MORPH_BLACKHAT), // used as generator of operations
Values(1, 2, 3),
Bool()));
} } // namespace cvtest::ocl

View File

@ -6,7 +6,7 @@ if(IOS OR NOT PYTHON_DEFAULT_AVAILABLE OR NOT ANT_EXECUTABLE OR NOT (JNI_FOUND O
endif()
set(the_description "The java bindings")
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_ml opencv_calib3d opencv_photo opencv_nonfree opencv_contrib)
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_calib3d opencv_photo opencv_nonfree opencv_contrib)
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp")
if(NOT ANDROID)

View File

@ -2,7 +2,7 @@
from __future__ import print_function
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "ml", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
verbose = False
show_warnings = True
show_errors = True

View File

@ -14,10 +14,6 @@
# include "opencv2/video.hpp"
#endif
#ifdef HAVE_OPENCV_ML
# include "opencv2/ml.hpp"
#endif
#ifdef HAVE_OPENCV_CONTRIB
# include "opencv2/contrib.hpp"
#endif
@ -41,10 +37,7 @@ JNI_OnLoad(JavaVM* vm, void* )
#ifdef HAVE_OPENCV_VIDEO
init &= cv::initModule_video();
#endif
#ifdef HAVE_OPENCV_ML
init &= cv::initModule_ml();
#endif
#ifdef HAVE_OPENCV_CONTRIB
init &= cv::initModule_contrib();
#endif

View File

@ -63,41 +63,30 @@ training examples are recomputed at each training iteration. Examples deleted at
.. [FHT98] Friedman, J. H., Hastie, T. and Tibshirani, R. Additive Logistic Regression: a Statistical View of Boosting. Technical Report, Dept. of Statistics, Stanford University, 1998.
CvBoostParams
Boost::Params
-------------
.. ocv:struct:: CvBoostParams : public CvDTreeParams
.. ocv:struct:: Boost::Params : public DTree::Params
Boosting training parameters.
There is one structure member that you can set directly:
.. ocv:member:: int split_criteria
Splitting criteria used to choose optimal splits during a weak tree construction. Possible values are:
* **CvBoost::DEFAULT** Use the default for the particular boosting method, see below.
* **CvBoost::GINI** Use Gini index. This is default option for Real AdaBoost; may be also used for Discrete AdaBoost.
* **CvBoost::MISCLASS** Use misclassification rate. This is default option for Discrete AdaBoost; may be also used for Real AdaBoost.
* **CvBoost::SQERR** Use least squares criteria. This is default and the only option for LogitBoost and Gentle AdaBoost.
The structure is derived from :ocv:class:`CvDTreeParams` but not all of the decision tree parameters are supported. In particular, cross-validation is not supported.
The structure is derived from ``DTrees::Params`` but not all of the decision tree parameters are supported. In particular, cross-validation is not supported.
All parameters are public. You can initialize them by a constructor and then override some of them directly if you want.
CvBoostParams::CvBoostParams
Boost::Params::Params
----------------------------
The constructors.
.. ocv:function:: CvBoostParams::CvBoostParams()
.. ocv:function:: Boost::Params::Params()
.. ocv:function:: CvBoostParams::CvBoostParams( int boost_type, int weak_count, double weight_trim_rate, int max_depth, bool use_surrogates, const float* priors )
.. ocv:function:: Boost::Params::Params( int boostType, int weakCount, double weightTrimRate, int maxDepth, bool useSurrogates, const Mat& priors )
:param boost_type: Type of the boosting algorithm. Possible values are:
* **CvBoost::DISCRETE** Discrete AdaBoost.
* **CvBoost::REAL** Real AdaBoost. It is a technique that utilizes confidence-rated predictions and works well with categorical data.
* **CvBoost::LOGIT** LogitBoost. It can produce good regression fits.
* **CvBoost::GENTLE** Gentle AdaBoost. It puts less weight on outlier data points and for that reason is often good with regression data.
* **Boost::DISCRETE** Discrete AdaBoost.
* **Boost::REAL** Real AdaBoost. It is a technique that utilizes confidence-rated predictions and works well with categorical data.
* **Boost::LOGIT** LogitBoost. It can produce good regression fits.
* **Boost::GENTLE** Gentle AdaBoost. It puts less weight on outlier data points and for that reason is often good with regression data.
Gentle AdaBoost and Real AdaBoost are often the preferable choices.
@ -105,131 +94,54 @@ The constructors.
:param weight_trim_rate: A threshold between 0 and 1 used to save computational time. Samples with summary weight :math:`\leq 1 - weight\_trim\_rate` do not participate in the *next* iteration of training. Set this parameter to 0 to turn off this functionality.
See :ocv:func:`CvDTreeParams::CvDTreeParams` for description of other parameters.
See ``DTrees::Params`` for description of other parameters.
Default parameters are:
::
CvBoostParams::CvBoostParams()
Boost::Params::Params()
{
boost_type = CvBoost::REAL;
weak_count = 100;
weight_trim_rate = 0.95;
cv_folds = 0;
max_depth = 1;
boostType = Boost::REAL;
weakCount = 100;
weightTrimRate = 0.95;
CVFolds = 0;
maxDepth = 1;
}
CvBoostTree
-----------
.. ocv:class:: CvBoostTree : public CvDTree
The weak tree classifier, a component of the boosted tree classifier :ocv:class:`CvBoost`, is a derivative of :ocv:class:`CvDTree`. Normally, there is no need to use the weak classifiers directly. However, they can be accessed as elements of the sequence ``CvBoost::weak``, retrieved by :ocv:func:`CvBoost::get_weak_predictors`.
.. note:: In case of LogitBoost and Gentle AdaBoost, each weak predictor is a regression tree, rather than a classification tree. Even in case of Discrete AdaBoost and Real AdaBoost, the ``CvBoostTree::predict`` return value (:ocv:member:`CvDTreeNode::value`) is not an output class label. A negative value "votes" for class #0, a positive value - for class #1. The votes are weighted. The weight of each individual tree may be increased or decreased using the method ``CvBoostTree::scale``.
CvBoost
Boost
-------
.. ocv:class:: CvBoost : public CvStatModel
.. ocv:class:: Boost : public DTrees
Boosted tree classifier derived from :ocv:class:`CvStatModel`.
Boosted tree classifier derived from ``DTrees``.
CvBoost::CvBoost
Boost::create
----------------
Default and training constructors.
Creates the empty model
.. ocv:function:: CvBoost::CvBoost()
.. ocv:function:: Ptr<Boost> Boost::create(const Params& params=Params())
.. ocv:function:: CvBoost::CvBoost( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvBoostParams params=CvBoostParams() )
Use ``StatModel::train`` to train the model, ``StatModel::train<Boost>(traindata, params)`` to create and train the model, ``StatModel::load<Boost>(filename)`` to load the pre-trained model.
.. ocv:function:: CvBoost::CvBoost( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvBoostParams params=CvBoostParams() )
.. ocv:pyfunction:: cv2.Boost([trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]]) -> <Boost object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
CvBoost::train
--------------
Trains a boosted tree classifier.
.. ocv:function:: bool CvBoost::train( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvBoostParams params=CvBoostParams(), bool update=false )
.. ocv:function:: bool CvBoost::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvBoostParams params=CvBoostParams(), bool update=false )
.. ocv:function:: bool CvBoost::train( CvMLData* data, CvBoostParams params=CvBoostParams(), bool update=false )
.. ocv:pyfunction:: cv2.Boost.train(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params[, update]]]]]]) -> retval
:param update: Specifies whether the classifier needs to be updated (``true``, the new weak tree classifiers added to the existing ensemble) or the classifier needs to be rebuilt from scratch (``false``).
The train method follows the common template of :ocv:func:`CvStatModel::train`. The responses must be categorical, which means that boosted trees cannot be built for regression, and there should be two classes.
CvBoost::predict
----------------
Predicts a response for an input sample.
.. ocv:function:: float CvBoost::predict( const cv::Mat& sample, const cv::Mat& missing=Mat(), const cv::Range& slice=Range::all(), bool rawMode=false, bool returnSum=false ) const
.. ocv:function:: float CvBoost::predict( const CvMat* sample, const CvMat* missing=0, CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, bool raw_mode=false, bool return_sum=false ) const
.. ocv:pyfunction:: cv2.Boost.predict(sample[, missing[, slice[, rawMode[, returnSum]]]]) -> retval
:param sample: Input sample.
:param missing: Optional mask of missing measurements. To handle missing measurements, the weak classifiers must include surrogate splits (see ``CvDTreeParams::use_surrogates``).
:param weak_responses: Optional output parameter, a floating-point vector with responses of each individual weak classifier. The number of elements in the vector must be equal to the slice length.
:param slice: Continuous subset of the sequence of weak classifiers to be used for prediction. By default, all the weak classifiers are used.
:param rawMode: Normally, it should be set to ``false``.
:param returnSum: If ``true`` then return sum of votes instead of the class label.
The method runs the sample through the trees in the ensemble and returns the output class label based on the weighted voting.
CvBoost::prune
--------------
Removes the specified weak classifiers.
.. ocv:function:: void CvBoost::prune( CvSlice slice )
.. ocv:pyfunction:: cv2.Boost.prune(slice) -> None
:param slice: Continuous subset of the sequence of weak classifiers to be removed.
The method removes the specified weak classifiers from the sequence.
.. note:: Do not confuse this method with the pruning of individual decision trees, which is currently not supported.
CvBoost::calc_error
-------------------
Returns error of the boosted tree classifier.
.. ocv:function:: float CvBoost::calc_error( CvMLData* _data, int type , std::vector<float> *resp = 0 )
The method is identical to :ocv:func:`CvDTree::calc_error` but uses the boosted tree classifier as predictor.
CvBoost::get_weak_predictors
----------------------------
Returns the sequence of weak tree classifiers.
.. ocv:function:: CvSeq* CvBoost::get_weak_predictors()
The method returns the sequence of weak classifiers. Each element of the sequence is a pointer to the :ocv:class:`CvBoostTree` class or to some of its derivatives.
CvBoost::get_params
-------------------
Returns current parameters of the boosted tree classifier.
.. ocv:function:: const CvBoostParams& CvBoost::get_params() const
CvBoost::get_data
Boost::getBParams
-----------------
Returns used train data of the boosted tree classifier.
Returns the boosting parameters
.. ocv:function:: const CvDTreeTrainData* CvBoost::get_data() const
.. ocv:function:: Params Boost::getBParams() const
The method returns the training parameters.
Boost::setBParams
-----------------
Sets the boosting parameters
.. ocv:function:: void Boost::setBParams( const Params& p )
:param p: Training parameters of type Boost::Params.
The method sets the training parameters.
Prediction with Boost
---------------------
StatModel::predict(samples, results, flags) should be used. Pass ``flags=StatModel::RAW_OUTPUT`` to get the raw sum from Boost classifier.
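For illustration, a minimal training/prediction sketch assembled only from the calls described above; the ``TrainData`` object ``tdata`` and the ``samples`` matrix are assumed to be prepared elsewhere, and variable names are illustrative: ::

    using namespace cv::ml;

    // training parameters, as documented above
    Boost::Params params( Boost::REAL,  // boostType
                          100,          // weakCount
                          0.95,         // weightTrimRate
                          1,            // maxDepth
                          false,        // useSurrogates
                          cv::Mat() );  // priors

    // create and train in one call
    cv::Ptr<Boost> boost = StatModel::train<Boost>(tdata, params);

    cv::Mat results;
    boost->predict(samples, results, 0);                      // class labels
    boost->predict(samples, results, StatModel::RAW_OUTPUT);  // raw sums of votes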

View File

@ -3,10 +3,7 @@ Decision Trees
The ML classes discussed in this section implement Classification and Regression Tree algorithms described in [Breiman84]_.
The class
:ocv:class:`CvDTree` represents a single decision tree that may be used alone or as a base class in tree ensembles (see
:ref:`Boosting` and
:ref:`Random Trees` ).
The class ``cv::ml::DTrees`` represents a single decision tree or a collection of decision trees. It's also a base class for ``RTrees`` and ``Boost``.
A decision tree is a binary tree (tree where each non-leaf node has two child nodes). It can be used either for classification or for regression. For classification, each tree leaf is marked with a class label; multiple leaves may have the same label. For regression, a constant is also assigned to each tree leaf, so the approximation function is piecewise constant.
@ -55,123 +52,107 @@ Besides the prediction that is an obvious use of decision trees, the tree can be
Importance of each variable is computed over all the splits on this variable in the tree, primary and surrogate ones. Thus, to compute variable importance correctly, the surrogate splits must be enabled in the training parameters, even if there is no missing data.
CvDTreeSplit
------------
.. ocv:struct:: CvDTreeSplit
DTrees::Split
-------------
.. ocv:class:: DTrees::Split
The class represents split in a decision tree. It has public members:
The structure represents a possible decision tree node split. It has public members:
.. ocv:member:: int var_idx
.. ocv:member:: int varIdx
Index of variable on which the split is created.
.. ocv:member:: int inversed
.. ocv:member:: bool inversed
If it is not null then inverse split rule is used that is left and right branches are exchanged in the rule expressions below.
If true, then the inverse split rule is used (i.e. left and right branches are exchanged in the rule expressions below).
.. ocv:member:: float quality
The split quality, a positive number. It is used to choose the best primary split, then to choose and sort the surrogate splits. After the tree is constructed, it is also used to compute variable importance.
The split quality, a positive number. It is used to choose the best split.
.. ocv:member:: CvDTreeSplit* next
.. ocv:member:: int next
Pointer to the next split in the node list of splits.
Index of the next split in the list of splits for the node
.. ocv:member:: int[] subset
Bit array indicating the value subset in case of split on a categorical variable. The rule is: ::
if var_value in subset
then next_node <- left
else next_node <- right
.. ocv:member:: float ord::c
.. ocv:member:: float c
The threshold value in case of split on an ordered variable. The rule is: ::
if var_value < ord.c
then next_node<-left
else next_node<-right
if var_value < c
then next_node<-left
else next_node<-right
.. ocv:member:: int ord::split_point
.. ocv:member:: int subsetOfs
Used internally by the training algorithm.
Offset of the bitset used by the split on a categorical variable. The rule is: ::
CvDTreeNode
-----------
.. ocv:struct:: CvDTreeNode
if bitset[var_value] == 1
then next_node <- left
else next_node <- right
DTrees::Node
------------
.. ocv:class:: DTrees::Node
The structure represents a node in a decision tree. It has public members:
.. ocv:member:: int class_idx
Class index normalized to 0..class_count-1 range and assigned to the node. It is used internally in classification trees and tree ensembles.
.. ocv:member:: int Tn
Tree index in a ordered sequence of pruned trees. The indices are used during and after the pruning procedure. The root node has the maximum value ``Tn`` of the whole tree, child nodes have ``Tn`` less than or equal to the parent's ``Tn``, and nodes with :math:`Tn \leq CvDTree::pruned\_tree\_idx` are not used at prediction stage (the corresponding branches are considered as cut-off), even if they have not been physically deleted from the tree at the pruning stage.
The class represents a decision tree node. It has public members:
.. ocv:member:: double value
Value at the node: a class label in case of classification or estimated function value in case of regression.
.. ocv:member:: CvDTreeNode* parent
.. ocv:member:: int classIdx
Pointer to the parent node.
Class index normalized to 0..class_count-1 range and assigned to the node. It is used internally in classification trees and tree ensembles.
.. ocv:member:: CvDTreeNode* left
.. ocv:member:: int parent
Pointer to the left child node.
Index of the parent node
.. ocv:member:: CvDTreeNode* right
.. ocv:member:: int left
Pointer to the right child node.
Index of the left child node
.. ocv:member:: CvDTreeSplit* split
.. ocv:member:: int right
Pointer to the first (primary) split in the node list of splits.
Index of the right child node
.. ocv:member:: int sample_count
.. ocv:member:: int defaultDir
The number of samples that fall into the node at the training stage. It is used to resolve the difficult cases - when the variable for the primary split is missing and all the variables for other surrogate splits are missing too. In this case the sample is directed to the left if ``left->sample_count > right->sample_count`` and to the right otherwise.
The default direction to go (-1: left or +1: right). It helps in the case of missing values.
.. ocv:member:: int depth
.. ocv:member:: int split
Depth of the node. The root node depth is 0, the child nodes depth is the parent's depth + 1.
Index of the first split
Other numerous fields of ``CvDTreeNode`` are used internally at the training stage.
CvDTreeParams
-------------
.. ocv:struct:: CvDTreeParams
DTrees::Params
---------------
.. ocv:class:: DTrees::Params
The structure contains all the decision tree training parameters. You can initialize it by default constructor and then override any parameters directly before training, or the structure may be fully initialized using the advanced variant of the constructor.
CvDTreeParams::CvDTreeParams
DTrees::Params::Params
----------------------------
The constructors.
The constructors
.. ocv:function:: CvDTreeParams::CvDTreeParams()
.. ocv:function:: DTrees::Params::Params()
.. ocv:function:: CvDTreeParams::CvDTreeParams( int max_depth, int min_sample_count, float regression_accuracy, bool use_surrogates, int max_categories, int cv_folds, bool use_1se_rule, bool truncate_pruned_tree, const float* priors )
.. ocv:function:: DTrees::Params::Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, int CVFolds, bool use1SERule, bool truncatePrunedTree, const Mat& priors )
:param max_depth: The maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than ``max_depth``. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
:param maxDepth: The maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than ``maxDepth``. The root node has zero depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned.
:param min_sample_count: If the number of samples in a node is less than this parameter then the node will not be split.
:param minSampleCount: If the number of samples in a node is less than this parameter then the node will not be split.
:param regression_accuracy: Termination criteria for regression trees. If all absolute differences between an estimated value in a node and values of train samples in this node are less than this parameter then the node will not be split.
:param regressionAccuracy: Termination criteria for regression trees. If all absolute differences between an estimated value in a node and values of train samples in this node are less than this parameter then the node will not be split further.
:param use_surrogates: If true then surrogate splits will be built. These splits allow to work with missing data and compute variable importance correctly.
:param useSurrogates: If true then surrogate splits will be built. These splits make it possible to work with missing data and compute variable importance correctly. (Note: currently not implemented.)
:param max_categories: Cluster possible values of a categorical variable into ``K`` :math:`\leq` ``max_categories`` clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries to make a split, takes more than ``max_categories`` values, the precise best subset estimation may take a very long time because the algorithm is exponential. Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into ``max_categories`` clusters that is some categories are merged together. The clustering is applied only in ``n``>2-class classification problems for categorical variables with ``N > max_categories`` possible values. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
:param maxCategories: Cluster possible values of a categorical variable into ``K<=maxCategories`` clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries to make a split, takes more than ``maxCategories`` values, the precise best subset estimation may take a very long time because the algorithm is exponential. Instead, many decision trees engines (including our implementation) try to find sub-optimal split in this case by clustering all the samples into ``maxCategories`` clusters that is some categories are merged together. The clustering is applied only in ``n > 2``-class classification problems for categorical variables with ``N > max_categories`` possible values. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
:param cv_folds: If ``cv_folds > 1`` then prune a tree with ``K``-fold cross-validation where ``K`` is equal to ``cv_folds``.
:param CVFolds: If ``CVFolds > 1`` then the algorithm prunes the built decision tree using a ``K``-fold cross-validation procedure, where ``K`` is equal to ``CVFolds``.
:param use_1se_rule: If true then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.
:param use1SERule: If true then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.
:param truncate_pruned_tree: If true then pruned branches are physically removed from the tree. Otherwise they are retained and it is possible to get results from the original unpruned (or pruned less aggressively) tree by decreasing ``CvDTree::pruned_tree_idx`` parameter.
:param truncatePrunedTree: If true then pruned branches are physically removed from the tree. Otherwise they are retained and it is possible to get results from the original unpruned (or pruned less aggressively) tree.
:param priors: The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if you want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. You can also think about this parameter as weights of prediction categories which determine relative weights that you give to misclassification. That is, if the weight of the first category is 1 and the weight of the second category is 10, then each mistake in predicting the second category is equivalent to making 10 mistakes in predicting the first category.
@ -179,142 +160,82 @@ The default constructor initializes all the parameters with the default values t
::
CvDTreeParams() : max_categories(10), max_depth(INT_MAX), min_sample_count(10),
cv_folds(10), use_surrogates(true), use_1se_rule(true),
truncate_pruned_tree(true), regression_accuracy(0.01f), priors(0)
{}
DTrees::Params::Params()
{
maxDepth = INT_MAX;
minSampleCount = 10;
regressionAccuracy = 0.01f;
useSurrogates = false;
maxCategories = 10;
CVFolds = 10;
use1SERule = true;
truncatePrunedTree = true;
priors = Mat();
}
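As a hedged illustration of the intended usage (parameter names as in the listing above; ``DTrees::create`` is described below, and this is a sketch rather than a verbatim library sample): ::

    using namespace cv::ml;

    DTrees::Params params;    // defaults as shown above
    params.maxDepth = 8;      // override members directly before training
    params.CVFolds  = 0;      // disable cross-validation pruning

    cv::Ptr<DTrees> dtree = DTrees::create(params);
    // then train it via the train method, see StatModel::train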
CvDTreeTrainData
DTrees
------
.. ocv:class:: DTrees : public StatModel
The class represents a single decision tree or a collection of decision trees. The current public interface of the class allows the user to train only a single decision tree; however, the class is capable of storing multiple decision trees and using them for prediction (by summing responses or via a voting scheme), and the classes derived from DTrees (such as ``RTrees`` and ``Boost``) use this capability to implement decision tree ensembles.
DTrees::create
----------------
.. ocv:struct:: CvDTreeTrainData
Creates the empty model
Decision tree training data and shared data for tree ensembles. The structure is mostly used internally for storing both standalone trees and tree ensembles efficiently. Basically, it contains the following types of information:
.. ocv:function:: Ptr<DTrees> DTrees::create(const Params& params=Params())
#. Training parameters, an instance of :ocv:class:`CvDTreeParams`.
The static method creates an empty decision tree with the specified parameters. It should then be trained using the ``train`` method (see ``StatModel::train``). Alternatively, you can load the model from a file using ``StatModel::load<DTrees>(filename)``.
#. Training data preprocessed to find the best splits more efficiently. For tree ensembles, this preprocessed data is reused by all trees. Additionally, the training data characteristics shared by all trees in the ensemble are stored here: variable types, the number of classes, a class label compression map, and so on.
DTrees::getDParams
------------------
Returns the training parameters
#. Buffers, memory storages for tree nodes, splits, and other elements of the constructed trees.
.. ocv:function:: Params DTrees::getDParams() const
There are two ways of using this structure. In simple cases (for example, a standalone tree or the ready-to-use "black box" tree ensemble from machine learning, like
:ref:`Random Trees` or
:ref:`Boosting` ), there is no need to care or even to know about the structure. You just construct the needed statistical model, train it, and use it. The ``CvDTreeTrainData`` structure is constructed and used internally. However, for custom tree algorithms or another sophisticated cases, the structure may be constructed and used explicitly. The scheme is the following:
The method returns the training parameters.
#.
The structure is initialized using the default constructor, followed by ``set_data``, or it is built using the full form of constructor. The parameter ``_shared`` must be set to ``true``.
#.
One or more trees are trained using this data (see the special form of the method :ocv:func:`CvDTree::train`).
#.
The structure is released as soon as all the trees using it are released.
CvDTree
-------
.. ocv:class:: CvDTree : public CvStatModel
The class implements a decision tree as described in the beginning of this section.
CvDTree::train
--------------
Trains a decision tree.
.. ocv:function:: bool CvDTree::train( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( CvMLData* trainData, CvDTreeParams params=CvDTreeParams() )
.. ocv:function:: bool CvDTree::train( CvDTreeTrainData* trainData, const CvMat* subsampleIdx )
.. ocv:pyfunction:: cv2.DTree.train(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]) -> retval
There are four ``train`` methods in :ocv:class:`CvDTree`:
* The **first two** methods follow the generic :ocv:func:`CvStatModel::train` conventions. It is the most complete form. Both data layouts (``tflag=CV_ROW_SAMPLE`` and ``tflag=CV_COL_SAMPLE``) are supported, as well as sample and variable subsets, missing measurements, arbitrary combinations of input and output variable types, and so on. The last parameter contains all of the necessary training parameters (see the :ocv:class:`CvDTreeParams` description).
* The **third** method uses :ocv:class:`CvMLData` to pass training data to a decision tree.
* The **last** method ``train`` is mostly used for building tree ensembles. It takes the pre-constructed :ocv:class:`CvDTreeTrainData` instance and an optional subset of the training set. The indices in ``subsampleIdx`` are counted relatively to the ``_sample_idx`` , passed to the ``CvDTreeTrainData`` constructor. For example, if ``_sample_idx=[1, 5, 7, 100]`` , then ``subsampleIdx=[0,3]`` means that the samples ``[1, 100]`` of the original training set are used.
The function is parallelized with the TBB library.
CvDTree::predict
----------------
Returns the leaf node of a decision tree corresponding to the input vector.
.. ocv:function:: CvDTreeNode* CvDTree::predict( const Mat& sample, const Mat& missingDataMask=Mat(), bool preprocessedInput=false ) const
.. ocv:function:: CvDTreeNode* CvDTree::predict( const CvMat* sample, const CvMat* missingDataMask=0, bool preprocessedInput=false ) const
.. ocv:pyfunction:: cv2.DTree.predict(sample[, missingDataMask[, preprocessedInput]]) -> retval
:param sample: Sample for prediction.
:param missingDataMask: Optional input missing measurement mask.
:param preprocessedInput: This parameter is normally set to ``false``, implying a regular input. If it is ``true``, the method assumes that all the values of the discrete input variables have been already normalized to :math:`0` to :math:`num\_of\_categories_i-1` ranges since the decision tree uses such normalized representation internally. It is useful for faster prediction with tree ensembles. For ordered input variables, the flag is not used.
The method traverses the decision tree and returns the reached leaf node as output. The prediction result, either the class label or the estimated function value, may be retrieved as the ``value`` field of the :ocv:class:`CvDTreeNode` structure, for example: ``dtree->predict(sample,mask)->value``.
CvDTree::calc_error
DTrees::setDParams
-------------------
Returns error of the decision tree.
Sets the training parameters
.. ocv:function:: float CvDTree::calc_error( CvMLData* trainData, int type, std::vector<float> *resp = 0 )
.. ocv:function:: void DTrees::setDParams( const Params& p )
:param trainData: Data for the decision tree.
:param p: Training parameters of type DTrees::Params.
:param type: Type of error. Possible values are:
* **CV_TRAIN_ERROR** Error on train samples.
* **CV_TEST_ERROR** Error on test samples.
:param resp: If it is not null then size of this vector will be set to the number of samples and each element will be set to result of prediction on the corresponding sample.
The method calculates error of the decision tree. In case of classification it is the percentage of incorrectly classified samples and in case of regression it is the mean of squared errors on samples.
The method sets the training parameters.
CvDTree::getVarImportance
-------------------------
Returns the variable importance array.
DTrees::getRoots
-------------------
Returns indices of root nodes
.. ocv:function:: Mat CvDTree::getVarImportance()
.. ocv:function:: std::vector<int>& DTrees::getRoots() const
.. ocv:function:: const CvMat* CvDTree::get_var_importance()
DTrees::getNodes
-------------------
Returns all the nodes
.. ocv:pyfunction:: cv2.DTree.getVarImportance() -> retval
.. ocv:function:: std::vector<Node>& DTrees::getNodes() const
CvDTree::get_root
-----------------
Returns the root of the decision tree.
All the node indices mentioned above (left, right, parent, root indices) are indices in the returned vector.
.. ocv:function:: const CvDTreeNode* CvDTree::get_root() const
DTrees::getSplits
-------------------
Returns all the splits
.. ocv:function:: std::vector<Split>& DTrees::getSplits() const
CvDTree::get_pruned_tree_idx
----------------------------
Returns the ``CvDTree::pruned_tree_idx`` parameter.
All the split indices mentioned above (``split``, ``next``, etc.) are indices in the returned vector.
.. ocv:function:: int CvDTree::get_pruned_tree_idx() const
DTrees::getSubsets
-------------------
Returns all the bitsets for categorical splits
The parameter ``DTree::pruned_tree_idx`` is used to prune a decision tree. See the ``CvDTreeNode::Tn`` parameter.
CvDTree::get_data
-----------------
Returns used train data of the decision tree.
.. ocv:function:: CvDTreeTrainData* CvDTree::get_data() const
Example: building a tree for classifying mushrooms. See the ``mushroom.cpp`` sample that demonstrates how to build and use the
decision tree.
.. ocv:function:: std::vector<int>& DTrees::getSubsets() const
``Split::subsetOfs`` is an offset in the returned vector.
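To make the index semantics concrete, here is a hedged sketch of manually walking one trained tree using the accessors above. It assumes a trained ``dtree`` with a single root, a ``sample`` array indexed by ``varIdx`` with categorical values already normalized, a caller-provided ``isCategorical`` flag per variable, 32-bit packing of the categorical bitsets, and that leaf nodes have a negative ``split`` index; surrogate splits (``Split::next``), missing values and ``Node::defaultDir`` are ignored: ::

    using namespace cv::ml;

    const std::vector<DTrees::Node>&  nodes   = dtree->getNodes();
    const std::vector<DTrees::Split>& splits  = dtree->getSplits();
    const std::vector<int>&           subsets = dtree->getSubsets();

    int nidx = dtree->getRoots()[0];
    while( nodes[nidx].split >= 0 )  // assumption: leaves carry no split
    {
        const DTrees::Split& s = splits[nodes[nidx].split];
        float val = sample[s.varIdx];
        bool goLeft;
        if( isCategorical[s.varIdx] )
        {
            // bitset rule: bit set -> left (assumed 32-bit int packing)
            int c = (int)val;
            goLeft = (subsets[s.subsetOfs + (c >> 5)] & (1 << (c & 31))) != 0;
        }
        else
            goLeft = val < s.c;      // ordered rule: var_value < c -> left
        if( s.inversed )
            goLeft = !goLeft;
        nidx = goLeft ? nodes[nidx].left : nodes[nidx].right;
    }
    double prediction = nodes[nidx].value;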
.. [Breiman84] Breiman, L., Friedman, J. Olshen, R. and Stone, C. (1984), *Classification and Regression Trees*, Wadsworth.

View File

@ -1,15 +0,0 @@
Extremely randomized trees
==========================
Extremely randomized trees have been introduced by Pierre Geurts, Damien Ernst and Louis Wehenkel in the article "Extremely randomized trees", 2006 [http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.65.7485&rep=rep1&type=pdf]. The algorithm of growing Extremely randomized trees is similar to :ref:`Random Trees` (Random Forest), but there are two differences:
#. Extremely randomized trees don't apply the bagging procedure to construct a set of the training samples for each tree. The same input training set is used to train all trees.
#. Extremely randomized trees pick node splits completely at random (both the variable index and the variable splitting value are chosen randomly), whereas Random Forest finds the best split (optimal by variable index and splitting value) among a random subset of variables.
CvERTrees
----------
.. ocv:class:: CvERTrees : public CvRTrees
The class implements the Extremely randomized trees algorithm. ``CvERTrees`` is inherited from :ocv:class:`CvRTrees` and has the same interface, so see description of :ocv:class:`CvRTrees` class to get details. To set the training parameters of Extremely randomized trees the same class :ocv:struct:`CvRTParams` is used.

View File

@ -66,7 +66,7 @@ Alternatively, the algorithm may start with the M-step when the initial values f
:math:`p_{i,k}` can be provided. Another alternative when
:math:`p_{i,k}` are unknown is to use a simpler clustering algorithm to pre-cluster the input samples and thus obtain initial
:math:`p_{i,k}` . Often (including machine learning) the
:ocv:func:`kmeans` algorithm is used for that purpose.
``k-means`` algorithm is used for that purpose.
One of the main problems of the EM algorithm is a large number
of parameters to estimate. The majority of the parameters reside in
@ -91,18 +91,21 @@ already a good enough approximation).
EM
--
.. ocv:class:: EM : public Algorithm
.. ocv:class:: EM : public StatModel
The class implements the EM algorithm as described in the beginning of this section. It is inherited from :ocv:class:`Algorithm`.
The class implements the EM algorithm as described in the beginning of this section.
EM::Params
----------
.. ocv:class:: EM::Params
EM::EM
------
The constructor of the class
The class describes EM training parameters.
.. ocv:function:: EM::EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON) )
EM::Params::Params
------------------
The constructor
.. ocv:pyfunction:: cv2.EM([nclusters[, covMatType[, termCrit]]]) -> <EM object>
.. ocv:function:: EM::Params::Params( int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6))
:param nclusters: The number of mixture components in the Gaussian mixture model. The default value of the parameter is ``EM::DEFAULT_NCLUSTERS=5``. Some EM implementations could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
@ -116,21 +119,26 @@ The constructor of the class
:param termCrit: The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations ``termCrit.maxCount`` (number of M-steps) or when relative change of likelihood logarithm is less than ``termCrit.epsilon``. Default maximum number of iterations is ``EM::DEFAULT_MAX_ITERS=100``.
EM::create
----------
Creates empty EM model
.. ocv:function:: Ptr<EM> EM::create(const Params& params=Params())
:param params: EM parameters
The model should then be trained using the ``StatModel::train(traindata, flags)`` method. Alternatively, you can use one of the ``EM::train*`` methods or load a model from file using ``StatModel::load<EM>(filename)``.
EM::train
---------
Estimates the Gaussian mixture parameters from a samples set.
Static methods that estimate the Gaussian mixture parameters from a set of samples
.. ocv:function:: bool EM::train(InputArray samples, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:function:: Ptr<EM> EM::train(InputArray samples, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray(), const Params& params=Params())
.. ocv:function:: bool EM::trainE(InputArray samples, InputArray means0, InputArray covs0=noArray(), InputArray weights0=noArray(), OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:function:: bool EM::train_startWithE(InputArray samples, InputArray means0, InputArray covs0=noArray(), InputArray weights0=noArray(), OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray(), const Params& params=Params())
.. ocv:function:: bool EM::trainM(InputArray samples, InputArray probs0, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
.. ocv:pyfunction:: cv2.EM.train(samples[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainE(samples, means0[, covs0[, weights0[, logLikelihoods[, labels[, probs]]]]]) -> retval, logLikelihoods, labels, probs
.. ocv:pyfunction:: cv2.EM.trainM(samples, probs0[, logLikelihoods[, labels[, probs]]]) -> retval, logLikelihoods, labels, probs
.. ocv:function:: bool EM::train_startWithM(InputArray samples, InputArray probs0, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray(), const Params& params=Params())
:param samples: Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have the ``CV_64F`` type, it is converted internally to a matrix of that type before further computation.
@ -148,6 +156,8 @@ Estimates the Gaussian mixture parameters from a samples set.
:param probs: The optional output matrix that contains posterior probabilities of each Gaussian mixture component given each sample. It has :math:`nsamples \times nclusters` size and ``CV_64FC1`` type.
:param params: The Gaussian mixture params, see ``EM::Params`` description above.
Three versions of training method differ in the initialization of Gaussian mixture model parameters and start step:
* **train** - Starts with Expectation step. Initial values of the model parameters will be estimated by the k-means algorithm.
@ -167,15 +177,13 @@ Unlike many of the ML models, EM is an unsupervised learning algorithm and it do
:math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).
The trained model can be used further for prediction, just like any other classifier. The trained model is similar to the
:ocv:class:`CvNormalBayesClassifier`.
``NormalBayesClassifier``.
EM::predict
-----------
EM::predict2
------------
Returns a likelihood logarithm value and an index of the most probable mixture component for the given sample.
.. ocv:function:: Vec2d EM::predict(InputArray sample, OutputArray probs=noArray()) const
.. ocv:pyfunction:: cv2.EM.predict(sample[, probs]) -> retval, probs
.. ocv:function:: Vec2d EM::predict2(InputArray sample, OutputArray probs=noArray()) const
:param sample: A sample for classification. It should be a one-channel matrix of :math:`1 \times dims` or :math:`dims \times 1` size.
@ -183,28 +191,29 @@ Returns a likelihood logarithm value and an index of the most probable mixture c
The method returns a two-element ``double`` vector. The zero element is the likelihood logarithm value for the sample. The first element is the index of the most probable mixture component for the given sample.
CvEM::isTrained
---------------
Returns ``true`` if the Gaussian mixture model was trained.
.. ocv:function:: bool EM::isTrained() const
EM::getMeans
------------
Returns the cluster centers (means of the Gaussian mixture)
.. ocv:pyfunction:: cv2.EM.isTrained() -> retval
.. ocv:function:: Mat EM::getMeans() const
EM::read, EM::write
-------------------
See :ocv:func:`Algorithm::read` and :ocv:func:`Algorithm::write`.
Returns a matrix with the number of rows equal to the number of mixtures and the number of columns equal to the space dimensionality.
EM::get, EM::set
----------------
See :ocv:func:`Algorithm::get` and :ocv:func:`Algorithm::set`. The following parameters are available:
* ``"nclusters"``
* ``"covMatType"``
* ``"maxIters"``
* ``"epsilon"``
* ``"weights"`` *(read-only)*
* ``"means"`` *(read-only)*
* ``"covs"`` *(read-only)*
EM::getWeights
--------------
Returns weights of the mixtures
..
.. ocv:function:: Mat EM::getWeights() const
Returns a vector with the number of elements equal to the number of mixtures.
EM::getCovs
--------------
Returns covariance matrices
.. ocv:function:: void EM::getCovs(std::vector<Mat>& covs) const
Returns the vector of covariance matrices. The number of matrices equals the number of Gaussian mixtures; each matrix is a square floating-point matrix N x N, where N is the space dimensionality.
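A short usage sketch assembled from the methods above; the ``samples`` matrix (one sample per row) is assumed to be prepared elsewhere: ::

    using namespace cv::ml;

    EM::Params params(3 /* nclusters */, EM::COV_MAT_DIAGONAL);

    cv::Mat logLikelihoods, labels;
    cv::Ptr<EM> em = EM::train(samples, logLikelihoods, labels,
                               cv::noArray(), params);

    cv::Mat means   = em->getMeans();    // nclusters x dims
    cv::Mat weights = em->getWeights();  // nclusters elements
    std::vector<cv::Mat> covs;
    em->getCovs(covs);                   // nclusters square matrices

    // log-likelihood (res[0]) and most probable component (res[1]) for one sample
    cv::Vec2d res = em->predict2(samples.row(0), cv::noArray());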

View File

@ -1,272 +0,0 @@
.. _Gradient Boosted Trees:
Gradient Boosted Trees
======================
.. highlight:: cpp
Gradient Boosted Trees (GBT) is a generalized boosting algorithm introduced by
Jerome Friedman: http://www.salfordsystems.com/doc/GreedyFuncApproxSS.pdf .
In contrast to the AdaBoost.M1 algorithm, GBT can deal with both multiclass
classification and regression problems. Moreover, it can use any
differentiable loss function; some popular ones are implemented.
Using decision trees (:ocv:class:`CvDTree`) as base learners allows processing of ordered
and categorical variables.
.. _Training GBT:
Training the GBT model
----------------------
The Gradient Boosted Trees model represents an ensemble of single regression trees
built in a greedy fashion. The training procedure is an iterative process
similar to numerical optimization via the gradient descent method. The summary loss
on the training set depends only on the current model predictions for the
training samples, in other words
:math:`\sum^N_{i=1}L(y_i, F(x_i)) \equiv \mathcal{L}(F(x_1), F(x_2), ... , F(x_N))
\equiv \mathcal{L}(F)`. And the :math:`\mathcal{L}(F)`
gradient can be computed as follows:
.. math::
grad(\mathcal{L}(F)) = \left( \dfrac{\partial{L(y_1, F(x_1))}}{\partial{F(x_1)}},
\dfrac{\partial{L(y_2, F(x_2))}}{\partial{F(x_2)}}, ... ,
\dfrac{\partial{L(y_N, F(x_N))}}{\partial{F(x_N)}} \right) .
At every training step, a single regression tree is built to predict the
antigradient vector components. The step length is computed for the given loss
function, separately for every region determined by a tree leaf; it can be
eliminated by changing the values of the leaves directly.
The main scheme of the training process is shown below:
#.
Find the best constant model.
#.
For :math:`i` in :math:`[1,M]`:
#.
Compute the antigradient.
#.
Grow a regression tree to predict antigradient components.
#.
Change values in the tree leaves.
#.
Add the tree to the model.
The following loss functions are implemented for regression problems:
*
Squared loss (``CvGBTrees::SQUARED_LOSS``):
:math:`L(y,f(x))=\dfrac{1}{2}(y-f(x))^2`
*
Absolute loss (``CvGBTrees::ABSOLUTE_LOSS``):
:math:`L(y,f(x))=|y-f(x)|`
*
Huber loss (``CvGBTrees::HUBER_LOSS``):
:math:`L(y,f(x)) = \left\{ \begin{array}{lr}
\delta\cdot\left(|y-f(x)|-\dfrac{\delta}{2}\right) & : |y-f(x)|>\delta\\
\dfrac{1}{2}\cdot(y-f(x))^2 & : |y-f(x)|\leq\delta \end{array} \right.`,
where :math:`\delta` is the :math:`\alpha`-quantile estimation of the
:math:`|y-f(x)|`. In the current implementation :math:`\alpha=0.2`.
The following loss functions are implemented for classification problems:
*
Deviance or cross-entropy loss (``CvGBTrees::DEVIANCE_LOSS``):
:math:`K` functions are built, one function for each output class, and
:math:`L(y,f_1(x),...,f_K(x)) = -\sum^K_{k=0}1(y=k)\ln{p_k(x)}`,
where :math:`p_k(x)=\dfrac{\exp{f_k(x)}}{\sum^K_{i=1}\exp{f_i(x)}}`
is the estimation of the probability of :math:`y=k`.
As a result, you get the following model:
.. math:: f(x) = f_0 + \nu\cdot\sum^M_{i=1}T_i(x) ,
where :math:`f_0` is the initial guess (the best constant model) and :math:`\nu`
is a regularization parameter from the interval :math:`(0,1]`, further called
*shrinkage*.
.. _Predicting with GBT:
Predicting with the GBT Model
-----------------------------
To get the GBT model prediction, you need to compute the sum of responses of
all the trees in the ensemble. For regression problems, it is the answer.
For classification problems, the result is :math:`\arg\max_{i=1..K}(f_i(x))`.
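In code, this classification rule amounts to computing each class's ensemble sum and taking the argmax; a hedged sketch using the ``CvGBTrees::predict`` overload documented below (``gbt``, ``sample`` and the class count ``K`` are assumed to be set up elsewhere): ::

    const int K = 3;           // number of classes, illustrative
    int best = 0;
    float bestSum = -FLT_MAX;  // FLT_MAX from <cfloat>
    for( int k = 0; k < K; k++ )
    {
        // with k >= 0, predict returns the sum of the k-th ensemble's trees
        float s = gbt.predict(sample, cv::Mat(), cv::Range::all(), k);
        if( s > bestSum )
        {
            bestSum = s;
            best = k;
        }
    }
    // 'best' is the predicted class label: argmax over per-class sums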
.. highlight:: cpp
CvGBTreesParams
---------------
.. ocv:struct:: CvGBTreesParams : public CvDTreeParams
GBT training parameters.
The structure contains parameters for each single decision tree in the ensemble,
as well as the whole model characteristics. The structure is derived from
:ocv:class:`CvDTreeParams` but not all of the decision tree parameters are supported:
cross-validation, pruning, and class priorities are not used.
CvGBTreesParams::CvGBTreesParams
--------------------------------
.. ocv:function:: CvGBTreesParams::CvGBTreesParams()
.. ocv:function:: CvGBTreesParams::CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage, float subsample_portion, int max_depth, bool use_surrogates )
:param loss_function_type: Type of the loss function used for training
(see :ref:`Training GBT`). It must be one of the
following types: ``CvGBTrees::SQUARED_LOSS``, ``CvGBTrees::ABSOLUTE_LOSS``,
``CvGBTrees::HUBER_LOSS``, ``CvGBTrees::DEVIANCE_LOSS``. The first three
types are used for regression problems, and the last one for
classification.
:param weak_count: Count of boosting algorithm iterations. ``weak_count*K`` is the total
count of trees in the GBT model, where ``K`` is the output classes count
(equal to one in case of a regression).
:param shrinkage: Regularization parameter (see :ref:`Training GBT`).
:param subsample_portion: Portion of the whole training set used for each algorithm iteration.
Subset is generated randomly. For more information see
http://www.salfordsystems.com/doc/StochasticBoostingSS.pdf.
:param max_depth: Maximal depth of each decision tree in the ensemble (see :ocv:class:`CvDTree`).
:param use_surrogates: If ``true``, surrogate splits are built (see :ocv:class:`CvDTree`).
By default the following constructor is used:
.. code-block:: cpp
CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 200, 0.01f, 0.8f, 3, false)
: CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 )
CvGBTrees
---------
.. ocv:class:: CvGBTrees : public CvStatModel
The class implements the Gradient boosted tree model as described in the beginning of this section.
CvGBTrees::CvGBTrees
--------------------
Default and training constructors.
.. ocv:function:: CvGBTrees::CvGBTrees()
.. ocv:function:: CvGBTrees::CvGBTrees( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvGBTreesParams params=CvGBTreesParams() )
.. ocv:function:: CvGBTrees::CvGBTrees( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvGBTreesParams params=CvGBTreesParams() )
.. ocv:pyfunction:: cv2.GBTrees([trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]]) -> <GBTrees object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
CvGBTrees::train
----------------
Trains a Gradient boosted tree model.
.. ocv:function:: bool CvGBTrees::train(const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvGBTreesParams params=CvGBTreesParams(), bool update=false)
.. ocv:function:: bool CvGBTrees::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvGBTreesParams params=CvGBTreesParams(), bool update=false )
.. ocv:function:: bool CvGBTrees::train(CvMLData* data, CvGBTreesParams params=CvGBTreesParams(), bool update=false)
.. ocv:pyfunction:: cv2.GBTrees.train(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params[, update]]]]]]) -> retval
The first train method follows the common template (see :ocv:func:`CvStatModel::train`).
Both ``tflag`` values (``CV_ROW_SAMPLE``, ``CV_COL_SAMPLE``) are supported.
``trainData`` must be of the ``CV_32F`` type. ``responses`` must be a matrix of type
``CV_32S`` or ``CV_32F``. In both cases it is converted into the ``CV_32F``
matrix inside the training procedure. ``varIdx`` and ``sampleIdx`` must be a
list of indices (``CV_32S``) or a mask (``CV_8U`` or ``CV_8S``). ``update`` is
a dummy parameter.
The second form of :ocv:func:`CvGBTrees::train` function uses :ocv:class:`CvMLData` as a
data set container. ``update`` is still a dummy parameter.
All parameters specific to the GBT model are passed into the training function
as a :ocv:class:`CvGBTreesParams` structure.
CvGBTrees::predict
------------------
Predicts a response for an input sample.
.. ocv:function:: float CvGBTrees::predict(const Mat& sample, const Mat& missing=Mat(), const Range& slice = Range::all(), int k=-1) const
.. ocv:function:: float CvGBTrees::predict( const CvMat* sample, const CvMat* missing=0, CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, int k=-1 ) const
.. ocv:pyfunction:: cv2.GBTrees.predict(sample[, missing[, slice[, k]]]) -> retval
:param sample: Input feature vector that has the same format as every training set
element. If not all the variables were actually used during training,
``sample`` contains forged values at the appropriate places.
:param missing: Missing values mask, which is a dimensional matrix of the same size as
``sample`` having the ``CV_8U`` type. ``1`` corresponds to the missing value
in the same position in the ``sample`` vector. If there are no missing values
in the feature vector, an empty matrix can be passed instead of the missing mask.
:param weakResponses: Matrix used to obtain predictions of all the trees.
The matrix has :math:`K` rows,
where :math:`K` is the count of output classes (1 for the regression case).
The matrix has as many columns as the ``slice`` length.
:param slice: Parameter defining the part of the ensemble used for prediction.
If ``slice = Range::all()``, all trees are used. Use this parameter to
get predictions from GBT models with different ensemble sizes while
training only one model.
:param k: Number of tree ensembles built in case of the classification problem
(see :ref:`Training GBT`). Use this
parameter to change the output to sum of the trees' predictions in the
``k``-th ensemble only. To get the total GBT model prediction, ``k`` value
must be -1. For regression problems, ``k`` is also equal to -1.
The method predicts the response corresponding to the given sample
(see :ref:`Predicting with GBT`).
The result is either the class label or the estimated function value. The
:ocv:func:`CvGBTrees::predict` method enables using the parallel version of the GBT model
prediction if the OpenCV is built with the TBB library. In this case, predictions
of single trees are computed in a parallel fashion.
CvGBTrees::clear
----------------
Clears the model.
.. ocv:function:: void CvGBTrees::clear()
.. ocv:pyfunction:: cv2.GBTrees.clear() -> None
The function deletes the data set information and all the weak models and sets all internal
variables to the initial state. The function is called in :ocv:func:`CvGBTrees::train` and in the
destructor.
CvGBTrees::calc_error
---------------------
Calculates a training or testing error.
.. ocv:function:: float CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp = 0 )
:param _data: Data set.
:param type: Parameter defining the error that should be computed: train (``CV_TRAIN_ERROR``) or test
(``CV_TEST_ERROR``).
:param resp: If non-zero, a vector of predictions on the corresponding data set is
returned.
If the :ocv:class:`CvMLData` data is used to store the data set, :ocv:func:`CvGBTrees::calc_error` can be
used to get a training/testing error easily and (optionally) all predictions
on the training/testing set. If the Intel TBB library is used, the error is computed in a
parallel way, namely, predictions for different samples are computed at the same time.
In case of a regression problem, a mean squared error is returned. For
classifications, the result is a misclassification error in percent.

View File

@ -5,9 +5,9 @@ K-Nearest Neighbors
The algorithm caches all training samples and predicts the response for a new sample by analyzing a certain number (**K**) of the nearest neighbors of the sample using voting, calculating a weighted sum, and so on. The method is sometimes referred to as "learning by example" because for prediction it looks for the feature vector with a known response that is closest to the given vector.
CvKNearest
KNearest
----------
.. ocv:class:: CvKNearest : public CvStatModel
.. ocv:class:: KNearest : public StatModel
The class implements K-Nearest Neighbors model as described in the beginning of this section.
@ -17,65 +17,32 @@ The class implements K-Nearest Neighbors model as described in the beginning of
* (Python) An example of grid search digit recognition using KNearest can be found at opencv_source/samples/python2/digits_adjust.py
* (Python) An example of video digit recognition using KNearest can be found at opencv_source/samples/python2/digits_video.py
CvKNearest::CvKNearest
KNearest::create
----------------------
Default and training constructors.
Creates the empty model
.. ocv:function:: CvKNearest::CvKNearest()
.. ocv:function:: Ptr<KNearest> KNearest::create(const Params& params=Params())
.. ocv:function:: CvKNearest::CvKNearest( const Mat& trainData, const Mat& responses, const Mat& sampleIdx=Mat(), bool isRegression=false, int max_k=32 )
:param params: The model parameters: the default number of neighbors to use in the ``predict`` method (in ``KNearest::findNearest`` this number must be passed explicitly) and a flag specifying whether a classification or a regression model should be trained.
.. ocv:function:: CvKNearest::CvKNearest( const CvMat* trainData, const CvMat* responses, const CvMat* sampleIdx=0, bool isRegression=false, int max_k=32 )
The static method creates an empty KNearest classifier, which should then be trained using the ``train`` method (see ``StatModel::train``). Alternatively, you can load a pre-trained model from a file using ``StatModel::load<KNearest>(filename)``.
See :ocv:func:`CvKNearest::train` for additional parameter descriptions.
CvKNearest::train
-----------------
Trains the model.
.. ocv:function:: bool CvKNearest::train( const Mat& trainData, const Mat& responses, const Mat& sampleIdx=Mat(), bool isRegression=false, int maxK=32, bool updateBase=false )
.. ocv:function:: bool CvKNearest::train( const CvMat* trainData, const CvMat* responses, const CvMat* sampleIdx=0, bool is_regression=false, int maxK=32, bool updateBase=false )
.. ocv:pyfunction:: cv2.KNearest.train(trainData, responses[, sampleIdx[, isRegression[, maxK[, updateBase]]]]) -> retval
:param isRegression: Type of the problem: ``true`` for regression and ``false`` for classification.
:param maxK: Number of maximum neighbors that may be passed to the method :ocv:func:`CvKNearest::find_nearest`.
:param updateBase: Specifies whether the model is trained from scratch (``updateBase=false``), or it is updated using the new training data (``updateBase=true``). In the latter case, the parameter ``maxK`` must not be larger than the original value.
The method trains the K-Nearest model. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations:
* Only ``CV_ROW_SAMPLE`` data layout is supported.
* Input variables are all ordered.
* Output variables can be either categorical ( ``is_regression=false`` ) or ordered ( ``is_regression=true`` ).
* Variable subsets (``var_idx``) and missing measurements are not supported.
CvKNearest::find_nearest
KNearest::findNearest
------------------------
Finds the neighbors and predicts responses for input vectors.
.. ocv:function:: float CvKNearest::find_nearest( const Mat& samples, int k, Mat* results=0, const float** neighbors=0, Mat* neighborResponses=0, Mat* dist=0 ) const
.. ocv:function:: float KNearest::findNearest( InputArray samples, int k, OutputArray results, OutputArray neighborResponses=noArray(), OutputArray dist=noArray() ) const
.. ocv:function:: float CvKNearest::find_nearest( const Mat& samples, int k, Mat& results, Mat& neighborResponses, Mat& dists) const
:param samples: Input samples stored by rows. It is a single-precision floating-point matrix of ``<number_of_samples> * <number_of_features>`` size.
.. ocv:function:: float CvKNearest::find_nearest( const CvMat* samples, int k, CvMat* results=0, const float** neighbors=0, CvMat* neighborResponses=0, CvMat* dist=0 ) const
:param k: Number of used nearest neighbors. Should be greater than 1.
.. ocv:pyfunction:: cv2.KNearest.find_nearest(samples, k[, results[, neighborResponses[, dists]]]) -> retval, results, neighborResponses, dists
:param results: Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with ``<number_of_samples>`` elements.
:param neighborResponses: Optional output values for corresponding neighbors. It is a single-precision floating-point matrix of ``<number_of_samples> * k`` size.
:param samples: Input samples stored by rows. It is a single-precision floating-point matrix of :math:`number\_of\_samples \times number\_of\_features` size.
:param k: Number of used nearest neighbors. It must satisfy constraint: :math:`k \le` :ocv:func:`CvKNearest::get_max_k`.
:param results: Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with ``number_of_samples`` elements.
:param neighbors: Optional output pointers to the neighbor vectors themselves. It is an array of ``k*samples->rows`` pointers.
:param neighborResponses: Optional output values for corresponding ``neighbors``. It is a single-precision floating-point matrix of :math:`number\_of\_samples \times k` size.
:param dist: Optional output distances from the input vectors to the corresponding ``neighbors``. It is a single-precision floating-point matrix of :math:`number\_of\_samples \times k` size.
:param dist: Optional output distances from the input vectors to the corresponding neighbors. It is a single-precision floating-point matrix of ``<number_of_samples> * k`` size.
For each input vector (a row of the matrix ``samples``), the method finds the ``k`` nearest neighbors. In case of regression, the predicted result is a mean value of the particular vector's neighbor responses. In case of classification, the class is determined by voting.
@ -87,110 +54,18 @@ If only a single input vector is passed, all output matrices are optional and th
The function is parallelized with the TBB library.
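As a minimal sketch of the new interface described above (the training data here is synthetic, the parameter values are arbitrary, and ``train`` follows the ``StatModel::train`` convention): ::

    Mat samples( 100, 2, CV_32F ), labels( 100, 1, CV_32S );
    randu( samples, 0, 500 );   // random 2D points
    randu( labels, 0, 2 );      // two classes: 0 and 1

    Ptr<TrainData> td = TrainData::create( samples, ROW_SAMPLE, labels );
    Ptr<KNearest> knn = KNearest::create();
    knn->setDefaultK( 10 );
    knn->train( td );

    Mat query( 1, 2, CV_32F ), results, neighborResponses, dists;
    randu( query, 0, 500 );
    float response = knn->findNearest( query, 5, results, neighborResponses, dists );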
CvKNearest::get_max_k
KNearest::getDefaultK
---------------------
Returns the maximum number of neighbors that may be passed to the method :ocv:func:`CvKNearest::find_nearest`.
Returns the default number of neighbors
.. ocv:function:: int CvKNearest::get_max_k() const
.. ocv:function:: int KNearest::getDefaultK() const
CvKNearest::get_var_count
-------------------------
Returns the number of used features (variables count).
The function returns the default number of neighbors that is used by the simpler ``predict`` method (as opposed to ``findNearest``).
.. ocv:function:: int CvKNearest::get_var_count() const
KNearest::setDefaultK
---------------------
Sets the default number of neighbors
CvKNearest::get_sample_count
----------------------------
Returns the total number of train samples.
.. ocv:function:: void KNearest::setDefaultK(int k)
.. ocv:function:: int CvKNearest::get_sample_count() const
CvKNearest::is_regression
-------------------------
Returns type of the problem: ``true`` for regression and ``false`` for classification.
.. ocv:function:: bool CvKNearest::is_regression() const
The sample below (currently using the obsolete ``CvMat`` structures) demonstrates the use of the k-nearest classifier for 2D point classification: ::
    #include "ml.h"
    #include "highgui.h"

    int main( int argc, char** argv )
    {
        const int K = 10;
        int i, j, k, accuracy;
        float response;
        int train_sample_count = 100;
        CvRNG rng_state = cvRNG(-1);
        CvMat* trainData = cvCreateMat( train_sample_count, 2, CV_32FC1 );
        CvMat* trainClasses = cvCreateMat( train_sample_count, 1, CV_32FC1 );
        IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
        float _sample[2];
        CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
        cvZero( img );

        CvMat trainData1, trainData2, trainClasses1, trainClasses2;

        // form the training samples: two Gaussian clouds of 2D points
        cvGetRows( trainData, &trainData1, 0, train_sample_count/2 );
        cvRandArr( &rng_state, &trainData1, CV_RAND_NORMAL, cvScalar(200,200), cvScalar(50,50) );

        cvGetRows( trainData, &trainData2, train_sample_count/2, train_sample_count );
        cvRandArr( &rng_state, &trainData2, CV_RAND_NORMAL, cvScalar(300,300), cvScalar(50,50) );

        cvGetRows( trainClasses, &trainClasses1, 0, train_sample_count/2 );
        cvSet( &trainClasses1, cvScalar(1) );

        cvGetRows( trainClasses, &trainClasses2, train_sample_count/2, train_sample_count );
        cvSet( &trainClasses2, cvScalar(2) );

        // learn classifier
        CvKNearest knn( trainData, trainClasses, 0, false, K );
        CvMat* nearests = cvCreateMat( 1, K, CV_32FC1 );

        for( i = 0; i < img->height; i++ )
        {
            for( j = 0; j < img->width; j++ )
            {
                sample.data.fl[0] = (float)j;
                sample.data.fl[1] = (float)i;

                // estimate the response and get the neighbors' labels
                response = knn.find_nearest( &sample, K, 0, 0, nearests, 0 );

                // compute the number of neighbors representing the majority
                for( k = 0, accuracy = 0; k < K; k++ )
                {
                    if( nearests->data.fl[k] == response )
                        accuracy++;
                }
                // highlight the pixel depending on the accuracy (or confidence)
                cvSet2D( img, i, j, response == 1 ?
                    (accuracy > 5 ? CV_RGB(180,0,0) : CV_RGB(180,120,0)) :
                    (accuracy > 5 ? CV_RGB(0,180,0) : CV_RGB(120,120,0)) );
            }
        }

        // display the original training samples
        for( i = 0; i < train_sample_count/2; i++ )
        {
            CvPoint pt;
            pt.x = cvRound(trainData1.data.fl[i*2]);
            pt.y = cvRound(trainData1.data.fl[i*2+1]);
            cvCircle( img, pt, 2, CV_RGB(255,0,0), CV_FILLED );
            pt.x = cvRound(trainData2.data.fl[i*2]);
            pt.y = cvRound(trainData2.data.fl[i*2+1]);
            cvCircle( img, pt, 2, CV_RGB(0,255,0), CV_FILLED );
        }

        cvNamedWindow( "classifier result", 1 );
        cvShowImage( "classifier result", img );
        cvWaitKey(0);

        // release the allocated buffers
        cvReleaseMat( &nearests );
        cvReleaseMat( &trainClasses );
        cvReleaseMat( &trainData );
        cvReleaseImage( &img );
        return 0;
    }
The function sets the default number of neighbors that is used by the simpler ``predict`` method (as opposed to ``findNearest``).
View File
@ -15,9 +15,7 @@ Most of the classification and regression algorithms are implemented as C++ clas
support_vector_machines
decision_trees
boosting
gradient_boosted_trees
random_trees
ertrees
expectation_maximization
neural_networks
mldata
View File
@ -1,279 +1,126 @@
MLData
Training Data
===================
.. highlight:: cpp
For the machine learning algorithms, the data set is often stored in a file of the ``.csv``-like format. The file contains a table of predictor and response values where each row of the table corresponds to a sample. Missing values are supported. The UC Irvine Machine Learning Repository (http://archive.ics.uci.edu/ml/) provides many data sets stored in such a format to the machine learning community. The class ``MLData`` is implemented to easily load the data for training one of the OpenCV machine learning algorithms. For float values, only the ``'.'`` separator is supported. The table can have a header; in that case, the user has to set the number of header lines so that they are skipped during file reading.
In machine learning algorithms there is notion of training data. Training data includes several components:
CvMLData
--------
.. ocv:class:: CvMLData
* A set of training samples. Each training sample is a vector of values (in Computer Vision it's sometimes referred to as a feature vector). Usually all the vectors have the same number of components (features); the OpenCV ml module assumes that. Each feature can be ordered (i.e. its values are floating-point numbers that can be compared with each other and strictly ordered, i.e. sorted) or categorical (i.e. its value belongs to a fixed set of values that can be integers, strings, etc.).
Class for loading the data from a ``.csv`` file.
::
* Optional set of responses corresponding to the samples. Training data with no responses is used in unsupervised learning algorithms that learn the structure of the supplied data based on distances between different samples. Training data with responses is used in supervised learning algorithms, which learn the function mapping samples to responses. Usually the responses are scalar values, ordered (when we deal with a regression problem) or categorical (when we deal with a classification problem; in this case the responses are often called "labels"). Some algorithms, most notably neural networks, can handle not only scalar, but also multi-dimensional or vector responses.
class CV_EXPORTS CvMLData
{
public:
CvMLData();
virtual ~CvMLData();
* Another optional component is the mask of missing measurements. Most algorithms require that all the components in all the training samples be valid, but some other algorithms, such as decision trees, can handle cases of missing measurements.
int read_csv(const char* filename);
* In the case of a classification problem, the user may want to give different weights to different classes. This is useful, for example, when
* the user wants to shift prediction accuracy towards a lower false-alarm rate or a higher hit-rate.
* the user wants to compensate for significantly different amounts of training samples from different classes.
const CvMat* get_values() const;
const CvMat* get_responses();
const CvMat* get_missing() const;
* In addition to that, each training sample may be given a weight if the user wants the algorithm to pay special attention to certain training samples and adjust the training model accordingly.
void set_response_idx( int idx );
int get_response_idx() const;
* Also, the user may wish not to use the whole training data at once, but rather use parts of it, e.g. to do parameter optimization via a cross-validation procedure.
As you can see, training data can have a rather complex structure; besides, it may be very big and/or not entirely available, so there is a need for an abstraction of this concept. In OpenCV ml, the ``cv::ml::TrainData`` class provides it.
void set_train_test_split( const CvTrainTestSplit * spl);
const CvMat* get_train_sample_idx() const;
const CvMat* get_test_sample_idx() const;
void mix_train_and_test_idx();
TrainData
---------
.. ocv:class:: TrainData
const CvMat* get_var_idx();
void change_var_idx( int vi, bool state );
Class encapsulating training data. Please note that the class only specifies the interface of training data, but not its implementation. All the statistical model classes in ml take ``Ptr<TrainData>``. In other words, you can create your own class derived from ``TrainData`` and supply a smart pointer to an instance of this class to ``StatModel::train``.
const CvMat* get_var_types();
void set_var_types( const char* str );
TrainData::loadFromCSV
----------------------
Reads the dataset from a .csv file and returns the ready-to-use training data.
int get_var_type( int var_idx ) const;
void change_var_type( int var_idx, int type);
void set_delimiter( char ch );
char get_delimiter() const;
void set_miss_ch( char ch );
char get_miss_ch() const;
const std::map<String, int>& get_class_labels_map() const;
protected:
...
};
CvMLData::read_csv
------------------
Reads the data set from a ``.csv``-like ``filename`` file and stores all read values in a matrix.
.. ocv:function:: int CvMLData::read_csv(const char* filename)
.. ocv:function:: Ptr<TrainData> loadFromCSV(const String& filename, int headerLineCount, int responseStartIdx=-1, int responseEndIdx=-1, const String& varTypeSpec=String(), char delimiter=',', char missch='?')
:param filename: The input file name
While reading the data, the method tries to define the type of variables (predictors and responses): ordered or categorical. If a value of the variable is not numerical (except for the label for a missing value), the type of the variable is set to ``CV_VAR_CATEGORICAL``. If all existing values of the variable are numerical, the type of the variable is set to ``CV_VAR_ORDERED``. So, the default definition of variable types works correctly for all cases except the case of a categorical variable with numerical class labels. In this case, the type ``CV_VAR_ORDERED`` is set. You should change the type to ``CV_VAR_CATEGORICAL`` using the method :ocv:func:`CvMLData::change_var_type`. For categorical variables, a common map is built to convert a string class label to the numerical class label. Use :ocv:func:`CvMLData::get_class_labels_map` to obtain this map.
:param headerLineCount: The number of lines in the beginning to skip; besides the header, the function also skips empty lines and lines starting with '#'
Also, when reading the data, the method constructs the mask of missing values: for example, values equal to `'?'` are treated as missing.
:param responseStartIdx: Index of the first output variable. If -1, the function considers the last variable as the response
CvMLData::get_values
--------------------
Returns a pointer to the matrix of predictors and response values
:param responseEndIdx: Index of the last output variable + 1. If -1, then there is a single response variable at ``responseStartIdx``.
.. ocv:function:: const CvMat* CvMLData::get_values() const
:param varTypeSpec: The optional text string that specifies the variables' types. It has the format ``ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]``. That is, variables from n1 to n2 (inclusive range), n3, n4 to n5 ... are considered ordered and n6, n7 to n8 ... are considered categorical. The range [n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8] should cover all the variables. If ``varTypeSpec`` is not specified, then the algorithm uses the following rules:
1. all input variables are considered ordered by default. If some column contains non-numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding variable is considered categorical.
2. if there are several output variables, they are all considered ordered. An error is reported when non-numerical values are used.
3. if there is a single output variable, it is considered categorical when its values are non-numerical or are all integers, and ordered otherwise.
The method returns a pointer to the matrix of predictor and response ``values`` or ``0`` if the data has not been loaded from the file yet.
:param delimiter: The character used to separate values in each line.
The row count of this matrix equals the sample count. The column count equals the number of predictors plus one for the response (if it exists). This means that each row of the matrix contains the predictor values and the response of one sample. The matrix type is ``CV_32FC1``.
:param missch: The character used to specify missing measurements. It should not be a digit. Although it's a non-numerical value, it does not affect the decision of whether the variable is ordered or categorical.
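For example, a minimal sketch of loading a data set whose last column holds the single response (the file name is a placeholder): ::

    Ptr<TrainData> data = TrainData::loadFromCSV( "dataset.csv", // hypothetical file
                                                  1,    // skip one header line
                                                  -1,   // the last variable is the response
                                                  -1 ); // ... and it is the only one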
CvMLData::get_responses
-----------------------
Returns a pointer to the matrix of response values
TrainData::create
-----------------
Creates training data from in-memory arrays.
.. ocv:function:: const CvMat* CvMLData::get_responses()
.. ocv:function:: Ptr<TrainData> create(InputArray samples, int layout, InputArray responses, InputArray varIdx=noArray(), InputArray sampleIdx=noArray(), InputArray sampleWeights=noArray(), InputArray varType=noArray())
The method returns a pointer to the matrix of response values or throws an exception if the data has not been loaded from the file yet.
:param samples: matrix of samples. It should have ``CV_32F`` type.
This is a single-column matrix of the type ``CV_32FC1``. Its row count is equal to the sample count.
:param layout: it's either ``ROW_SAMPLE``, which means that each training sample is a row of ``samples``, or ``COL_SAMPLE``, which means that each training sample occupies a column of ``samples``.
CvMLData::get_missing
---------------------
Returns a pointer to the mask matrix of missing values
:param responses: matrix of responses. If the responses are scalar, they should be stored as a single row or as a single column. The matrix should have type ``CV_32F`` or ``CV_32S`` (in the former case the responses are considered as ordered by default; in the latter case - as categorical)
.. ocv:function:: const CvMat* CvMLData::get_missing() const
:param varIdx: vector specifying which variables to use for training. It can be an integer vector (``CV_32S``) containing 0-based variable indices or byte vector (``CV_8U``) containing a mask of active variables.
The method returns a pointer to the mask matrix of missing values or throws an exception if the data has not been loaded from the file yet.
:param sampleIdx: vector specifying which samples to use for training. It can be an integer vector (``CV_32S``) containing 0-based sample indices or byte vector (``CV_8U``) containing a mask of training samples.
This matrix has the same size as the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) and the type ``CV_8UC1``.
:param sampleWeights: optional vector with weights for each sample. It should have ``CV_32F`` type.
CvMLData::set_response_idx
:param varType: optional vector of type ``CV_8U`` and size <number_of_variables_in_samples> + <number_of_variables_in_responses>, containing types of each input and output variable. The ordered variables are denoted by value ``VAR_ORDERED``, and categorical - by ``VAR_CATEGORICAL``.
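A minimal sketch of assembling training data in memory, following the signature above (a toy XOR-like problem): ::

    Mat samples = (Mat_<float>(4, 2) << 0, 0,   0, 1,   1, 0,   1, 1);
    Mat responses = (Mat_<int>(4, 1) << 0, 1, 1, 0);   // CV_32S => categorical
    Ptr<TrainData> td = TrainData::create( samples, ROW_SAMPLE, responses );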
TrainData::getTrainSamples
--------------------------
Specifies index of response column in the data matrix
Returns matrix of train samples
.. ocv:function:: void CvMLData::set_response_idx( int idx )
.. ocv:function:: Mat TrainData::getTrainSamples(int layout=ROW_SAMPLE, bool compressSamples=true, bool compressVars=true) const
The method sets the index of a response column in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) or throws an exception if the data has not been loaded from the file yet.
:param layout: The requested layout. If it's different from the initial one, the matrix is transposed.
The old response columns become predictors. If ``idx < 0``, there is no response.
:param compressSamples: if true, the function returns only the training samples (specified by sampleIdx)
CvMLData::get_response_idx
--------------------------
Returns index of the response column in the loaded data matrix
:param compressVars: if true, the function returns the shorter training samples, containing only the active variables.
.. ocv:function:: int CvMLData::get_response_idx() const
In the current implementation the function tries to avoid physical data copying and returns the matrix stored inside TrainData (unless transposition or compression is needed).
The method returns the index of a response column in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) or throws an exception if the data has not been loaded from the file yet.
If ``idx < 0``, there is no response.
TrainData::getTrainResponses
----------------------------
Returns the vector of responses
.. ocv:function:: Mat TrainData::getTrainResponses() const
CvMLData::set_train_test_split
------------------------------
Divides the read data set into two disjoint training and test subsets.
The function returns ordered or the original categorical responses. Usually it's used in regression algorithms.
.. ocv:function:: void CvMLData::set_train_test_split( const CvTrainTestSplit * spl )
This method sets parameters for such a split using ``spl`` (see :ocv:class:`CvTrainTestSplit`) or throws an exception if the data has not been loaded from the file yet.
TrainData::getClassLabels
----------------------------
Returns the vector of class labels
CvMLData::get_train_sample_idx
------------------------------
Returns the matrix of sample indices for a training subset
.. ocv:function:: Mat TrainData::getClassLabels() const
.. ocv:function:: const CvMat* CvMLData::get_train_sample_idx() const
The function returns the vector of unique labels that occur in the responses.
The method returns the matrix of sample indices for a training subset. This is a single-row matrix of the type ``CV_32SC1``. If the data split is not set, the method returns ``0``. If the data has not been loaded from the file yet, an exception is thrown.
CvMLData::get_test_sample_idx
-----------------------------
Returns the matrix of sample indices for a testing subset
TrainData::getTrainNormCatResponses
-----------------------------------
Returns the vector of normalized categorical responses
.. ocv:function:: const CvMat* CvMLData::get_test_sample_idx() const
.. ocv:function:: Mat TrainData::getTrainNormCatResponses() const
The function returns the vector of responses. Each response is an integer from 0 to ``<number of classes>-1``. The actual label value can then be retrieved from the class label vector; see ``TrainData::getClassLabels``.
CvMLData::mix_train_and_test_idx
--------------------------------
Mixes the indices of training and test samples
TrainData::setTrainTestSplitRatio
-----------------------------------
Splits the training data into the training and test parts
.. ocv:function:: void CvMLData::mix_train_and_test_idx()
.. ocv:function:: void TrainData::setTrainTestSplitRatio(double ratio, bool shuffle=true)
The method shuffles the indices of training and test samples, preserving the sizes of the training and test subsets, if the data split is set by :ocv:func:`CvMLData::set_train_test_split`. If the data has not been loaded from the file yet, an exception is thrown.
The function selects a subset of the specified relative size and then returns it as the training set. If the function is not called, all the data is used for training. Please note that for each ``TrainData::getTrain*`` method there is a corresponding ``TrainData::getTest*`` method, so the test subset can be retrieved and processed as well.
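For instance (assuming the ``getTest*`` counterparts just mentioned): ::

    td->setTrainTestSplitRatio( 0.8, true );     // 80% for training, shuffled
    Mat trainSamples   = td->getTrainSamples();
    Mat trainResponses = td->getTrainResponses();
    Mat testResponses  = td->getTestResponses(); // responses of the held-out 20%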
CvMLData::get_var_idx
---------------------
Returns the indices of the active variables in the data matrix
.. ocv:function:: const CvMat* CvMLData::get_var_idx()
The method returns the indices of variables (columns) used in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`).
It returns ``0`` if the used subset is not set. It throws an exception if the data has not been loaded from the file yet. The returned matrix is a single-row matrix of the type ``CV_32SC1``. Its column count is equal to the size of the used variable subset.
CvMLData::change_var_idx
------------------------
Enables or disables particular variable in the loaded data
.. ocv:function:: void CvMLData::change_var_idx( int vi, bool state )
By default, after reading the data set, all variables in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) are used. But you may want to use only a subset of variables and include or exclude (depending on the ``state`` value) the variable with index ``vi`` from the used subset. If the data has not been loaded from the file yet, an exception is thrown.
CvMLData::get_var_types
-----------------------
Returns a matrix of the variable types.
.. ocv:function:: const CvMat* CvMLData::get_var_types()
The function returns a single-row matrix of the type ``CV_8UC1``, where each element is set to either ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``. The number of columns is equal to the number of variables. If the data has not been loaded from the file yet, an exception is thrown.
CvMLData::set_var_types
-----------------------
Sets the variables types in the loaded data.
.. ocv:function:: void CvMLData::set_var_types( const char* str )
In the string, a variable type is followed by a list of variables indices. For example: ``"ord[0-17],cat[18]"``, ``"ord[0,2,4,10-12], cat[1,3,5-9,13,14]"``, ``"cat"`` (all variables are categorical), ``"ord"`` (all variables are ordered).
CvMLData::get_header_lines_number
---------------------------------
Returns the number of table header lines.
.. ocv:function:: int CvMLData::get_header_lines_number() const
CvMLData::set_header_lines_number
---------------------------------
Sets the number of table header lines.
.. ocv:function:: void CvMLData::set_header_lines_number( int n )
By default, the table is assumed to have no header, i.e. it contains only the data.
CvMLData::get_var_type
----------------------
Returns type of the specified variable
.. ocv:function:: int CvMLData::get_var_type( int var_idx ) const
The method returns the type of a variable by the index ``var_idx`` ( ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``).
CvMLData::change_var_type
-------------------------
Changes type of the specified variable
.. ocv:function:: void CvMLData::change_var_type( int var_idx, int type)
The method changes the type of the variable with index ``var_idx`` from the existing type to ``type`` ( ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``).
CvMLData::set_delimiter
-----------------------
Sets the delimiter in the file used to separate input numbers
.. ocv:function:: void CvMLData::set_delimiter( char ch )
The method sets the delimiter for variables in a file. For example: ``','`` (default), ``';'``, ``' '`` (space), or other characters. The floating-point separator ``'.'`` is not allowed.
CvMLData::get_delimiter
-----------------------
Returns the currently used delimiter character.
.. ocv:function:: char CvMLData::get_delimiter() const
CvMLData::set_miss_ch
---------------------
Sets the character used to specify missing values
.. ocv:function:: void CvMLData::set_miss_ch( char ch )
The method sets the character used to specify missing values. For example: ``'?'`` (default), ``'-'``. The floating-point separator ``'.'`` is not allowed.
CvMLData::get_miss_ch
---------------------
Returns the currently used missing value character.
.. ocv:function:: char CvMLData::get_miss_ch() const
CvMLData::get_class_labels_map
-------------------------------
Returns a map that converts strings to labels.
.. ocv:function:: const std::map<String, int>& CvMLData::get_class_labels_map() const
The method returns a map that converts string class labels to the numerical class labels. It can be used to recover the original class label as it appears in the file.
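A small sketch of recovering a numerical label (the file name and label string are placeholders): ::

    CvMLData data;
    data.read_csv( "dataset.csv" );  // hypothetical file with string class labels
    const std::map<String, int>& lmap = data.get_class_labels_map();
    std::map<String, int>::const_iterator it = lmap.find( "mango" );
    int numericLabel = (it != lmap.end()) ? it->second : -1;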
CvTrainTestSplit
----------------
.. ocv:struct:: CvTrainTestSplit
Structure setting the split of a data set read by :ocv:class:`CvMLData`.
::
struct CvTrainTestSplit
{
CvTrainTestSplit();
CvTrainTestSplit( int train_sample_count, bool mix = true);
CvTrainTestSplit( float train_sample_portion, bool mix = true);
union
{
int count;
float portion;
} train_sample_part;
int train_sample_part_mode;
bool mix;
};
There are two ways to construct a split:
* Set the training sample count (subset size) ``train_sample_count``. The remaining samples form the test subset.
* Set a training sample portion in ``[0..1]``. The flag ``mix`` is used to mix training and test sample indices when the split is set. Otherwise, the data set is split in storage order: the first part of the samples of the given size forms the training subset, and the second part forms the test subset.
Other methods
-------------
The class includes many other methods that can be used to access normalized categorical input variables, access the training data by parts (so that it does not have to fit into memory all at once), etc.
View File
@ -29,17 +29,17 @@ In other words, given the outputs
Different activation functions may be used. ML implements three standard functions:
*
Identity function ( ``CvANN_MLP::IDENTITY`` ):
Identity function ( ``ANN_MLP::IDENTITY`` ):
:math:`f(x)=x`
*
Symmetrical sigmoid ( ``CvANN_MLP::SIGMOID_SYM`` ):
Symmetrical sigmoid ( ``ANN_MLP::SIGMOID_SYM`` ):
:math:`f(x)=\beta(1-e^{-\alpha x})/(1+e^{-\alpha x})`, which is the default choice for MLP. The standard sigmoid with
:math:`\beta =1, \alpha =1` is shown below:
.. image:: pics/sigmoid_bipolar.png
*
Gaussian function ( ``CvANN_MLP::GAUSSIAN`` ):
Gaussian function ( ``ANN_MLP::GAUSSIAN`` ):
:math:`f(x)=\beta e^{-\alpha x^2}`, which is not completely supported at the moment.
In ML, all the neurons have the same activation functions, with the same free parameters (
@ -95,60 +95,90 @@ The second (default) one is a batch RPROP algorithm.
.. [RPROP93] M. Riedmiller and H. Braun, *A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm*, Proc. ICNN, San Francisco (1993).
CvANN_MLP_TrainParams
ANN_MLP::Params
---------------------
.. ocv:struct:: CvANN_MLP_TrainParams
.. ocv:class:: ANN_MLP::Params
Parameters of the MLP training algorithm. You can initialize the structure by a constructor or the individual parameters can be adjusted after the structure is created.
Parameters of the MLP and of the training algorithm. You can initialize the structure by a constructor or the individual parameters can be adjusted after the structure is created.
The network structure:
.. ocv:member:: Mat layerSizes
The number of elements in each layer of the network. The very first element specifies the number of elements in the input layer. The last element specifies the number of elements in the output layer.
.. ocv:member:: int activateFunc
The activation function. Currently the only fully supported activation function is ``ANN_MLP::SIGMOID_SYM``.
.. ocv:member:: double fparam1
The first parameter of activation function, 0 by default.
.. ocv:member:: double fparam2
The second parameter of the activation function, 0 by default.
.. note::
If you are using the default ``ANN_MLP::SIGMOID_SYM`` activation function with the default parameter values ``fparam1=0`` and ``fparam2=0``, then the function used is ``y = 1.7159*tanh(2/3 * x)``, so the output will range over ``[-1.7159, 1.7159]`` instead of ``[0,1]``.
The back-propagation algorithm parameters:
.. ocv:member:: double bp_dw_scale
.. ocv:member:: double bpDWScale
Strength of the weight gradient term. The recommended value is about 0.1.
.. ocv:member:: double bp_moment_scale
.. ocv:member:: double bpMomentScale
Strength of the momentum term (the difference between weights on the 2 previous iterations). This parameter provides some inertia to smooth the random fluctuations of the weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
The RPROP algorithm parameters (see [RPROP93]_ for details):
.. ocv:member:: double rp_dw0
.. ocv:member:: double rpDW0
Initial value :math:`\Delta_0` of update-values :math:`\Delta_{ij}`.
.. ocv:member:: double rp_dw_plus
.. ocv:member:: double rpDWPlus
Increase factor :math:`\eta^+`. It must be >1.
.. ocv:member:: double rp_dw_minus
.. ocv:member:: double rpDWMinus
Decrease factor :math:`\eta^-`. It must be <1.
.. ocv:member:: double rp_dw_min
.. ocv:member:: double rpDWMin
Update-values lower limit :math:`\Delta_{min}`. It must be positive.
.. ocv:member:: double rp_dw_max
.. ocv:member:: double rpDWMax
Update-values upper limit :math:`\Delta_{max}`. It must be >1.
CvANN_MLP_TrainParams::CvANN_MLP_TrainParams
ANN_MLP::Params::Params
--------------------------------------------
The constructors.
Construct the parameter structure
.. ocv:function:: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams()
.. ocv:function:: ANN_MLP::Params::Params()
.. ocv:function:: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams( CvTermCriteria term_crit, int train_method, double param1, double param2=0 )
.. ocv:function:: ANN_MLP::Params::Params( const Mat& layerSizes, int activateFunc, double fparam1, double fparam2, TermCriteria termCrit, int trainMethod, double param1, double param2=0 )
:param term_crit: Termination criteria of the training algorithm. You can specify the maximum number of iterations (``max_iter``) and/or how much the error could change between the iterations to make the algorithm continue (``epsilon``).
:param layerSizes: Integer vector specifying the number of neurons in each layer including the input and output layers.
:param activateFunc: Parameter specifying the activation function for each neuron: one of ``ANN_MLP::IDENTITY``, ``ANN_MLP::SIGMOID_SYM``, and ``ANN_MLP::GAUSSIAN``.
:param fparam1: The first parameter of the activation function, :math:`\alpha`. See the formulas in the introduction section.
:param fparam2: The second parameter of the activation function, :math:`\beta`. See the formulas in the introduction section.
:param termCrit: Termination criteria of the training algorithm. You can specify the maximum number of iterations (``maxCount``) and/or how much the error could change between the iterations to make the algorithm continue (``epsilon``).
:param train_method: Training method of the MLP. Possible values are:
* **CvANN_MLP_TrainParams::BACKPROP** The back-propagation algorithm.
* **ANN_MLP::Params::BACKPROP** The back-propagation algorithm.
* **CvANN_MLP_TrainParams::RPROP** The RPROP algorithm.
* **ANN_MLP::Params::RPROP** The RPROP algorithm.
:param param1: Parameter of the training method. It is ``rp_dw0`` for ``RPROP`` and ``bp_dw_scale`` for ``BACKPROP``.
@ -158,126 +188,54 @@ By default the RPROP algorithm is used:
::
CvANN_MLP_TrainParams::CvANN_MLP_TrainParams()
ANN_MLP::Params::Params()
{
term_crit = cvTermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.01 );
layerSizes = Mat();
activateFunc = SIGMOID_SYM;
fparam1 = fparam2 = 0;
termCrit = TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01 );
train_method = RPROP;
bp_dw_scale = bp_moment_scale = 0.1;
rp_dw0 = 0.1; rp_dw_plus = 1.2; rp_dw_minus = 0.5;
rp_dw_min = FLT_EPSILON; rp_dw_max = 50.;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}
CvANN_MLP
ANN_MLP
---------
.. ocv:class:: CvANN_MLP : public CvStatModel
.. ocv:class:: ANN_MLP : public StatModel
MLP model.
Unlike many other models in ML that are constructed and trained at once, in the MLP model these steps are separated. First, a network with the specified topology is created using the non-default constructor or the method :ocv:func:`CvANN_MLP::create`. All the weights are set to zeros. Then, the network is trained using a set of input and output vectors. The training procedure can be repeated more than once, that is, the weights can be adjusted based on the new training data.
Unlike many other models in ML that are constructed and trained at once, in the MLP model these steps are separated. First, a network with the specified topology is created using the non-default constructor or the method :ocv:func:`ANN_MLP::create`. All the weights are set to zeros. Then, the network is trained using a set of input and output vectors. The training procedure can be repeated more than once, that is, the weights can be adjusted based on the new training data.
CvANN_MLP::CvANN_MLP
ANN_MLP::create
--------------------
The constructors.
Creates empty model
.. ocv:function:: CvANN_MLP::CvANN_MLP()
.. ocv:function:: Ptr<ANN_MLP> ANN_MLP::create(const Params& params=Params())
.. ocv:function:: CvANN_MLP::CvANN_MLP( const CvMat* layerSizes, int activateFunc=CvANN_MLP::SIGMOID_SYM, double fparam1=0, double fparam2=0 )
Use ``StatModel::train`` to train the model, ``StatModel::train<ANN_MLP>(traindata, params)`` to create and train the model, ``StatModel::load<ANN_MLP>(filename)`` to load the pre-trained model. Note that the train method has optional flags, and the following flags are handled by ``ANN_MLP``:
.. ocv:pyfunction:: cv2.ANN_MLP([layerSizes[, activateFunc[, fparam1[, fparam2]]]]) -> <ANN_MLP object>
* **UPDATE_WEIGHTS** Algorithm updates the network weights, rather than computes them from scratch. In the latter case the weights are initialized using the Nguyen-Widrow algorithm.
The advanced constructor allows you to create an MLP with the specified topology. See :ocv:func:`CvANN_MLP::create` for details.
* **NO_INPUT_SCALE** Algorithm does not normalize the input vectors. If this flag is not set, the training algorithm normalizes each input feature independently, shifting its mean value to 0 and making the standard deviation equal to 1. If the network is assumed to be updated frequently, the new training data could be much different from original one. In this case, you should take care of proper normalization.
CvANN_MLP::create
-----------------
Constructs MLP with the specified topology.
* **NO_OUTPUT_SCALE** Algorithm does not normalize the output vectors. If the flag is not set, the training algorithm normalizes each output feature independently, by transforming it to the certain range depending on the used activation function.
.. ocv:function:: void CvANN_MLP::create( const Mat& layerSizes, int activateFunc=CvANN_MLP::SIGMOID_SYM, double fparam1=0, double fparam2=0 )
.. ocv:function:: void CvANN_MLP::create( const CvMat* layerSizes, int activateFunc=CvANN_MLP::SIGMOID_SYM, double fparam1=0, double fparam2=0 )
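Putting the pieces together, a hedged sketch of creating and training a network with this interface (the layer sizes, termination criteria, and the ``ANN_MLP::Params::RPROP`` constant are illustrative assumptions; ``td`` is a ``Ptr<TrainData>``): ::

    Mat layerSizes = (Mat_<int>(1, 3) << 2, 5, 1); // 2 inputs, 5 hidden, 1 output
    ANN_MLP::Params params( layerSizes, ANN_MLP::SIGMOID_SYM, 0, 0,
                            TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01),
                            ANN_MLP::Params::RPROP, 0.1 );
    Ptr<ANN_MLP> mlp = StatModel::train<ANN_MLP>( td, params );

    Mat query = (Mat_<float>(1, 2) << 0.5f, -0.5f);
    Mat outputs;
    mlp->predict( query, outputs );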
ANN_MLP::setParams
-------------------
Sets the new network parameters
.. ocv:pyfunction:: cv2.ANN_MLP.create(layerSizes[, activateFunc[, fparam1[, fparam2]]]) -> None
.. ocv:function:: void ANN_MLP::setParams(const Params& params)
:param layerSizes: Integer vector specifying the number of neurons in each layer including the input and output layers.
:param params: The new parameters
:param activateFunc: Parameter specifying the activation function for each neuron: one of ``CvANN_MLP::IDENTITY``, ``CvANN_MLP::SIGMOID_SYM``, and ``CvANN_MLP::GAUSSIAN``.
The existing network, if any, will be destroyed and a new empty one will be created. It should be re-trained after that.
:param fparam1: Free parameter of the activation function, :math:`\alpha`. See the formulas in the introduction section.
ANN_MLP::getParams
-------------------
Retrieves the current network parameters
:param fparam2: Free parameter of the activation function, :math:`\beta`. See the formulas in the introduction section.
The method creates an MLP network with the specified topology and assigns the same activation function to all the neurons.
CvANN_MLP::train
----------------
Trains/updates MLP.
.. ocv:function:: int CvANN_MLP::train( const Mat& inputs, const Mat& outputs, const Mat& sampleWeights, const Mat& sampleIdx=Mat(), CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags=0 )
.. ocv:function:: int CvANN_MLP::train( const CvMat* inputs, const CvMat* outputs, const CvMat* sampleWeights, const CvMat* sampleIdx=0, CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags=0 )
.. ocv:pyfunction:: cv2.ANN_MLP.train(inputs, outputs, sampleWeights[, sampleIdx[, params[, flags]]]) -> retval
:param inputs: Floating-point matrix of input vectors, one vector per row.
:param outputs: Floating-point matrix of the corresponding output vectors, one vector per row.
:param sampleWeights: (RPROP only) Optional floating-point vector of weights for each sample. Some samples may be more important than others for training. You may want to raise the weight of certain classes to find the right balance between hit-rate and false-alarm rate, and so on.
:param sampleIdx: Optional integer vector indicating the samples (rows of ``inputs`` and ``outputs``) that are taken into account.
:param params: Training parameters. See the :ocv:class:`CvANN_MLP_TrainParams` description.
:param flags: Various parameters to control the training algorithm. A combination of the following parameters is possible:
* **UPDATE_WEIGHTS** Algorithm updates the network weights, rather than computes them from scratch. In the latter case the weights are initialized using the Nguyen-Widrow algorithm.
* **NO_INPUT_SCALE** Algorithm does not normalize the input vectors. If this flag is not set, the training algorithm normalizes each input feature independently, shifting its mean value to 0 and making the standard deviation equal to 1. If the network is assumed to be updated frequently, the new training data could be much different from original one. In this case, you should take care of proper normalization.
* **NO_OUTPUT_SCALE** Algorithm does not normalize the output vectors. If the flag is not set, the training algorithm normalizes each output feature independently, by transforming it to the certain range depending on the used activation function.
This method applies the specified training algorithm to computing/adjusting the network weights. It returns the number of iterations performed.
The RPROP training algorithm is parallelized with the TBB library.
If you are using the default ``CvANN_MLP::SIGMOID_SYM`` activation function then the output should be in the range [-1,1], instead of [0,1], for optimal results.
CvANN_MLP::predict
------------------
Predicts responses for input samples.
.. ocv:function:: float CvANN_MLP::predict( const Mat& inputs, Mat& outputs ) const
.. ocv:function:: float CvANN_MLP::predict( const CvMat* inputs, CvMat* outputs ) const
.. ocv:pyfunction:: cv2.ANN_MLP.predict(inputs[, outputs]) -> retval, outputs
:param inputs: Input samples.
:param outputs: Predicted responses for corresponding samples.
The method returns a dummy value which should be ignored.
If you are using the default ``CvANN_MLP::SIGMOID_SYM`` activation function with the default parameter values ``fparam1=0`` and ``fparam2=0``, then the function used is ``y = 1.7159*tanh(2/3 * x)``, so the output will range over ``[-1.7159, 1.7159]`` instead of ``[0,1]``.
CvANN_MLP::get_layer_count
--------------------------
Returns the number of layers in the MLP.
.. ocv:function:: int CvANN_MLP::get_layer_count()
CvANN_MLP::get_layer_sizes
--------------------------
Returns numbers of neurons in each layer of the MLP.
.. ocv:function:: const CvMat* CvANN_MLP::get_layer_sizes()
The method returns the integer vector specifying the number of neurons in each layer including the input and output layers of the MLP.
CvANN_MLP::get_weights
----------------------
Returns neurons weights of the particular layer.
.. ocv:function:: double* CvANN_MLP::get_weights(int layer)
:param layer: Index of the particular layer.
.. ocv:function:: Params ANN_MLP::getParams() const
View File
@ -9,55 +9,26 @@ This simple classification model assumes that feature vectors from each class ar
.. [Fukunaga90] K. Fukunaga. *Introduction to Statistical Pattern Recognition*. second ed., New York: Academic Press, 1990.
CvNormalBayesClassifier
NormalBayesClassifier
-----------------------
.. ocv:class:: CvNormalBayesClassifier : public CvStatModel
.. ocv:class:: NormalBayesClassifier : public StatModel
Bayes classifier for normally distributed data.
CvNormalBayesClassifier::CvNormalBayesClassifier
------------------------------------------------
Default and training constructors.
NormalBayesClassifier::create
-----------------------------
Creates empty model
.. ocv:function:: CvNormalBayesClassifier::CvNormalBayesClassifier()
.. ocv:function:: Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const NormalBayesClassifier::Params& params=Params())
.. ocv:function:: CvNormalBayesClassifier::CvNormalBayesClassifier( const Mat& trainData, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat() )
:param params: The model parameters. There are none so far, so the structure is used as a placeholder for possible extensions.
.. ocv:function:: CvNormalBayesClassifier::CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0 )
Use ``StatModel::train`` to train the model, ``StatModel::train<NormalBayesClassifier>(traindata, params)`` to create and train the model, ``StatModel::load<NormalBayesClassifier>(filename)`` to load the pre-trained model.
.. ocv:pyfunction:: cv2.NormalBayesClassifier([trainData, responses[, varIdx[, sampleIdx]]]) -> <NormalBayesClassifier object>
The constructors follow the conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameter descriptions.
CvNormalBayesClassifier::train
------------------------------
Trains the model.
.. ocv:function:: bool CvNormalBayesClassifier::train( const Mat& trainData, const Mat& responses, const Mat& varIdx = Mat(), const Mat& sampleIdx=Mat(), bool update=false )
.. ocv:function:: bool CvNormalBayesClassifier::train( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false )
.. ocv:pyfunction:: cv2.NormalBayesClassifier.train(trainData, responses[, varIdx[, sampleIdx[, update]]]) -> retval
:param update: Identifies whether the model should be trained from scratch (``update=false``) or should be updated using the new training data (``update=true``).
The method trains the Normal Bayes classifier. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations:
* Only ``CV_ROW_SAMPLE`` data layout is supported.
* Input variables are all ordered.
* Output variable is categorical, which means that elements of ``responses`` must be integer numbers, though the vector may have the ``CV_32FC1`` type.
* Missing measurements are not supported.
CvNormalBayesClassifier::predict
--------------------------------
NormalBayesClassifier::predictProb
----------------------------------
Predicts the response for sample(s).
.. ocv:function:: float CvNormalBayesClassifier::predict( const Mat& samples, Mat* results=0, Mat* results_prob=0 ) const
.. ocv:function:: float NormalBayesClassifier::predictProb( InputArray inputs, OutputArray outputs, OutputArray outputProbs, int flags=0 ) const
.. ocv:function:: float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results=0, CvMat* results_prob=0 ) const
.. ocv:pyfunction:: cv2.NormalBayesClassifier.predict(samples) -> retval, results
The method estimates the most probable classes for input vectors. Input vectors (one or more) are stored as rows of the matrix ``samples``. In case of multiple input vectors, there should be one output vector ``results``. The predicted class for a single input vector is returned by the method. The vector ``results_prob`` contains the output probabilities corresponding to each element of ``result``.
The function is parallelized with the TBB library.
The method estimates the most probable classes for input vectors. Input vectors (one or more) are stored as rows of the matrix ``inputs``. In case of multiple input vectors, there should be one output vector ``outputs``. The predicted class for a single input vector is returned by the method. The vector ``outputProbs`` contains the output probabilities corresponding to each element of ``outputs``.
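A minimal sketch of the new interface (``td`` is assumed to be a ``Ptr<TrainData>`` with categorical responses, and ``querySamples`` a matrix of input rows): ::

    Ptr<NormalBayesClassifier> nbc =
        StatModel::train<NormalBayesClassifier>( td, NormalBayesClassifier::Params() );
    Mat outputs, outputProbs;
    nbc->predictProb( querySamples, outputs, outputProbs );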
View File
@ -40,179 +40,64 @@ For the random trees usage example, please, see letter_recog.cpp sample in OpenC
* And other articles from the web site http://www.stat.berkeley.edu/users/breiman/RandomForests/cc_home.htm
CvRTParams
----------
.. ocv:struct:: CvRTParams : public CvDTreeParams
RTrees::Params
--------------
.. ocv:struct:: RTrees::Params : public DTrees::Params
Training parameters of random trees.
The set of training parameters for the forest is a superset of the training parameters for a single tree. However, random trees do not need all the functionality/features of decision trees. Most notably, the trees are not pruned, so the cross-validation parameters are not used.
CvRTParams::CvRTParams
RTrees::Params::Params
-----------------------
The constructors.
The constructors
.. ocv:function:: CvRTParams::CvRTParams()
.. ocv:function:: RTrees::Params::Params()
.. ocv:function:: CvRTParams::CvRTParams( int max_depth, int min_sample_count, float regression_accuracy, bool use_surrogates, int max_categories, const float* priors, bool calc_var_importance, int nactive_vars, int max_num_of_trees_in_the_forest, float forest_accuracy, int termcrit_type )
.. ocv:function:: RTrees::Params::Params( int maxDepth, int minSampleCount, double regressionAccuracy, bool useSurrogates, int maxCategories, const Mat& priors, bool calcVarImportance, int nactiveVars, TermCriteria termCrit )
:param max_depth: the depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.
:param maxDepth: the depth of the tree. A low value will likely underfit and conversely a high value will likely overfit. The optimal value can be obtained using cross validation or other suitable methods.
:param min_sample_count: minimum samples required at a leaf node for it to be split. A reasonable value is a small percentage of the total data e.g. 1%.
:param minSampleCount: minimum samples required at a leaf node for it to be split. A reasonable value is a small percentage of the total data e.g. 1%.
:param max_categories: Cluster possible values of a categorical variable into ``K`` :math:`\leq` ``max_categories`` clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries to make a split, takes more than ``max_categories`` values, the precise best subset estimation may take a very long time because the algorithm is exponential. Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into ``max_categories`` clusters that is some categories are merged together. The clustering is applied only in ``n``>2-class classification problems for categorical variables with ``N > max_categories`` possible values. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
:param maxCategories: Cluster possible values of a categorical variable into ``K <= maxCategories`` clusters to find a suboptimal split. If a discrete variable, on which the training procedure tries to make a split, takes more than ``maxCategories`` values, the precise best subset estimation may take a very long time because the algorithm is exponential. Instead, many decision trees engines (including ML) try to find a sub-optimal split in this case by clustering all the samples into ``maxCategories`` clusters, that is, some categories are merged together. The clustering is applied only in ``n``>2-class classification problems for categorical variables with ``N > maxCategories`` possible values. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases.
:param calc_var_importance: If true then variable importance will be calculated and then it can be retrieved by :ocv:func:`CvRTrees::get_var_importance`.
:param calcVarImportance: If true then variable importance will be calculated and then it can be retrieved by ``RTrees::getVarImportance``.
:param nactive_vars: The size of the randomly selected subset of features at each tree node and that are used to find the best split(s). If you set it to 0 then the size will be set to the square root of the total number of features.
:param nactiveVars: The size of the randomly selected subset of features at each tree node and that are used to find the best split(s). If you set it to 0 then the size will be set to the square root of the total number of features.
:param max_num_of_trees_in_the_forest: The maximum number of trees in the forest (surprise, surprise). Typically the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes past a certain number of trees. Also keep in mind that the number of trees increases the prediction time linearly.
:param termCrit: The termination criteria that specifies when the training algorithm stops - either when the specified number of trees is trained and added to the ensemble or when sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have, the better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes past a certain number of trees. Also keep in mind that the number of trees increases the prediction time linearly.
:param forest_accuracy: Sufficient accuracy (OOB error).
:param termcrit_type: The type of the termination criteria:
* **CV_TERMCRIT_ITER** Terminate learning by the ``max_num_of_trees_in_the_forest``;
* **CV_TERMCRIT_EPS** Terminate learning by the ``forest_accuracy``;
* **CV_TERMCRIT_ITER | CV_TERMCRIT_EPS** Use both termination criteria.
For meaning of other parameters see :ocv:func:`CvDTreeParams::CvDTreeParams`.
The default constructor sets all parameters to default values which are different from default values of :ocv:class:`CvDTreeParams`:
The default constructor sets all parameters to default values which are different from default values of ``DTrees::Params``:
::
CvRTParams::CvRTParams() : CvDTreeParams( 5, 10, 0, false, 10, 0, false, false, 0 ),
calc_var_importance(false), nactive_vars(0)
RTrees::Params::Params() : DTrees::Params( 5, 10, 0, false, 10, 0, false, false, Mat() ),
calcVarImportance(false), nactiveVars(0)
{
term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 50, 0.1 );
termCrit = TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 50, 0.1 );
}
CvRTrees
RTrees
--------
.. ocv:class:: CvRTrees : public CvStatModel
.. ocv:class:: RTrees : public DTrees
The class implements the random forest predictor as described in the beginning of this section.
CvRTrees::train
RTrees::create
---------------
Trains the Random Trees model.
Creates the empty model
.. ocv:function:: bool CvRTrees::train( const Mat& trainData, int tflag, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), const Mat& varType=Mat(), const Mat& missingDataMask=Mat(), CvRTParams params=CvRTParams() )
.. ocv:function:: Ptr<RTrees> RTrees::create(const RTrees::Params& params=Params())
.. ocv:function:: bool CvRTrees::train( const CvMat* trainData, int tflag, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, const CvMat* varType=0, const CvMat* missingDataMask=0, CvRTParams params=CvRTParams() )
Use ``StatModel::train`` to train the model, ``StatModel::train<RTrees>(traindata, params)`` to create and train the model, ``StatModel::load<RTrees>(filename)`` to load the pre-trained model.
.. ocv:function:: bool CvRTrees::train( CvMLData* data, CvRTParams params=CvRTParams() )
.. ocv:pyfunction:: cv2.RTrees.train(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params]]]]]) -> retval
The method :ocv:func:`CvRTrees::train` is very similar to the method :ocv:func:`CvDTree::train` and follows the generic method :ocv:func:`CvStatModel::train` conventions. All the parameters specific to the algorithm training are passed as a :ocv:class:`CvRTParams` instance. The estimate of the training error (``oob-error``) is stored in the protected class member ``oob_error``.
The function is parallelized with the TBB library.
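A minimal training sketch with the new API, assuming the ``TrainData`` helpers described in the ``StatModel`` section and arbitrary matrix sizes: ::

    using namespace cv;
    using namespace cv::ml;

    Mat samples(100, 5, CV_32F);   // one training sample per row
    Mat labels(100, 1, CV_32S);    // one class label per sample
    // ... fill samples and labels ...

    Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, labels);
    Ptr<RTrees> forest = StatModel::train<RTrees>(data, RTrees::Params());
    float prediction = forest->predict(samples.row(0));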
CvRTrees::predict
-----------------
Predicts the output for an input sample.
.. ocv:function:: float CvRTrees::predict( const Mat& sample, const Mat& missing=Mat() ) const
.. ocv:function:: float CvRTrees::predict( const CvMat* sample, const CvMat* missing = 0 ) const
.. ocv:pyfunction:: cv2.RTrees.predict(sample[, missing]) -> retval
:param sample: Sample for classification.
:param missing: Optional missing measurement mask of the sample.
The input parameters of the prediction method are the same as in :ocv:func:`CvDTree::predict` but the return value type is different. This method returns the cumulative result from all the trees in the forest (the class that receives the majority of votes, or the mean of the regression function estimates).
CvRTrees::predict_prob
----------------------
Returns a fuzzy-predicted class label.
.. ocv:function:: float CvRTrees::predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const
.. ocv:function:: float CvRTrees::predict_prob( const CvMat* sample, const CvMat* missing = 0 ) const
.. ocv:pyfunction:: cv2.RTrees.predict_prob(sample[, missing]) -> retval
:param sample: Sample for classification.
:param missing: Optional missing measurement mask of the sample.
The function works for binary classification problems only. It returns a number between 0 and 1 that represents the probability or confidence that the sample belongs to the second class. It is calculated as the proportion of decision trees that classified the sample to the second class.
CvRTrees::getVarImportance
RTrees::getVarImportance
----------------------------
Returns the variable importance array.
.. ocv:function:: Mat CvRTrees::getVarImportance()
.. ocv:function:: Mat RTrees::getVarImportance() const
.. ocv:function:: const CvMat* CvRTrees::get_var_importance()
.. ocv:pyfunction:: cv2.RTrees.getVarImportance() -> retval
The method returns the variable importance vector, computed at the training stage when ``CvRTParams::calc_var_importance`` is set to true. If this flag was set to false, the ``NULL`` pointer is returned. This differs from the decision trees where variable importance can be computed anytime after the training.
CvRTrees::get_proximity
-----------------------
Retrieves the proximity measure between two training samples.
.. ocv:function:: float CvRTrees::get_proximity( const CvMat* sample1, const CvMat* sample2, const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const
:param sample1: The first sample.
:param sample2: The second sample.
:param missing1: Optional missing measurement mask of the first sample.
:param missing2: Optional missing measurement mask of the second sample.
The method returns the proximity measure between any two samples. This is the ratio of those trees in the ensemble, in which the samples fall into the same leaf node, to the total number of the trees.
CvRTrees::calc_error
--------------------
Returns error of the random forest.
.. ocv:function:: float CvRTrees::calc_error( CvMLData* data, int type, std::vector<float>* resp=0 )
The method is identical to :ocv:func:`CvDTree::calc_error` but uses the random forest as predictor.
CvRTrees::get_train_error
-------------------------
Returns the train error.
.. ocv:function:: float CvRTrees::get_train_error()
The method works for classification problems only. It returns the proportion of incorrectly classified train samples.
CvRTrees::get_rng
-----------------
Returns the state of the used random number generator.
.. ocv:function:: CvRNG* CvRTrees::get_rng()
CvRTrees::get_tree_count
------------------------
Returns the number of trees in the constructed random forest.
.. ocv:function:: int CvRTrees::get_tree_count() const
CvRTrees::get_tree
------------------
Returns the specific decision tree in the constructed random forest.
.. ocv:function:: CvForestTree* CvRTrees::get_tree(int i) const
:param i: Index of the decision tree.
The method returns the variable importance vector, computed at the training stage when ``RTParams::calcVarImportance`` is set to true. If this flag was set to false, the empty matrix is returned.
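For example, assuming ``calcVarImportance`` is a public member of ``RTrees::Params`` (as its constructor above suggests), a sketch of requesting variable importance before training: ::

    RTrees::Params params;
    params.calcVarImportance = true;             // assumed public member
    Ptr<RTrees> forest = StatModel::train<RTrees>(data, params);
    Mat importance = forest->getVarImportance(); // empty if the flag was false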
@ -3,161 +3,110 @@ Statistical Models
.. highlight:: cpp
.. index:: CvStatModel
.. index:: StatModel
CvStatModel
StatModel
-----------
.. ocv:class:: CvStatModel
.. ocv:class:: StatModel
Base class for statistical models in ML. ::
class CvStatModel
{
public:
/* CvStatModel(); */
/* CvStatModel( const Mat& train_data ... ); */
virtual ~CvStatModel();
virtual void clear()=0;
/* virtual bool train( const Mat& train_data, [int tflag,] ..., const
Mat& responses, ...,
[const Mat& var_idx,] ..., [const Mat& sample_idx,] ...
[const Mat& var_type,] ..., [const Mat& missing_mask,]
<misc_training_alg_params> ... )=0;
*/
/* virtual float predict( const Mat& sample ... ) const=0; */
virtual void save( const char* filename, const char* name=0 )=0;
virtual void load( const char* filename, const char* name=0 )=0;
virtual void write( CvFileStorage* storage, const char* name )=0;
virtual void read( CvFileStorage* storage, CvFileNode* node )=0;
};
Base class for statistical models in OpenCV ML.
In this declaration, some methods are commented out. These are methods for which there is no unified API (with the exception of the default constructor). However, there are many similarities in the syntax and semantics that are briefly described below in this section, as if they were part of the base class.
CvStatModel::CvStatModel
StatModel::train
------------------------
The default constructor.
Trains the statistical model
.. ocv:function:: CvStatModel::CvStatModel()
.. ocv:function:: bool StatModel::train( const Ptr<TrainData>& trainData, int flags=0 )
Each statistical model class in ML has a default constructor without parameters. This constructor is useful for a two-stage model construction, when the default constructor is followed by :ocv:func:`CvStatModel::train` or :ocv:func:`CvStatModel::load`.
.. ocv:function:: bool StatModel::train( InputArray samples, int layout, InputArray responses )
CvStatModel::CvStatModel(...)
.. ocv:function:: Ptr<_Tp> StatModel::train(const Ptr<TrainData>& data, const _Tp::Params& p, int flags=0 )
.. ocv:function:: Ptr<_Tp> StatModel::train(InputArray samples, int layout, InputArray responses, const _Tp::Params& p, int flags=0 )
:param trainData: training data that can be loaded from file using ``TrainData::loadFromCSV`` or created with ``TrainData::create``.
:param samples: training samples
:param layout: ``ROW_SAMPLE`` (training samples are the matrix rows) or ``COL_SAMPLE`` (training samples are the matrix columns)
:param responses: vector of responses associated with the training samples.
:param p: the stat model parameters.
:param flags: optional flags, depending on the model. Some of the models can be updated with the new training samples, not completely overwritten (such as ``NormalBayesClassifier`` or ``ANN_MLP``).
There are 2 instance methods and 2 static (class) template methods. The first two train an already created model (the very first method must be overridden in the derived classes). The latter two are convenience methods that construct an empty model and then call its train method.
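A sketch of both styles, assuming ``data`` is a ``Ptr<TrainData>`` prepared beforehand: ::

    // instance method: train an already constructed model
    Ptr<SVM> svm = SVM::create();
    svm->train(data);

    // static template method: construct an empty model and train it in one call
    Ptr<SVM> svm2 = StatModel::train<SVM>(data, SVM::Params());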
StatModel::isTrained
-----------------------------
The training constructor.
Returns true if the model is trained
.. ocv:function:: CvStatModel::CvStatModel()
.. ocv:function:: bool StatModel::isTrained()
Most ML classes provide single-step "construct and train" constructors. Such a constructor is equivalent to the default constructor followed by the :ocv:func:`CvStatModel::train` method with the parameters that are passed to it.
The method must be overridden in the derived classes.
CvStatModel::~CvStatModel
-------------------------
The virtual destructor.
StatModel::isClassifier
-----------------------------
Returns true if the model is classifier
.. ocv:function:: CvStatModel::~CvStatModel()
.. ocv:function:: bool StatModel::isClassifier()
The destructor of the base class is declared as virtual. So, it is safe to write the following code: ::
The method must be overridden in the derived classes.
CvStatModel* model;
if( use_svm )
model = new CvSVM(... /* SVM params */);
else
model = new CvDTree(... /* Decision tree params */);
...
delete model;
StatModel::getVarCount
-----------------------------
Returns the number of variables in training samples
.. ocv:function:: int StatModel::getVarCount()
Normally, the destructor of each derived class does nothing. But in this instance, it calls the overridden method :ocv:func:`CvStatModel::clear` that deallocates all the memory.
The method must be overridden in the derived classes.
CvStatModel::clear
StatModel::predict
------------------
Deallocates memory and resets the model state.
Predicts response(s) for the provided sample(s)
.. ocv:function:: void CvStatModel::clear()
.. ocv:function:: float StatModel::predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const
The method ``clear`` does the same job as the destructor: it deallocates all the memory occupied by the class members. But the object itself is not destructed and can be reused further. This method is called from the destructor, from the :ocv:func:`CvStatModel::train` methods of the derived classes, from the methods :ocv:func:`CvStatModel::load`, :ocv:func:`CvStatModel::read()`, or even explicitly by the user.
:param samples: The input samples, floating-point matrix
CvStatModel::save
:param results: The optional output matrix of results.
:param flags: The optional flags, model-dependent. Some models, such as ``Boost`` and ``SVM``, recognize the ``StatModel::RAW_OUTPUT`` flag, which makes the method return the raw results (the sum), not the class label.
StatModel::calcError
-------------------------
Computes error on the training or test dataset
.. ocv:function:: float StatModel::calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const
:param data: the training data
:param test: if true, the error is computed over the test subset of the data; otherwise it is computed over the training subset. Note that if you loaded a completely different dataset to evaluate an already trained classifier, you will probably want to skip setting the train/test split via ``TrainData::setTrainTestSplitRatio`` and pass ``test=false``, so that the error is computed over the whole new set.
:param resp: the optional output responses.
The method uses ``StatModel::predict`` to compute the error. For regression models the error is computed as RMS, for classifiers as a percentage of misclassified samples (0%-100%).
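A sketch, assuming ``TrainData::loadFromCSV(filename, headerLineCount)`` and a headerless ``data.csv``: ::

    Ptr<TrainData> data = TrainData::loadFromCSV("data.csv", 0);
    data->setTrainTestSplitRatio(0.8);            // keep 80% for training
    Ptr<DTrees> model = StatModel::train<DTrees>(data, DTrees::Params());
    Mat resp;
    float testErr  = model->calcError(data, true,  resp);  // on the test split
    float trainErr = model->calcError(data, false, resp);  // on the train split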
StatModel::save
-----------------
Saves the model to a file.
.. ocv:function:: void CvStatModel::save( const char* filename, const char* name=0 )
.. ocv:function:: void StatModel::save( const String& filename )
.. ocv:pyfunction:: cv2.StatModel.save(filename[, name]) -> None
In order to make this method work, the derived class must override ``Algorithm::write(FileStorage& fs)``.
The method ``save`` saves the complete model state to the specified XML or YAML file with the specified name or default name (which depends on a particular class). *Data persistence* functionality from ``CxCore`` is used.
CvStatModel::load
StatModel::load
-----------------
Loads the model from a file.
Loads the model from a file
.. ocv:function:: void CvStatModel::load( const char* filename, const char* name=0 )
.. ocv:function:: Ptr<_Tp> StatModel::load( const String& filename )
.. ocv:pyfunction:: cv2.StatModel.load(filename[, name]) -> None
This is a static template method of StatModel. Its usage is as follows (in the case of SVM): ::
The method ``load`` loads the complete model state with the specified name (or default model-dependent name) from the specified XML or YAML file. The previous model state is cleared by :ocv:func:`CvStatModel::clear`.
Ptr<SVM> svm = StatModel::load<SVM>("my_svm_model.xml");
CvStatModel::write
------------------
Writes the model to the file storage.
.. ocv:function:: void CvStatModel::write( CvFileStorage* storage, const char* name )
The method ``write`` stores the complete model state in the file storage with the specified name or default name (which depends on the particular class). The method is called by :ocv:func:`CvStatModel::save`.
CvStatModel::read
-----------------
Reads the model from the file storage.
.. ocv:function:: void CvStatModel::read( CvFileStorage* storage, CvFileNode* node )
The method ``read`` restores the complete model state from the specified node of the file storage. Use the function
:ocv:cfunc:`GetFileNodeByName` to locate the node.
The previous model state is cleared by :ocv:func:`CvStatModel::clear`.
CvStatModel::train
------------------
Trains the model.
.. ocv:function:: bool CvStatModel::train( const Mat& train_data, [int tflag,] ..., const Mat& responses, ..., [const Mat& var_idx,] ..., [const Mat& sample_idx,] ... [const Mat& var_type,] ..., [const Mat& missing_mask,] <misc_training_alg_params> ... )
The method trains the statistical model using a set of input feature vectors and the corresponding output values (responses). Both input and output vectors/values are passed as matrices. By default, the input feature vectors are stored as ``train_data`` rows, that is, all the components (features) of a training vector are stored continuously. However, some algorithms can handle the transposed representation when all values of each particular feature (component/input variable) over the whole input set are stored continuously. If both layouts are supported, the method includes the ``tflag`` parameter that specifies the orientation as follows:
* ``tflag=CV_ROW_SAMPLE`` The feature vectors are stored as rows.
* ``tflag=CV_COL_SAMPLE`` The feature vectors are stored as columns.
The ``train_data`` must have the ``CV_32FC1`` (32-bit floating-point, single-channel) format. Responses are usually stored in a 1D vector (a row or a column) of ``CV_32SC1`` (only in the classification problem) or ``CV_32FC1`` format, one value per input vector. However, some algorithms, like various flavors of neural nets, take vector responses.
For classification problems, the responses are discrete class labels. For regression problems, the responses are values of the function to be approximated. Some algorithms can deal only with classification problems, some - only with regression problems, and some can deal with both problems. In the latter case, the type of output variable is either passed as a separate parameter or as the last element of the ``var_type`` vector:
* ``CV_VAR_CATEGORICAL`` The output values are discrete class labels.
* ``CV_VAR_ORDERED(=CV_VAR_NUMERICAL)`` The output values are ordered. This means that two different values can be compared as numbers, and this is a regression problem.
Types of input variables can be also specified using ``var_type``. Most algorithms can handle only ordered input variables.
Many ML models may be trained on a selected feature subset, and/or on a selected sample subset of the training set. To make it easier for you, the method ``train`` usually includes the ``var_idx`` and ``sample_idx`` parameters. The former parameter identifies variables (features) of interest, and the latter one identifies samples of interest. Both vectors are either integer (``CV_32SC1``) vectors (lists of 0-based indices) or 8-bit (``CV_8UC1``) masks of active variables/samples. You may pass ``NULL`` pointers instead of either of the arguments, meaning that all of the variables/samples are used for training.
Additionally, some algorithms can handle missing measurements, that is, when certain features of certain training samples have unknown values (for example, they forgot to measure the temperature of patient A on Monday). The parameter ``missing_mask``, an 8-bit matrix of the same size as ``train_data``, is used to mark the missed values (non-zero elements of the mask).
Usually, the previous model state is cleared by :ocv:func:`CvStatModel::clear` before running the training procedure. However, some algorithms may optionally update the model state with the new training data, instead of resetting it.
CvStatModel::predict
--------------------
Predicts the response for a sample.
.. ocv:function:: float CvStatModel::predict( const Mat& sample, ... ) const
The method is used to predict the response for a new sample. In case of a classification, the method returns the class label. In case of a regression, the method returns the output function value. The input sample must have as many components as the ``train_data`` passed to ``train`` contains. If the ``var_idx`` parameter is passed to ``train``, it is remembered and then is used to extract only the necessary components from the input sample in the method ``predict``.
The suffix ``const`` means that prediction does not affect the internal model state, so the method can be safely called from within different threads.
In order to make this method work, the derived class must override ``Algorithm::read(const FileNode& fn)``.
@ -14,21 +14,21 @@ SVM implementation in OpenCV is based on [LibSVM]_.
.. [LibSVM] C.-C. Chang and C.-J. Lin. *LIBSVM: a library for support vector machines*, ACM Transactions on Intelligent Systems and Technology, 2:27:1--27:27, 2011. (http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf)
CvParamGrid
ParamGrid
-----------
.. ocv:struct:: CvParamGrid
.. ocv:class:: ParamGrid
The structure represents the logarithmic grid range of statmodel parameters. It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate being computed by cross-validation.
.. ocv:member:: double CvParamGrid::min_val
.. ocv:member:: double ParamGrid::minVal
Minimum value of the statmodel parameter.
.. ocv:member:: double CvParamGrid::max_val
.. ocv:member:: double ParamGrid::maxVal
Maximum value of the statmodel parameter.
.. ocv:member:: double CvParamGrid::step
.. ocv:member:: double ParamGrid::logStep
Logarithmic step for iterating the statmodel parameter.
@ -36,88 +36,78 @@ The grid determines the following iteration sequence of the statmodel parameter
.. math::
(min\_val, min\_val*step, min\_val*{step}^2, \dots, min\_val*{step}^n),
(minVal, minVal*logStep, minVal*{logStep}^2, \dots, minVal*{logStep}^n),
where :math:`n` is the maximal index satisfying
.. math::
\texttt{min\_val} * \texttt{step} ^n < \texttt{max\_val}
\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}
The grid is logarithmic, so ``step`` must always be greater than 1.
The grid is logarithmic, so ``logStep`` must always be greater than 1.
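For example, ``minVal=1``, ``maxVal=1000``, and ``logStep=10`` produce the values 1, 10, 100 (``maxVal`` itself is excluded by the strict inequality).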
CvParamGrid::CvParamGrid
ParamGrid::ParamGrid
------------------------
The constructors.
.. ocv:function:: CvParamGrid::CvParamGrid()
.. ocv:function:: ParamGrid::ParamGrid()
.. ocv:function:: CvParamGrid::CvParamGrid( double min_val, double max_val, double log_step )
.. ocv:function:: ParamGrid::ParamGrid( double minVal, double maxVal, double logStep )
The full constructor initializes the corresponding members. The default constructor creates a dummy grid:
::
CvParamGrid::CvParamGrid()
ParamGrid::ParamGrid()
{
min_val = max_val = step = 0;
minVal = maxVal = 0;
logStep = 1;
}
CvParamGrid::check
------------------
Checks the validity of the grid.
.. ocv:function:: bool CvParamGrid::check()
Returns ``true`` if the grid is valid and ``false`` otherwise. The grid is valid if and only if:
* The lower bound of the grid is less than the upper one.
* The lower bound of the grid is positive.
* The grid step is greater than 1.
CvSVMParams
SVM::Params
-----------
.. ocv:struct:: CvSVMParams
.. ocv:class:: SVM::Params
SVM training parameters.
The structure must be initialized and passed to the training method of :ocv:class:`CvSVM`.
The structure must be initialized and passed to the training method of :ocv:class:`SVM`.
CvSVMParams::CvSVMParams
SVM::Params::Params
------------------------
The constructors.
The constructors.
.. ocv:function:: CvSVMParams::CvSVMParams()
.. ocv:function:: SVM::Params::Params()
.. ocv:function:: CvSVMParams::CvSVMParams( int svm_type, int kernel_type, double degree, double gamma, double coef0, double Cvalue, double nu, double p, CvMat* class_weights, CvTermCriteria term_crit )
.. ocv:function:: SVM::Params::Params( int svmType, int kernelType, double degree, double gamma, double coef0, double Cvalue, double nu, double p, const Mat& classWeights, TermCriteria termCrit )
:param svm_type: Type of a SVM formulation. Possible values are:
:param svmType: Type of a SVM formulation. Possible values are:
* **CvSVM::C_SVC** C-Support Vector Classification. ``n``-class classification (``n`` :math:`\geq` 2), allows imperfect separation of classes with penalty multiplier ``C`` for outliers.
* **SVM::C_SVC** C-Support Vector Classification. ``n``-class classification (``n`` :math:`\geq` 2), allows imperfect separation of classes with penalty multiplier ``C`` for outliers.
* **CvSVM::NU_SVC** :math:`\nu`-Support Vector Classification. ``n``-class classification with possible imperfect separation. Parameter :math:`\nu` (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of ``C``.
* **SVM::NU_SVC** :math:`\nu`-Support Vector Classification. ``n``-class classification with possible imperfect separation. Parameter :math:`\nu` (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of ``C``.
* **CvSVM::ONE_CLASS** Distribution Estimation (One-class SVM). All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
* **SVM::ONE_CLASS** Distribution Estimation (One-class SVM). All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
* **CvSVM::EPS_SVR** :math:`\epsilon`-Support Vector Regression. The distance between feature vectors from the training set and the fitting hyper-plane must be less than ``p``. For outliers the penalty multiplier ``C`` is used.
* **SVM::EPS_SVR** :math:`\epsilon`-Support Vector Regression. The distance between feature vectors from the training set and the fitting hyper-plane must be less than ``p``. For outliers the penalty multiplier ``C`` is used.
* **CvSVM::NU_SVR** :math:`\nu`-Support Vector Regression. :math:`\nu` is used instead of ``p``.
* **SVM::NU_SVR** :math:`\nu`-Support Vector Regression. :math:`\nu` is used instead of ``p``.
See [LibSVM]_ for details.
:param kernel_type: Type of a SVM kernel. Possible values are:
:param kernelType: Type of a SVM kernel. Possible values are:
* **CvSVM::LINEAR** Linear kernel. No mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. :math:`K(x_i, x_j) = x_i^T x_j`.
* **SVM::LINEAR** Linear kernel. No mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. :math:`K(x_i, x_j) = x_i^T x_j`.
* **CvSVM::POLY** Polynomial kernel: :math:`K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0`.
* **SVM::POLY** Polynomial kernel: :math:`K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0`.
* **CvSVM::RBF** Radial basis function (RBF), a good choice in most cases. :math:`K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0`.
* **SVM::RBF** Radial basis function (RBF), a good choice in most cases. :math:`K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0`.
* **CvSVM::SIGMOID** Sigmoid kernel: :math:`K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)`.
* **SVM::SIGMOID** Sigmoid kernel: :math:`K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)`.
* **CvSVM::CHI2** Exponential Chi2 kernel, similar to the RBF kernel: :math:`K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0`.
* **SVM::CHI2** Exponential Chi2 kernel, similar to the RBF kernel: :math:`K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0`.
* **CvSVM::INTER** Histogram intersection kernel. A fast kernel. :math:`K(x_i, x_j) = min(x_i,x_j)`.
* **SVM::INTER** Histogram intersection kernel. A fast kernel. :math:`K(x_i, x_j) = min(x_i,x_j)`.
:param degree: Parameter ``degree`` of a kernel function (POLY).
@ -131,19 +121,19 @@ The constructors.
:param p: Parameter :math:`\epsilon` of a SVM optimization problem (EPS_SVR).
:param class_weights: Optional weights in the C_SVC problem, assigned to particular classes. They are multiplied by ``C`` so the parameter ``C`` of class ``#i`` becomes :math:`class\_weights_i * C`. Thus these weights affect the misclassification penalty for different classes. The larger the weight, the larger the penalty on misclassification of data from the corresponding class.
:param classWeights: Optional weights in the C_SVC problem, assigned to particular classes. They are multiplied by ``C`` so the parameter ``C`` of class ``#i`` becomes ``classWeights(i) * C``. Thus these weights affect the misclassification penalty for different classes. The larger the weight, the larger the penalty on misclassification of data from the corresponding class.
:param term_crit: Termination criteria of the iterative SVM training procedure which solves a partial case of constrained quadratic optimization problem. You can specify tolerance and/or the maximum number of iterations.
:param termCrit: Termination criteria of the iterative SVM training procedure which solves a partial case of constrained quadratic optimization problem. You can specify tolerance and/or the maximum number of iterations.
The default constructor initializes the structure with the following values:
::
CvSVMParams::CvSVMParams() :
svm_type(CvSVM::C_SVC), kernel_type(CvSVM::RBF), degree(0),
gamma(1), coef0(0), C(1), nu(0), p(0), class_weights(0)
SVMParams::SVMParams() :
svmType(SVM::C_SVC), kernelType(SVM::RBF), degree(0),
gamma(1), coef0(0), C(1), nu(0), p(0), classWeights(0)
{
term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
termCrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, FLT_EPSILON );
}
A comparison of different kernels on the following 2D test case with four classes. Four C_SVC SVMs have been trained (one against the rest) with ``auto_train``. Evaluation was done on three different kernels (CHI2, INTER, RBF). The color depicts the class with the max score. Bright means max-score > 0, dark means max-score < 0.
@ -151,10 +141,9 @@ A comparison of different kernels on the following 2D test case with four classe
.. image:: pics/SVM_Comparison.png
CvSVM
SVM
-----
.. ocv:class:: CvSVM : public CvStatModel
.. ocv:class:: SVM : public StatModel
Support Vector Machines.
@ -164,55 +153,27 @@ Support Vector Machines.
* (Python) An example of grid search digit recognition using SVM can be found at opencv_source/samples/python2/digits_adjust.py
* (Python) An example of video digit recognition using SVM can be found at opencv_source/samples/python2/digits_video.py
CvSVM::CvSVM
SVM::create
------------
Default and training constructors.
Creates an empty model.
.. ocv:function:: CvSVM::CvSVM()
.. ocv:function:: Ptr<SVM> SVM::create(const Params& p=Params(), const Ptr<Kernel>& customKernel=Ptr<Kernel>())
.. ocv:function:: CvSVM::CvSVM( const Mat& trainData, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), CvSVMParams params=CvSVMParams() )
:param p: SVM parameters
:param customKernel: the optional custom kernel to use. It must implement the ``SVM::Kernel`` interface.
.. ocv:function:: CvSVM::CvSVM( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, CvSVMParams params=CvSVMParams() )
.. ocv:pyfunction:: cv2.SVM([trainData, responses[, varIdx[, sampleIdx[, params]]]]) -> <SVM object>
The constructors follow conventions of :ocv:func:`CvStatModel::CvStatModel`. See :ocv:func:`CvStatModel::train` for parameters descriptions.
CvSVM::train
------------
Trains an SVM.
.. ocv:function:: bool CvSVM::train( const Mat& trainData, const Mat& responses, const Mat& varIdx=Mat(), const Mat& sampleIdx=Mat(), CvSVMParams params=CvSVMParams() )
.. ocv:function:: bool CvSVM::train( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx=0, const CvMat* sampleIdx=0, CvSVMParams params=CvSVMParams() )
.. ocv:pyfunction:: cv2.SVM.train(trainData, responses[, varIdx[, sampleIdx[, params]]]) -> retval
The method trains the SVM model. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations:
* Only the ``CV_ROW_SAMPLE`` data layout is supported.
* Input variables are all ordered.
* Output variables can be either categorical (``params.svm_type=CvSVM::C_SVC`` or ``params.svm_type=CvSVM::NU_SVC``), or ordered (``params.svm_type=CvSVM::EPS_SVR`` or ``params.svm_type=CvSVM::NU_SVR``), or not required at all (``params.svm_type=CvSVM::ONE_CLASS``).
* Missing measurements are not supported.
All the other parameters are gathered in the
:ocv:class:`CvSVMParams` structure.
Use ``StatModel::train`` to train the model, ``StatModel::train<SVM>(traindata, params)`` to create and train the model, and ``StatModel::load<SVM>(filename)`` to load the pre-trained model. Since SVM has several parameters, you may want to find the best parameters for your problem. It can be done with ``SVM::trainAuto``.
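A minimal sketch, with the parameter values mirroring the defaults shown above: ::

    Ptr<TrainData> data = TrainData::create(samples, ROW_SAMPLE, labels);
    SVM::Params params( SVM::C_SVC, SVM::RBF,
                        0, 1, 0, 1, 0, 0, Mat(),
                        TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,
                                     1000, FLT_EPSILON) );
    Ptr<SVM> svm = StatModel::train<SVM>(data, params);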
CvSVM::train_auto
SVM::trainAuto
-----------------
Trains an SVM with optimal parameters.
.. ocv:function:: bool CvSVM::train_auto( const Mat& trainData, const Mat& responses, const Mat& varIdx, const Mat& sampleIdx, CvSVMParams params, int k_fold = 10, CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), bool balanced=false)
.. ocv:function:: bool SVM::trainAuto( const Ptr<TrainData>& data, int kFold = 10, ParamGrid Cgrid = SVM::getDefaultGrid(SVM::C), ParamGrid gammaGrid = SVM::getDefaultGrid(SVM::GAMMA), ParamGrid pGrid = SVM::getDefaultGrid(SVM::P), ParamGrid nuGrid = SVM::getDefaultGrid(SVM::NU), ParamGrid coeffGrid = SVM::getDefaultGrid(SVM::COEF), ParamGrid degreeGrid = SVM::getDefaultGrid(SVM::DEGREE), bool balanced=false)
.. ocv:function:: bool CvSVM::train_auto( const CvMat* trainData, const CvMat* responses, const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params, int kfold = 10, CvParamGrid Cgrid = get_default_grid(CvSVM::C), CvParamGrid gammaGrid = get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = get_default_grid(CvSVM::P), CvParamGrid nuGrid = get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = get_default_grid(CvSVM::DEGREE), bool balanced=false )
:param data: the training data that can be constructed using ``TrainData::create`` or ``TrainData::loadFromCSV``.
.. ocv:pyfunction:: cv2.SVM.train_auto(trainData, responses, varIdx, sampleIdx, params[, k_fold[, Cgrid[, gammaGrid[, pGrid[, nuGrid[, coeffGrid[, degreeGrid[, balanced]]]]]]]]) -> retval
:param k_fold: Cross-validation parameter. The training set is divided into ``k_fold`` subsets. One subset is used to test the model, the others form the train set. So, the SVM algorithm is executed ``k_fold`` times.
:param kFold: Cross-validation parameter. The training set is divided into ``kFold`` subsets. One subset is used to test the model, the others form the train set. So, the SVM algorithm is executed ``kFold`` times.
:param \*Grid: Iteration grid for the corresponding SVM parameter.
@ -220,97 +181,76 @@ Trains an SVM with optimal parameters.
The method trains the SVM model automatically by choosing the optimal
parameters ``C``, ``gamma``, ``p``, ``nu``, ``coef0``, ``degree`` from
:ocv:class:`CvSVMParams`. Parameters are considered optimal
``SVM::Params``. Parameters are considered optimal
when the cross-validation estimate of the test set error
is minimal.
If there is no need to optimize a parameter, the corresponding grid step should be set to any value less than or equal to 1. For example, to avoid optimization in ``gamma``, set ``gamma_grid.step = 0``, ``gamma_grid.min_val``, ``gamma_grid.max_val`` as arbitrary numbers. In this case, the value ``params.gamma`` is taken for ``gamma``.
If there is no need to optimize a parameter, the corresponding grid step should be set to any value less than or equal to 1. For example, to avoid optimization in ``gamma``, set ``gammaGrid.logStep = 0``; ``gammaGrid.minVal`` and ``gammaGrid.maxVal`` may be arbitrary numbers. In this case, the value ``params.gamma`` is taken for ``gamma``.
And, finally, if the optimization in a parameter is required but
the corresponding grid is unknown, you may call the function :ocv:func:`CvSVM::get_default_grid`. To generate a grid, for example, for ``gamma``, call ``CvSVM::get_default_grid(CvSVM::GAMMA)``.
the corresponding grid is unknown, you may call the function :ocv:func:`SVM::getDefaultGrid`. To generate a grid, for example, for ``gamma``, call ``SVM::getDefaultGrid(SVM::GAMMA)``.
This function works for the classification
(``params.svm_type=CvSVM::C_SVC`` or ``params.svm_type=CvSVM::NU_SVC``)
(``params.svmType=SVM::C_SVC`` or ``params.svmType=SVM::NU_SVC``)
as well as for the regression
(``params.svm_type=CvSVM::EPS_SVR`` or ``params.svm_type=CvSVM::NU_SVR``). If ``params.svm_type=CvSVM::ONE_CLASS``, no optimization is made and the usual SVM with parameters specified in ``params`` is executed.
CvSVM::predict
--------------
Predicts the response for input sample(s).
.. ocv:function:: float CvSVM::predict( const Mat& sample, bool returnDFVal=false ) const
.. ocv:function:: float CvSVM::predict( const CvMat* sample, bool returnDFVal=false ) const
.. ocv:function:: float CvSVM::predict( const CvMat* samples, CvMat* results, bool returnDFVal=false ) const
.. ocv:pyfunction:: cv2.SVM.predict(sample[, returnDFVal]) -> retval
.. ocv:pyfunction:: cv2.SVM.predict_all(samples[, results]) -> results
:param sample: Input sample for prediction.
:param samples: Input samples for prediction.
:param returnDFVal: Specifies a type of the return value. If ``true`` and the problem is 2-class classification, the method returns the decision function value, that is, the signed distance to the margin; otherwise the function returns a class label (classification) or the estimated function value (regression).
:param results: Output prediction responses for corresponding samples.
If you pass a single sample, the prediction result is returned. To get responses for several samples, pass the ``results`` matrix where the prediction results will be stored.
The function is parallelized with the TBB library.
(``params.svmType=SVM::EPS_SVR`` or ``params.svmType=SVM::NU_SVR``). If ``params.svmType=SVM::ONE_CLASS``, no optimization is made and the usual SVM with parameters specified in ``params`` is executed.
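A sketch of automatic training with 10-fold cross-validation; the remaining grid arguments keep their defaults: ::

    Ptr<SVM> svm = SVM::create();
    svm->trainAuto(data, 10, SVM::getDefaultGrid(SVM::C));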
CvSVM::get_default_grid
SVM::getDefaultGrid
-----------------------
Generates a grid for SVM parameters.
.. ocv:function:: CvParamGrid CvSVM::get_default_grid( int param_id )
.. ocv:function:: ParamGrid SVM::getDefaultGrid( int param_id )
:param param_id: SVM parameter ID that must be one of the following:
* **CvSVM::C**
* **SVM::C**
* **CvSVM::GAMMA**
* **SVM::GAMMA**
* **CvSVM::P**
* **SVM::P**
* **CvSVM::NU**
* **SVM::NU**
* **CvSVM::COEF**
* **SVM::COEF**
* **CvSVM::DEGREE**
* **SVM::DEGREE**
The grid is generated for the parameter with this ID.
The function generates a grid for the specified parameter of the SVM algorithm. The grid may be passed to the function :ocv:func:`CvSVM::train_auto`.
The function generates a grid for the specified parameter of the SVM algorithm. The grid may be passed to the function :ocv:func:`SVM::trainAuto`.
CvSVM::get_params
SVM::getParams
-----------------
Returns the current SVM parameters.
.. ocv:function:: CvSVMParams CvSVM::get_params() const
.. ocv:function:: SVM::Params SVM::getParams() const
This function may be used to get the optimal parameters obtained from automatic training with :ocv:func:`CvSVM::train_auto`.
This function may be used to get the optimal parameters obtained from automatic training with ``SVM::trainAuto``.
CvSVM::get_support_vector
SVM::getSupportVectors
--------------------------
Retrieves a number of support vectors and the particular vector.
Retrieves all the support vectors
.. ocv:function:: int CvSVM::get_support_vector_count() const
.. ocv:function:: Mat SVM::getSupportVectors() const
.. ocv:function:: const float* CvSVM::get_support_vector(int i) const
The method returns all the support vectors as a floating-point matrix, where support vectors are stored as matrix rows.
.. ocv:pyfunction:: cv2.SVM.get_support_vector_count() -> retval
SVM::getDecisionFunction
--------------------------
Retrieves the decision function
:param i: Index of the particular support vector.
.. ocv:function:: double SVM::getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const
The methods can be used to retrieve a set of support vectors.
:param i: the index of the decision function. If the problem solved is regression, 1-class or 2-class classification, then there will be just one decision function and the index should always be 0. Otherwise, in the case of N-class classification, there will be N*(N-1)/2 decision functions.
CvSVM::get_var_count
:param alpha: the optional output vector of weights, corresponding to different support vectors. In the case of linear SVM, all the alphas will be 1's.
:param svidx: the optional output vector of indices of support vectors within the matrix of support vectors (which can be retrieved by ``SVM::getSupportVectors``). In the case of linear SVM each decision function consists of a single "compressed" support vector.
The method returns the ``rho`` parameter of the decision function, a scalar subtracted from the weighted sum of kernel responses.
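A sketch for a trained 2-class model, where index 0 is the only decision function: ::

    Mat alpha, svidx;
    double rho = svm->getDecisionFunction(0, alpha, svidx);
    Mat sv = svm->getSupportVectors();   // support vectors, one per row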
Prediction with SVM
--------------------
Returns the number of used features (variables count).
.. ocv:function:: int CvSVM::get_var_count() const
.. ocv:pyfunction:: cv2.SVM.get_var_count() -> retval
``StatModel::predict(samples, results, flags)`` should be used. Pass ``flags=StatModel::RAW_OUTPUT`` to get the raw response from the SVM (in the case of a regression, 1-class or 2-class classification problem).
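A sketch of both prediction modes: ::

    Mat results;
    svm->predict(samples, results);                         // labels / function values
    svm->predict(samples, results, StatModel::RAW_OUTPUT);  // raw decision-function values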
@ -1,728 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if 0
ML_IMPL int
icvCmpIntegers (const void* a, const void* b) {return *(const int*)a - *(const int*)b;}
/****************************************************************************************\
* Cross-validation algorithms realizations *
\****************************************************************************************/
// Return pointer to trainIdx. Function DOES NOT FILL this matrix!
ML_IMPL
const CvMat* cvCrossValGetTrainIdxMatrix (const CvStatModel* estimateModel)
{
CvMat* result = NULL;
CV_FUNCNAME ("cvCrossValGetTrainIdxMatrix");
__BEGIN__
if (!CV_IS_CROSSVAL(estimateModel))
{
CV_ERROR (CV_StsBadArg, "Pointer point to not CvCrossValidationModel");
}
result = ((CvCrossValidationModel*)estimateModel)->sampleIdxTrain;
__END__
return result;
} // End of cvCrossValGetTrainIdxMatrix
/****************************************************************************************/
// Return pointer to checkIdx. Function DOES NOT FILL this matrix!
ML_IMPL
const CvMat* cvCrossValGetCheckIdxMatrix (const CvStatModel* estimateModel)
{
CvMat* result = NULL;
CV_FUNCNAME ("cvCrossValGetCheckIdxMatrix");
__BEGIN__
if (!CV_IS_CROSSVAL (estimateModel))
{
CV_ERROR (CV_StsBadArg, "Pointer point to not CvCrossValidationModel");
}
result = ((CvCrossValidationModel*)estimateModel)->sampleIdxEval;
__END__
return result;
} // End of cvCrossValGetCheckIdxMatrix
/****************************************************************************************/
// Create a new Idx-matrix for the next classifier training and return a result code.
// The result is 0 if the function can't make the next step (bad input or the folds are finished),
// 1 if all was correct, and 2 if the current fold wasn't checked.
ML_IMPL
int cvCrossValNextStep (CvStatModel* estimateModel)
{
int result = 0;
CV_FUNCNAME ("cvCrossValGetNextTrainIdx");
__BEGIN__
CvCrossValidationModel* crVal = (CvCrossValidationModel*) estimateModel;
int k, fold;
if (!CV_IS_CROSSVAL (estimateModel))
{
CV_ERROR (CV_StsBadArg, "Pointer point to not CvCrossValidationModel");
}
fold = ++crVal->current_fold;
if (fold >= crVal->folds_all)
{
if (fold == crVal->folds_all)
EXIT;
else
{
CV_ERROR (CV_StsInternal, "All iterations has end long ago");
}
}
k = crVal->folds[fold + 1] - crVal->folds[fold];
crVal->sampleIdxTrain->data.i = crVal->sampleIdxAll + crVal->folds[fold + 1];
crVal->sampleIdxTrain->cols = crVal->samples_all - k;
crVal->sampleIdxEval->data.i = crVal->sampleIdxAll + crVal->folds[fold];
crVal->sampleIdxEval->cols = k;
if (crVal->is_checked)
{
crVal->is_checked = 0;
result = 1;
}
else
{
result = 2;
}
__END__
return result;
}
/****************************************************************************************/
// Do the checking part of the cross-validation loop.
ML_IMPL
void cvCrossValCheckClassifier (CvStatModel* estimateModel,
const CvStatModel* model,
const CvMat* trainData,
int sample_t_flag,
const CvMat* trainClasses)
{
CV_FUNCNAME ("cvCrossValCheckClassifier ");
__BEGIN__
CvCrossValidationModel* crVal = (CvCrossValidationModel*) estimateModel;
int i, j, k;
int* data;
float* responses_fl;
int step;
float* responses_result;
int* responses_i;
double te, te1;
double sum_c, sum_p, sum_pp, sum_cp, sum_cc, sq_err;
// Check that the input data is valid.
if (!CV_IS_CROSSVAL (estimateModel))
{
CV_ERROR (CV_StsBadArg, "First parameter does not point to a CvCrossValidationModel");
}
if (!CV_IS_STAT_MODEL (model))
{
CV_ERROR (CV_StsBadArg, "Second parameter does not point to a CvStatModel");
}
if (!CV_IS_MAT (trainData))
{
CV_ERROR (CV_StsBadArg, "Third parameter does not point to a CvMat");
}
if (!CV_IS_MAT (trainClasses))
{
CV_ERROR (CV_StsBadArg, "Fifth parameter does not point to a CvMat");
}
if (crVal->is_checked)
{
CV_ERROR (CV_StsInternal, "This iteration was already checked");
}
// Initialize.
k = crVal->sampleIdxEval->cols;
data = crVal->sampleIdxEval->data.i;
// Eval tested feature vectors.
CV_CALL (cvStatModelMultiPredict (model, trainData, sample_t_flag,
crVal->predict_results, NULL, crVal->sampleIdxEval));
// Count the number of correct results.
responses_result = crVal->predict_results->data.fl;
if (crVal->is_regression)
{
sum_c = sum_p = sum_pp = sum_cp = sum_cc = sq_err = 0;
if (CV_MAT_TYPE (trainClasses->type) == CV_32FC1)
{
responses_fl = trainClasses->data.fl;
step = trainClasses->rows == 1 ? 1 : trainClasses->step / sizeof(float);
for (i = 0; i < k; i++)
{
te = responses_result[*data];
te1 = responses_fl[*data * step];
sum_c += te1;
sum_p += te;
sum_cc += te1 * te1;
sum_pp += te * te;
sum_cp += te1 * te;
te -= te1;
sq_err += te * te;
data++;
}
}
else
{
responses_i = trainClasses->data.i;
step = trainClasses->rows == 1 ? 1 : trainClasses->step / sizeof(int);
for (i = 0; i < k; i++)
{
te = responses_result[*data];
te1 = responses_i[*data * step];
sum_c += te1;
sum_p += te;
sum_cc += te1 * te1;
sum_pp += te * te;
sum_cp += te1 * te;
te -= te1;
sq_err += te * te;
data++;
}
}
// Update the internal accuracy accumulators.
crVal->sum_correct += sum_c;
crVal->sum_predict += sum_p;
crVal->sum_cc += sum_cc;
crVal->sum_pp += sum_pp;
crVal->sum_cp += sum_cp;
crVal->sq_error += sq_err;
}
else
{
if (CV_MAT_TYPE (trainClasses->type) == CV_32FC1)
{
responses_fl = trainClasses->data.fl;
step = trainClasses->rows == 1 ? 1 : trainClasses->step / sizeof(float);
for (i = 0, j = 0; i < k; i++)
{
if (cvRound (responses_result[*data]) == cvRound (responses_fl[*data * step]))
j++;
data++;
}
}
else
{
responses_i = trainClasses->data.i;
step = trainClasses->rows == 1 ? 1 : trainClasses->step / sizeof(int);
for (i = 0, j = 0; i < k; i++)
{
if (cvRound (responses_result[*data]) == responses_i[*data * step])
j++;
data++;
}
}
// Update the internal accuracy accumulators.
crVal->correct_results += j;
}
// Mark this fold as checked.
crVal->all_results += k;
crVal->is_checked = 1;
__END__
} // End of cvCrossValCheckClassifier
/****************************************************************************************/
// Return current accuracy.
ML_IMPL
float cvCrossValGetResult (const CvStatModel* estimateModel,
float* correlation)
{
float result = 0;
CV_FUNCNAME ("cvCrossValGetResult");
__BEGIN__
double te, te1;
CvCrossValidationModel* crVal = (CvCrossValidationModel*)estimateModel;
if (!CV_IS_CROSSVAL (estimateModel))
{
CV_ERROR (CV_StsBadArg, "Pointer point to not CvCrossValidationModel");
}
if (crVal->all_results)
{
if (crVal->is_regression)
{
result = ((float)crVal->sq_error) / crVal->all_results;
if (correlation)
{
te = crVal->all_results * crVal->sum_cp -
crVal->sum_correct * crVal->sum_predict;
te *= te;
te1 = (crVal->all_results * crVal->sum_cc -
crVal->sum_correct * crVal->sum_correct) *
(crVal->all_results * crVal->sum_pp -
crVal->sum_predict * crVal->sum_predict);
*correlation = (float)(te / te1);
}
}
else
{
result = ((float)crVal->correct_results) / crVal->all_results;
}
}
__END__
return result;
}
/****************************************************************************************/
// Reset the cross-validation EstimateModel to the same state it was in immediately after
// its creation.
ML_IMPL
void cvCrossValReset (CvStatModel* estimateModel)
{
CV_FUNCNAME ("cvCrossValReset");
__BEGIN__
CvCrossValidationModel* crVal = (CvCrossValidationModel*)estimateModel;
if (!CV_IS_CROSSVAL (estimateModel))
{
CV_ERROR (CV_StsBadArg, "Pointer point to not CvCrossValidationModel");
}
crVal->current_fold = -1;
crVal->is_checked = 1;
crVal->all_results = 0;
crVal->correct_results = 0;
crVal->sq_error = 0;
crVal->sum_correct = 0;
crVal->sum_predict = 0;
crVal->sum_cc = 0;
crVal->sum_pp = 0;
crVal->sum_cp = 0;
__END__
}
/****************************************************************************************/
// This is the standard CvStatModel function to release the cross-validation EstimateModel.
ML_IMPL
void cvReleaseCrossValidationModel (CvStatModel** model)
{
CvCrossValidationModel* pModel;
CV_FUNCNAME ("cvReleaseCrossValidationModel");
__BEGIN__
if (!model)
{
CV_ERROR (CV_StsNullPtr, "");
}
pModel = (CvCrossValidationModel*)*model;
if (!pModel)
{
return;
}
if (!CV_IS_CROSSVAL (pModel))
{
CV_ERROR (CV_StsBadArg, "");
}
cvFree (&pModel->sampleIdxAll);
cvFree (&pModel->folds);
cvReleaseMat (&pModel->sampleIdxEval);
cvReleaseMat (&pModel->sampleIdxTrain);
cvReleaseMat (&pModel->predict_results);
cvFree (model);
__END__
} // End of cvReleaseCrossValidationModel.
/****************************************************************************************/
// This function creates the cross-validation EstimateModel.
ML_IMPL CvStatModel*
cvCreateCrossValidationEstimateModel(
int samples_all,
const CvStatModelParams* estimateParams,
const CvMat* sampleIdx)
{
CvStatModel* model = NULL;
CvCrossValidationModel* crVal = NULL;
CV_FUNCNAME ("cvCreateCrossValidationEstimateModel");
__BEGIN__
int k_fold = 10;
int i, j, k, s_len;
int samples_selected;
CvRNG rng;
CvRNG* prng;
int* res_s_data;
int* te_s_data;
int* folds;
rng = cvRNG(cvGetTickCount());
cvRandInt (&rng); cvRandInt (&rng); cvRandInt (&rng); cvRandInt (&rng);
// Check input parameters.
if (estimateParams)
k_fold = ((CvCrossValidationParams*)estimateParams)->k_fold;
if (!k_fold)
{
CV_ERROR (CV_StsBadArg, "Error in parameters of cross-validation (k_fold == 0)!");
}
if (samples_all <= 0)
{
CV_ERROR (CV_StsBadArg, "<samples_all> should be positive!");
}
// Allocate memory and fill the standard StatModel fields.
CV_CALL (crVal = (CvCrossValidationModel*)cvCreateStatModel (
CV_STAT_MODEL_MAGIC_VAL | CV_CROSSVAL_MAGIC_VAL,
sizeof(CvCrossValidationModel),
cvReleaseCrossValidationModel,
NULL, NULL));
crVal->current_fold = -1;
crVal->folds_all = k_fold;
if (estimateParams && ((CvCrossValidationParams*)estimateParams)->is_regression)
crVal->is_regression = 1;
else
crVal->is_regression = 0;
if (estimateParams && ((CvCrossValidationParams*)estimateParams)->rng)
prng = ((CvCrossValidationParams*)estimateParams)->rng;
else
prng = &rng;
// Check and preprocess sample indices.
if (sampleIdx)
{
int s_step;
int s_type = 0;
if (!CV_IS_MAT (sampleIdx))
CV_ERROR (CV_StsBadArg, "Invalid sampleIdx array");
if (sampleIdx->rows != 1 && sampleIdx->cols != 1)
CV_ERROR (CV_StsBadSize, "sampleIdx array must be 1-dimensional");
s_len = sampleIdx->rows + sampleIdx->cols - 1;
s_step = sampleIdx->rows == 1 ?
1 : sampleIdx->step / CV_ELEM_SIZE(sampleIdx->type);
s_type = CV_MAT_TYPE (sampleIdx->type);
switch (s_type)
{
case CV_8UC1:
case CV_8SC1:
{
uchar* s_data = sampleIdx->data.ptr;
// sampleIdx is array of 1's and 0's -
// i.e. it is a mask of the selected samples
if( s_len != samples_all )
CV_ERROR (CV_StsUnmatchedSizes,
"Sample mask should contain as many elements as the total number of samples");
samples_selected = 0;
for (i = 0; i < s_len; i++)
samples_selected += s_data[i * s_step] != 0;
if (samples_selected == 0)
CV_ERROR (CV_StsOutOfRange, "No samples is selected!");
}
s_len = samples_selected;
break;
case CV_32SC1:
if (s_len > samples_all)
CV_ERROR (CV_StsOutOfRange,
"sampleIdx array may not contain more elements than the total number of samples");
samples_selected = s_len;
break;
default:
CV_ERROR (CV_StsUnsupportedFormat, "Unsupported sampleIdx array data type "
"(it should be 8uC1, 8sC1 or 32sC1)");
}
// Alloc additional memory for internal Idx and fill it.
/*!!*/ CV_CALL (res_s_data = crVal->sampleIdxAll =
(int*)cvAlloc (2 * s_len * sizeof(int)));
if (s_type < CV_32SC1)
{
uchar* s_data = sampleIdx->data.ptr;
for (i = 0; i < s_len; i++)
if (s_data[i * s_step])
{
*res_s_data++ = i;
}
res_s_data = crVal->sampleIdxAll;
}
else
{
int* s_data = sampleIdx->data.i;
int out_of_order = 0;
for (i = 0; i < s_len; i++)
{
res_s_data[i] = s_data[i * s_step];
if (i > 0 && res_s_data[i] < res_s_data[i - 1])
out_of_order = 1;
}
if (out_of_order)
qsort (res_s_data, s_len, sizeof(res_s_data[0]), icvCmpIntegers);
if (res_s_data[0] < 0 ||
res_s_data[s_len - 1] >= samples_all)
CV_ERROR (CV_StsBadArg, "There are out-of-range sample indices");
for (i = 1; i < s_len; i++)
if (res_s_data[i] <= res_s_data[i - 1])
CV_ERROR (CV_StsBadArg, "There are duplicated");
}
}
else // if (sampleIdx)
{
// Alloc additional memory for internal Idx and fill it.
s_len = samples_all;
CV_CALL (res_s_data = crVal->sampleIdxAll = (int*)cvAlloc (2 * s_len * sizeof(int)));
for (i = 0; i < s_len; i++)
{
*res_s_data++ = i;
}
res_s_data = crVal->sampleIdxAll;
} // if (sampleIdx) ... else
// Randomly shuffle the internal Idx.
te_s_data = res_s_data + s_len;
for (i = s_len; i > 1; i--)
{
j = cvRandInt (prng) % i;
k = *(--te_s_data);
*te_s_data = res_s_data[j];
res_s_data[j] = k;
}
// Duplicate the shuffled internal Idx.
// It will be used to simplify the operation of getting trainIdx.
te_s_data = res_s_data + s_len;
for (i = 0; i < s_len; i++)
{
*te_s_data++ = *res_s_data++;
}
// Split sampleIdxAll into folds.
if (k_fold > 0)
{
if (k_fold > s_len)
{
CV_ERROR (CV_StsBadArg,
"Error in parameters of cross-validation ('k_fold' > #samples)!");
}
folds = crVal->folds = (int*) cvAlloc ((k_fold + 1) * sizeof (int));
*folds++ = 0;
for (i = 1; i < k_fold; i++)
{
*folds++ = cvRound (i * s_len * 1. / k_fold);
}
*folds = s_len;
folds = crVal->folds;
crVal->max_fold_size = (s_len - 1) / k_fold + 1;
}
else
{
k = -k_fold;
crVal->max_fold_size = k;
if (k >= s_len)
{
CV_ERROR (CV_StsBadArg,
"Error in parameters of cross-validation (-'k_fold' > #samples)!");
}
crVal->folds_all = k = (s_len - 1) / k + 1;
folds = crVal->folds = (int*) cvAlloc ((k + 1) * sizeof (int));
for (i = 0; i < k; i++)
{
*folds++ = -i * k_fold;
}
*folds = s_len;
folds = crVal->folds;
}
// Prepare the other internal fields for work.
CV_CALL (crVal->predict_results = cvCreateMat (1, samples_all, CV_32FC1));
CV_CALL (crVal->sampleIdxEval = cvCreateMatHeader (1, 1, CV_32SC1));
CV_CALL (crVal->sampleIdxTrain = cvCreateMatHeader (1, 1, CV_32SC1));
crVal->sampleIdxEval->cols = 0;
crVal->sampleIdxTrain->cols = 0;
crVal->samples_all = s_len;
crVal->is_checked = 1;
crVal->getTrainIdxMat = cvCrossValGetTrainIdxMatrix;
crVal->getCheckIdxMat = cvCrossValGetCheckIdxMatrix;
crVal->nextStep = cvCrossValNextStep;
crVal->check = cvCrossValCheckClassifier;
crVal->getResult = cvCrossValGetResult;
crVal->reset = cvCrossValReset;
model = (CvStatModel*)crVal;
__END__
if (!model)
{
cvReleaseCrossValidationModel ((CvStatModel**)&crVal);
}
return model;
} // End of cvCreateCrossValidationEstimateModel
/****************************************************************************************\
* Extended interface with backcalls for models *
\****************************************************************************************/
ML_IMPL float
cvCrossValidation (const CvMat* trueData,
int tflag,
const CvMat* trueClasses,
CvStatModel* (*createClassifier) (const CvMat*,
int,
const CvMat*,
const CvClassifierTrainParams*,
const CvMat*,
const CvMat*,
const CvMat*,
const CvMat*),
const CvClassifierTrainParams* estimateParams,
const CvClassifierTrainParams* trainParams,
const CvMat* compIdx,
const CvMat* sampleIdx,
CvStatModel** pCrValModel,
const CvMat* typeMask,
const CvMat* missedMeasurementMask)
{
CvCrossValidationModel* crVal = NULL;
float result = 0;
CvStatModel* pClassifier = NULL;
CV_FUNCNAME ("cvCrossValidation");
__BEGIN__
const CvMat* trainDataIdx;
int samples_all;
// checking input data
if ((createClassifier) == NULL)
{
CV_ERROR (CV_StsNullPtr, "Null pointer to functiion which create classifier");
}
if (pCrValModel && *pCrValModel && !CV_IS_CROSSVAL(*pCrValModel))
{
CV_ERROR (CV_StsBadArg,
"<pCrValModel> does not point to a cross-validation model");
}
// initialization
if (pCrValModel && *pCrValModel)
{
crVal = (CvCrossValidationModel*)*pCrValModel;
crVal->reset ((CvStatModel*)crVal);
}
else
{
samples_all = ((tflag) ? trueData->rows : trueData->cols);
CV_CALL (crVal = (CvCrossValidationModel*)
cvCreateCrossValidationEstimateModel (samples_all, estimateParams, sampleIdx));
}
CV_CALL (trainDataIdx = crVal->getTrainIdxMat ((CvStatModel*)crVal));
// operation loop
for (; crVal->nextStep((CvStatModel*)crVal) != 0; )
{
CV_CALL (pClassifier = createClassifier (trueData, tflag, trueClasses,
trainParams, compIdx, trainDataIdx, typeMask, missedMeasurementMask));
CV_CALL (crVal->check ((CvStatModel*)crVal, pClassifier,
trueData, tflag, trueClasses));
pClassifier->release (&pClassifier);
}
// Get result and fill output field.
CV_CALL (result = crVal->getResult ((CvStatModel*)crVal, 0));
if (pCrValModel && !*pCrValModel)
*pCrValModel = (CvStatModel*)crVal;
__END__
// Free all memory that should be freed.
if (pClassifier)
pClassifier->release (&pClassifier);
if (crVal && (!pCrValModel || !*pCrValModel))
crVal->release ((CvStatModel**)&crVal);
return result;
} // End of cvCrossValidation
#endif
/* End of file */
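/* A minimal usage sketch (illustration only, not part of this diff; `myTrainFunction`,
   `trainData` and `trainClasses` are hypothetical placeholders):

static CvStatModel* buildMyClassifier( const CvMat* data, int tflag, const CvMat* classes,
                                       const CvClassifierTrainParams* params,
                                       const CvMat* compIdx, const CvMat* sampleIdx,
                                       const CvMat* typeMask, const CvMat* missedMask )
{
    // train and return any CvStatModel-derived classifier on (data, classes, sampleIdx);
    // all arguments arrive from cvCrossValidation unchanged
    return myTrainFunction( data, tflag, classes, params, compIdx, sampleIdx,
                            typeMask, missedMask );
}

float score = cvCrossValidation( trainData, CV_ROW_SAMPLE, trainClasses,
                                 buildMyClassifier,
                                 0, 0,   // default estimate and train params
                                 0, 0,   // use all components and all samples
                                 0,      // do not keep the cross-validation model
                                 0, 0 ); // no type mask, no missing-value mask
*/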

View File

@@ -2,6 +2,8 @@
#include "precomp.hpp"
#include <time.h>
#if 0
#define pCvSeq CvSeq*
#define pCvDTreeNode CvDTreeNode*
@@ -1359,3 +1361,5 @@ float CvGBTrees::predict( const cv::Mat& sample, const cv::Mat& _missing,
return predict(&_sample, _missing.empty() ? 0 : &miss, 0,
slice==cv::Range::all() ? CV_WHOLE_SEQ : cvSlice(slice.start, slice.end), k);
}
#endif

File diff suppressed because it is too large

View File

@@ -7,9 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -22,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
@@ -41,442 +43,321 @@
#include "precomp.hpp"
/****************************************************************************************\
* K-Nearest Neighbors Classifier *
* K-Nearest Neighbors Classifier *
\****************************************************************************************/
// k Nearest Neighbors
CvKNearest::CvKNearest()
namespace cv {
namespace ml {
KNearest::Params::Params(int k, bool isclassifier_)
{
samples = 0;
clear();
defaultK = k;
isclassifier = isclassifier_;
}
CvKNearest::~CvKNearest()
class KNearestImpl : public KNearest
{
clear();
}
CvKNearest::CvKNearest( const CvMat* _train_data, const CvMat* _responses,
const CvMat* _sample_idx, bool _is_regression, int _max_k )
{
samples = 0;
train( _train_data, _responses, _sample_idx, _is_regression, _max_k, false );
}
void CvKNearest::clear()
{
while( samples )
public:
KNearestImpl(const Params& p)
{
CvVectors* next_samples = samples->next;
cvFree( &samples->data.fl );
cvFree( &samples );
samples = next_samples;
}
var_count = 0;
total = 0;
max_k = 0;
}
int CvKNearest::get_max_k() const { return max_k; }
int CvKNearest::get_var_count() const { return var_count; }
bool CvKNearest::is_regression() const { return regression; }
int CvKNearest::get_sample_count() const { return total; }
bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses,
const CvMat* _sample_idx, bool _is_regression,
int _max_k, bool _update_base )
{
bool ok = false;
CvMat* responses = 0;
CV_FUNCNAME( "CvKNearest::train" );
__BEGIN__;
CvVectors* _samples = 0;
float** _data = 0;
int _count = 0, _dims = 0, _dims_all = 0, _rsize = 0;
if( !_update_base )
clear();
// Prepare training data and related parameters.
// Treat categorical responses as ordered - to prevent class label compression and
// to enable entering new classes in the updates
CV_CALL( cvPrepareTrainData( "CvKNearest::train", _train_data, CV_ROW_SAMPLE,
_responses, CV_VAR_ORDERED, 0, _sample_idx, true, (const float***)&_data,
&_count, &_dims, &_dims_all, &responses, 0, 0 ));
if( !responses )
CV_ERROR( CV_StsNoMem, "Could not allocate memory for responses" );
if( _update_base && _dims != var_count )
CV_ERROR( CV_StsBadArg, "The newly added data have different dimensionality" );
if( !_update_base )
{
if( _max_k < 1 )
CV_ERROR( CV_StsOutOfRange, "max_k must be a positive number" );
regression = _is_regression;
var_count = _dims;
max_k = _max_k;
params = p;
}
_rsize = _count*sizeof(float);
CV_CALL( _samples = (CvVectors*)cvAlloc( sizeof(*_samples) + _rsize ));
_samples->next = samples;
_samples->type = CV_32F;
_samples->data.fl = _data;
_samples->count = _count;
total += _count;
virtual ~KNearestImpl() {}
samples = _samples;
memcpy( _samples + 1, responses->data.fl, _rsize );
Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }
ok = true;
bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }
__END__;
String getDefaultModelName() const { return "opencv_ml_knn"; }
if( responses && responses->data.ptr != _responses->data.ptr )
cvReleaseMat(&responses);
return ok;
}
void CvKNearest::find_neighbors_direct( const CvMat* _samples, int k, int start, int end,
float* neighbor_responses, const float** neighbors, float* dist ) const
{
int i, j, count = end - start, k1 = 0, k2 = 0, d = var_count;
CvVectors* s = samples;
for( ; s != 0; s = s->next )
void clear()
{
int n = s->count;
for( j = 0; j < n; j++ )
samples.release();
responses.release();
}
int getVarCount() const { return samples.cols; }
bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
CV_Assert( new_samples.type() == CV_32F );
if( !update )
{
for( i = 0; i < count; i++ )
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}
samples.push_back(new_samples);
responses.push_back(new_responses);
return true;
}
void findNearestCore( const Mat& _samples, int k0, const Range& range,
Mat* results, Mat* neighbor_responses,
Mat* dists, float* presult ) const
{
int testidx, baseidx, i, j, d = samples.cols, nsamples = samples.rows;
int testcount = range.end - range.start;
int k = std::min(k0, nsamples);
AutoBuffer<float> buf(testcount*k*2);
float* dbuf = buf;
float* rbuf = dbuf + testcount*k;
const float* rptr = responses.ptr<float>();
for( testidx = 0; testidx < testcount; testidx++ )
{
for( i = 0; i < k; i++ )
{
double sum = 0;
dbuf[testidx*k + i] = FLT_MAX;
rbuf[testidx*k + i] = 0.f;
}
}
for( baseidx = 0; baseidx < nsamples; baseidx++ )
{
for( testidx = 0; testidx < testcount; testidx++ )
{
const float* v = samples.ptr<float>(baseidx);
const float* u = _samples.ptr<float>(testidx + range.start);
float s = 0;
for( i = 0; i <= d - 4; i += 4 )
{
float t0 = u[i] - v[i], t1 = u[i+1] - v[i+1];
float t2 = u[i+2] - v[i+2], t3 = u[i+3] - v[i+3];
s += t0*t0 + t1*t1 + t2*t2 + t3*t3;
}
for( ; i < d; i++ )
{
float t0 = u[i] - v[i];
s += t0*t0;
}
Cv32suf si;
const float* v = s->data.fl[j];
const float* u = (float*)(_samples->data.ptr + _samples->step*(start + i));
Cv32suf* dd = (Cv32suf*)(dist + i*k);
float* nr;
const float** nn;
int t, ii, ii1;
si.f = (float)s;
Cv32suf* dd = (Cv32suf*)(&dbuf[testidx*k]);
float* nr = &rbuf[testidx*k];
for( t = 0; t <= d - 4; t += 4 )
{
double t0 = u[t] - v[t], t1 = u[t+1] - v[t+1];
double t2 = u[t+2] - v[t+2], t3 = u[t+3] - v[t+3];
sum += t0*t0 + t1*t1 + t2*t2 + t3*t3;
}
for( ; t < d; t++ )
{
double t0 = u[t] - v[t];
sum += t0*t0;
}
si.f = (float)sum;
for( ii = k1-1; ii >= 0; ii-- )
if( si.i > dd[ii].i )
for( i = k; i > 0; i-- )
if( si.i >= dd[i-1].i )
break;
if( ii >= k-1 )
if( i >= k )
continue;
nr = neighbor_responses + i*k;
nn = neighbors ? neighbors + (start + i)*k : 0;
for( ii1 = k2 - 1; ii1 > ii; ii1-- )
for( j = k-2; j >= i; j-- )
{
dd[ii1+1].i = dd[ii1].i;
nr[ii1+1] = nr[ii1];
if( nn ) nn[ii1+1] = nn[ii1];
dd[j+1].i = dd[j].i;
nr[j+1] = nr[j];
}
dd[ii+1].i = si.i;
nr[ii+1] = ((float*)(s + 1))[j];
if( nn )
nn[ii+1] = v;
dd[i].i = si.i;
nr[i] = rptr[baseidx];
}
k1 = MIN( k1+1, k );
k2 = MIN( k1, k-1 );
}
}
}
float result = 0.f;
float inv_scale = 1.f/k;
float CvKNearest::write_results( int k, int k1, int start, int end,
const float* neighbor_responses, const float* dist,
CvMat* _results, CvMat* _neighbor_responses,
CvMat* _dist, Cv32suf* sort_buf ) const
{
float result = 0.f;
int i, j, j1, count = end - start;
double inv_scale = 1./k1;
int rstep = _results && !CV_IS_MAT_CONT(_results->type) ? _results->step/sizeof(result) : 1;
for( i = 0; i < count; i++ )
{
const Cv32suf* nr = (const Cv32suf*)(neighbor_responses + i*k);
float* dst;
float r;
if( _results || start+i == 0 )
for( testidx = 0; testidx < testcount; testidx++ )
{
if( regression )
if( neighbor_responses )
{
double s = 0;
for( j = 0; j < k1; j++ )
s += nr[j].f;
r = (float)(s*inv_scale);
float* nr = neighbor_responses->ptr<float>(testidx + range.start);
for( j = 0; j < k; j++ )
nr[j] = rbuf[testidx*k + j];
for( ; j < k0; j++ )
nr[j] = 0.f;
}
else
if( dists )
{
int prev_start = 0, best_count = 0, cur_count;
Cv32suf best_val;
float* dptr = dists->ptr<float>(testidx + range.start);
for( j = 0; j < k; j++ )
dptr[j] = dbuf[testidx*k + j];
for( ; j < k0; j++ )
dptr[j] = 0.f;
}
for( j = 0; j < k1; j++ )
sort_buf[j].i = nr[j].i;
for( j = k1-1; j > 0; j-- )
if( results || testidx+range.start == 0 )
{
if( !params.isclassifier || k == 1 )
{
bool swap_fl = false;
for( j1 = 0; j1 < j; j1++ )
if( sort_buf[j1].i > sort_buf[j1+1].i )
{
int t;
CV_SWAP( sort_buf[j1].i, sort_buf[j1+1].i, t );
swap_fl = true;
}
if( !swap_fl )
break;
float s = 0.f;
for( j = 0; j < k; j++ )
s += rbuf[testidx*k + j];
result = (float)(s*inv_scale);
}
best_val.i = 0;
for( j = 1; j <= k1; j++ )
if( j == k1 || sort_buf[j].i != sort_buf[j-1].i )
else
{
float* rp = rbuf + testidx*k;
for( j = k-1; j > 0; j-- )
{
cur_count = j - prev_start;
if( best_count < cur_count )
bool swap_fl = false;
for( i = 0; i < j; i++ )
{
best_count = cur_count;
best_val.i = sort_buf[j-1].i;
if( rp[i] > rp[i+1] )
{
std::swap(rp[i], rp[i+1]);
swap_fl = true;
}
}
prev_start = j;
if( !swap_fl )
break;
}
r = best_val.f;
result = rp[0];
int prev_start = 0;
int best_count = 0;
for( j = 1; j <= k; j++ )
{
if( j == k || rp[j] != rp[j-1] )
{
int count = j - prev_start;
if( best_count < count )
{
best_count = count;
result = rp[j-1];
}
prev_start = j;
}
}
}
if( results )
results->at<float>(testidx + range.start) = result;
if( presult && testidx+range.start == 0 )
*presult = result;
}
if( start+i == 0 )
result = r;
if( _results )
_results->data.fl[(start + i)*rstep] = r;
}
if( _neighbor_responses )
{
dst = (float*)(_neighbor_responses->data.ptr +
(start + i)*_neighbor_responses->step);
for( j = 0; j < k1; j++ )
dst[j] = nr[j].f;
for( ; j < k; j++ )
dst[j] = 0.f;
}
if( _dist )
{
dst = (float*)(_dist->data.ptr + (start + i)*_dist->step);
for( j = 0; j < k1; j++ )
dst[j] = dist[j + i*k];
for( ; j < k; j++ )
dst[j] = 0.f;
}
}
return result;
}
struct P1 : cv::ParallelLoopBody {
P1(const CvKNearest* _pointer, int _buf_sz, int _k, const CvMat* __samples, const float** __neighbors,
int _k1, CvMat* __results, CvMat* __neighbor_responses, CvMat* __dist, float* _result)
{
pointer = _pointer;
k = _k;
_samples = __samples;
_neighbors = __neighbors;
k1 = _k1;
_results = __results;
_neighbor_responses = __neighbor_responses;
_dist = __dist;
result = _result;
buf_sz = _buf_sz;
}
const CvKNearest* pointer;
int k;
const CvMat* _samples;
const float** _neighbors;
int k1;
CvMat* _results;
CvMat* _neighbor_responses;
CvMat* _dist;
float* result;
int buf_sz;
void operator()( const cv::Range& range ) const
{
cv::AutoBuffer<float> buf(buf_sz);
for(int i = range.start; i < range.end; i += 1 )
struct findKNearestInvoker : public ParallelLoopBody
{
float* neighbor_responses = &buf[0];
float* dist = neighbor_responses + 1*k;
Cv32suf* sort_buf = (Cv32suf*)(dist + 1*k);
findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples,
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
{
p = _p;
k = _k;
_samples = &__samples;
_results = __results;
_neighbor_responses = __neighbor_responses;
_dists = __dists;
presult = _presult;
}
pointer->find_neighbors_direct( _samples, k, i, i + 1,
neighbor_responses, _neighbors, dist );
void operator()( const Range& range ) const
{
int delta = std::min(range.end - range.start, 256);
for( int start = range.start; start < range.end; start += delta )
{
p->findNearestCore( *_samples, k, Range(start, std::min(start + delta, range.end)),
_results, _neighbor_responses, _dists, presult );
}
}
float r = pointer->write_results( k, k1, i, i + 1, neighbor_responses, dist,
_results, _neighbor_responses, _dist, sort_buf );
const KNearestImpl* p;
int k;
const Mat* _samples;
Mat* _results;
Mat* _neighbor_responses;
Mat* _dists;
float* presult;
};
if( i == 0 )
*result = r;
float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const
{
float result = 0.f;
CV_Assert( 0 < k );
Mat test_samples = _samples.getMat();
CV_Assert( test_samples.type() == CV_32F && test_samples.cols == samples.cols );
int testcount = test_samples.rows;
if( testcount == 0 )
{
_results.release();
_neighborResponses.release();
_dists.release();
return 0.f;
}
Mat res, nr, d, *pres = 0, *pnr = 0, *pd = 0;
if( _results.needed() )
{
_results.create(testcount, 1, CV_32F);
pres = &(res = _results.getMat());
}
if( _neighborResponses.needed() )
{
_neighborResponses.create(testcount, k, CV_32F);
pnr = &(nr = _neighborResponses.getMat());
}
if( _dists.needed() )
{
_dists.create(testcount, k, CV_32F);
pd = &(d = _dists.getMat());
}
findKNearestInvoker invoker(this, k, test_samples, pres, pnr, pd, &result);
parallel_for_(Range(0, testcount), invoker);
//invoker(Range(0, testcount));
return result;
}
}
float predict(InputArray inputs, OutputArray outputs, int) const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
}
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
}
void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
}
Mat samples;
Mat responses;
Params params;
};
float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
const float** _neighbors, CvMat* _neighbor_responses, CvMat* _dist ) const
Ptr<KNearest> KNearest::create(const Params& p)
{
float result = 0.f;
const int max_blk_count = 128, max_buf_sz = 1 << 12;
if( !samples )
CV_Error( CV_StsError, "The search tree must be constructed first using the train method" );
if( !CV_IS_MAT(_samples) ||
CV_MAT_TYPE(_samples->type) != CV_32FC1 ||
_samples->cols != var_count )
CV_Error( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );
if( _results && (!CV_IS_MAT(_results) ||
(_results->cols != 1 && _results->rows != 1) ||
_results->cols + _results->rows - 1 != _samples->rows) )
CV_Error( CV_StsBadArg,
"The results must be 1d vector containing as much elements as the number of samples" );
if( _results && CV_MAT_TYPE(_results->type) != CV_32FC1 &&
(CV_MAT_TYPE(_results->type) != CV_32SC1 || regression))
CV_Error( CV_StsUnsupportedFormat,
"The results must be floating-point or integer (in case of classification) vector" );
if( k < 1 || k > max_k )
CV_Error( CV_StsOutOfRange, "k must be within 1..max_k range" );
if( _neighbor_responses )
{
if( !CV_IS_MAT(_neighbor_responses) || CV_MAT_TYPE(_neighbor_responses->type) != CV_32FC1 ||
_neighbor_responses->rows != _samples->rows || _neighbor_responses->cols != k )
CV_Error( CV_StsBadArg,
"The neighbor responses (if present) must be floating-point matrix of <num_samples> x <k> size" );
}
if( _dist )
{
if( !CV_IS_MAT(_dist) || CV_MAT_TYPE(_dist->type) != CV_32FC1 ||
_dist->rows != _samples->rows || _dist->cols != k )
CV_Error( CV_StsBadArg,
"The distances from the neighbors (if present) must be floating-point matrix of <num_samples> x <k> size" );
}
int count = _samples->rows;
int count_scale = k*2;
int blk_count0 = MIN( count, max_blk_count );
int buf_sz = MIN( blk_count0 * count_scale, max_buf_sz );
blk_count0 = MAX( buf_sz/count_scale, 1 );
blk_count0 += blk_count0 % 2;
blk_count0 = MIN( blk_count0, count );
buf_sz = blk_count0 * count_scale + k;
int k1 = get_sample_count();
k1 = MIN( k1, k );
cv::parallel_for_(cv::Range(0, count), P1(this, buf_sz, k, _samples, _neighbors, k1,
_results, _neighbor_responses, _dist, &result)
);
return result;
return makePtr<KNearestImpl>(p);
}
using namespace cv;
CvKNearest::CvKNearest( const Mat& _train_data, const Mat& _responses,
const Mat& _sample_idx, bool _is_regression, int _max_k )
{
samples = 0;
train(_train_data, _responses, _sample_idx, _is_regression, _max_k, false );
}
bool CvKNearest::train( const Mat& _train_data, const Mat& _responses,
const Mat& _sample_idx, bool _is_regression,
int _max_k, bool _update_base )
{
CvMat tdata = _train_data, responses = _responses, sidx = _sample_idx;
return train(&tdata, &responses, sidx.data.ptr ? &sidx : 0, _is_regression, _max_k, _update_base );
}
float CvKNearest::find_nearest( const Mat& _samples, int k, Mat* _results,
const float** _neighbors, Mat* _neighbor_responses,
Mat* _dist ) const
{
CvMat s = _samples, results, *presults = 0, nresponses, *pnresponses = 0, dist, *pdist = 0;
if( _results )
{
if(!(_results->data && (_results->type() == CV_32F ||
(_results->type() == CV_32S && regression)) &&
(_results->cols == 1 || _results->rows == 1) &&
_results->cols + _results->rows - 1 == _samples.rows) )
_results->create(_samples.rows, 1, CV_32F);
presults = &(results = *_results);
}
if( _neighbor_responses )
{
if(!(_neighbor_responses->data && _neighbor_responses->type() == CV_32F &&
_neighbor_responses->cols == k && _neighbor_responses->rows == _samples.rows) )
_neighbor_responses->create(_samples.rows, k, CV_32F);
pnresponses = &(nresponses = *_neighbor_responses);
}
if( _dist )
{
if(!(_dist->data && _dist->type() == CV_32F &&
_dist->cols == k && _dist->rows == _samples.rows) )
_dist->create(_samples.rows, k, CV_32F);
pdist = &(dist = *_dist);
}
return find_nearest(&s, k, presults, _neighbors, pnresponses, pdist );
}
float CvKNearest::find_nearest( const cv::Mat& _samples, int k, CV_OUT cv::Mat& results,
CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const
{
return find_nearest(_samples, k, &results, 0, &neighborResponses, &dists);
}
/* End of file */
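/* A minimal usage sketch of the refactored interface (illustration only; assumes
   cv::ml::TrainData::create from the same refactoring and OpenCV's usual defaults):

using namespace cv;
using namespace cv::ml;

Mat samples( 100, 2, CV_32F ), responses( 100, 1, CV_32F );
randu( samples, 0.f, 1.f );
for( int i = 0; i < 100; i++ )
    responses.at<float>(i) = (float)(i % 2);          // two classes: 0 and 1
Ptr<KNearest> knn = KNearest::create( KNearest::Params(3, true) ); // k = 3, classifier
knn->train( TrainData::create(samples, ROW_SAMPLE, responses), 0 );
Mat query = (Mat_<float>(1, 2) << 0.5f, 0.5f), results;
knn->findNearest( query, 3, results, noArray(), noArray() );  // majority vote of 3
*/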

View File

@@ -1,63 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
CV_INIT_ALGORITHM(EM, "StatModel.EM",
obj.info()->addParam(obj, "nclusters", obj.nclusters);
obj.info()->addParam(obj, "covMatType", obj.covMatType);
obj.info()->addParam(obj, "maxIters", obj.maxIters);
obj.info()->addParam(obj, "epsilon", obj.epsilon);
obj.info()->addParam(obj, "weights", obj.weights, true);
obj.info()->addParam(obj, "means", obj.means, true);
obj.info()->addParam(obj, "covs", obj.covs, true))
bool initModule_ml(void)
{
Ptr<Algorithm> em = createEM_ptr_hidden();
return em->info() != 0;
}
}

View File

@@ -40,622 +40,428 @@
#include "precomp.hpp"
CvNormalBayesClassifier::CvNormalBayesClassifier()
{
var_count = var_all = 0;
var_idx = 0;
cls_labels = 0;
count = 0;
sum = 0;
productsum = 0;
avg = 0;
inv_eigen_values = 0;
cov_rotate_mats = 0;
c = 0;
default_model_name = "my_nb";
}
namespace cv {
namespace ml {
NormalBayesClassifier::Params::Params() {}
void CvNormalBayesClassifier::clear()
class NormalBayesClassifierImpl : public NormalBayesClassifier
{
if( cls_labels )
public:
NormalBayesClassifierImpl()
{
for( int cls = 0; cls < cls_labels->cols; cls++ )
{
cvReleaseMat( &count[cls] );
cvReleaseMat( &sum[cls] );
cvReleaseMat( &productsum[cls] );
cvReleaseMat( &avg[cls] );
cvReleaseMat( &inv_eigen_values[cls] );
cvReleaseMat( &cov_rotate_mats[cls] );
}
nallvars = 0;
}
cvReleaseMat( &cls_labels );
cvReleaseMat( &var_idx );
cvReleaseMat( &c );
cvFree( &count );
}
void setParams(const Params&) {}
Params getParams() const { return Params(); }
CvNormalBayesClassifier::~CvNormalBayesClassifier()
{
clear();
}
CvNormalBayesClassifier::CvNormalBayesClassifier(
const CvMat* _train_data, const CvMat* _responses,
const CvMat* _var_idx, const CvMat* _sample_idx )
{
var_count = var_all = 0;
var_idx = 0;
cls_labels = 0;
count = 0;
sum = 0;
productsum = 0;
avg = 0;
inv_eigen_values = 0;
cov_rotate_mats = 0;
c = 0;
default_model_name = "my_nb";
train( _train_data, _responses, _var_idx, _sample_idx );
}
bool CvNormalBayesClassifier::train( const CvMat* _train_data, const CvMat* _responses,
const CvMat* _var_idx, const CvMat* _sample_idx, bool update )
{
const float min_variation = FLT_EPSILON;
bool result = false;
CvMat* responses = 0;
const float** train_data = 0;
CvMat* __cls_labels = 0;
CvMat* __var_idx = 0;
CvMat* cov = 0;
CV_FUNCNAME( "CvNormalBayesClassifier::train" );
__BEGIN__;
int cls, nsamples = 0, _var_count = 0, _var_all = 0, nclasses = 0;
int s, c1, c2;
const int* responses_data;
CV_CALL( cvPrepareTrainData( 0,
_train_data, CV_ROW_SAMPLE, _responses, CV_VAR_CATEGORICAL,
_var_idx, _sample_idx, false, &train_data,
&nsamples, &_var_count, &_var_all, &responses,
&__cls_labels, &__var_idx ));
if( !update )
bool train( const Ptr<TrainData>& trainData, int flags )
{
const size_t mat_size = sizeof(CvMat*);
size_t data_size;
const float min_variation = FLT_EPSILON;
Mat responses = trainData->getNormCatResponses();
Mat __cls_labels = trainData->getClassLabels();
Mat __var_idx = trainData->getVarIdx();
Mat samples = trainData->getTrainSamples();
int nclasses = (int)__cls_labels.total();
clear();
int nvars = trainData->getNVars();
int s, c1, c2, cls;
var_idx = __var_idx;
cls_labels = __cls_labels;
__var_idx = __cls_labels = 0;
var_count = _var_count;
var_all = _var_all;
int __nallvars = trainData->getNAllVars();
bool update = (flags & UPDATE_MODEL) != 0;
nclasses = cls_labels->cols;
data_size = nclasses*6*mat_size;
if( !update )
{
nallvars = __nallvars;
count.resize(nclasses);
sum.resize(nclasses);
productsum.resize(nclasses);
avg.resize(nclasses);
inv_eigen_values.resize(nclasses);
cov_rotate_mats.resize(nclasses);
CV_CALL( count = (CvMat**)cvAlloc( data_size ));
memset( count, 0, data_size );
for( cls = 0; cls < nclasses; cls++ )
{
count[cls] = Mat::zeros( 1, nvars, CV_32SC1 );
sum[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
productsum[cls] = Mat::zeros( nvars, nvars, CV_64FC1 );
avg[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
inv_eigen_values[cls] = Mat::zeros( 1, nvars, CV_64FC1 );
cov_rotate_mats[cls] = Mat::zeros( nvars, nvars, CV_64FC1 );
}
sum = count + nclasses;
productsum = sum + nclasses;
avg = productsum + nclasses;
inv_eigen_values= avg + nclasses;
cov_rotate_mats = inv_eigen_values + nclasses;
var_idx = __var_idx;
cls_labels = __cls_labels;
CV_CALL( c = cvCreateMat( 1, nclasses, CV_64FC1 ));
c.create(1, nclasses, CV_64FC1);
}
else
{
// check that the new training data has the same dimensionality etc.
if( nallvars != __nallvars ||
var_idx.size() != __var_idx.size() ||
norm(var_idx, __var_idx, NORM_INF) != 0 ||
cls_labels.size() != __cls_labels.size() ||
norm(cls_labels, __cls_labels, NORM_INF) != 0 )
CV_Error( CV_StsBadArg,
"The new training data is inconsistent with the original training data; varIdx and the class labels should be the same" );
}
Mat cov( nvars, nvars, CV_64FC1 );
int nsamples = samples.rows;
// process train data (count, sum , productsum)
for( s = 0; s < nsamples; s++ )
{
cls = responses.at<int>(s);
int* count_data = count[cls].ptr<int>();
double* sum_data = sum[cls].ptr<double>();
double* prod_data = productsum[cls].ptr<double>();
const float* train_vec = samples.ptr<float>(s);
for( c1 = 0; c1 < nvars; c1++, prod_data += nvars )
{
double val1 = train_vec[c1];
sum_data[c1] += val1;
count_data[c1]++;
for( c2 = c1; c2 < nvars; c2++ )
prod_data[c2] += train_vec[c2]*val1;
}
}
Mat vt;
// calculate avg, covariance matrix, c
for( cls = 0; cls < nclasses; cls++ )
{
CV_CALL(count[cls] = cvCreateMat( 1, var_count, CV_32SC1 ));
CV_CALL(sum[cls] = cvCreateMat( 1, var_count, CV_64FC1 ));
CV_CALL(productsum[cls] = cvCreateMat( var_count, var_count, CV_64FC1 ));
CV_CALL(avg[cls] = cvCreateMat( 1, var_count, CV_64FC1 ));
CV_CALL(inv_eigen_values[cls] = cvCreateMat( 1, var_count, CV_64FC1 ));
CV_CALL(cov_rotate_mats[cls] = cvCreateMat( var_count, var_count, CV_64FC1 ));
CV_CALL(cvZero( count[cls] ));
CV_CALL(cvZero( sum[cls] ));
CV_CALL(cvZero( productsum[cls] ));
CV_CALL(cvZero( avg[cls] ));
CV_CALL(cvZero( inv_eigen_values[cls] ));
CV_CALL(cvZero( cov_rotate_mats[cls] ));
}
}
else
{
// check that the new training data has the same dimensionality etc.
if( _var_count != var_count || _var_all != var_all || !((!_var_idx && !var_idx) ||
(_var_idx && var_idx && cvNorm(_var_idx,var_idx,CV_C) < DBL_EPSILON)) )
CV_ERROR( CV_StsBadArg,
"The new training data is inconsistent with the original training data" );
double det = 1;
int i, j;
Mat& w = inv_eigen_values[cls];
int* count_data = count[cls].ptr<int>();
double* avg_data = avg[cls].ptr<double>();
double* sum1 = sum[cls].ptr<double>();
if( cls_labels->cols != __cls_labels->cols ||
cvNorm(cls_labels, __cls_labels, CV_C) > DBL_EPSILON )
CV_ERROR( CV_StsNotImplemented,
"In the current implementation the new training data must have absolutely "
"the same set of class labels as used in the original training data" );
completeSymm(productsum[cls], 0);
nclasses = cls_labels->cols;
}
responses_data = responses->data.i;
CV_CALL( cov = cvCreateMat( _var_count, _var_count, CV_64FC1 ));
/* process train data (count, sum , productsum) */
for( s = 0; s < nsamples; s++ )
{
cls = responses_data[s];
int* count_data = count[cls]->data.i;
double* sum_data = sum[cls]->data.db;
double* prod_data = productsum[cls]->data.db;
const float* train_vec = train_data[s];
for( c1 = 0; c1 < _var_count; c1++, prod_data += _var_count )
{
double val1 = train_vec[c1];
sum_data[c1] += val1;
count_data[c1]++;
for( c2 = c1; c2 < _var_count; c2++ )
prod_data[c2] += train_vec[c2]*val1;
}
}
cvReleaseMat( &responses );
responses = 0;
/* calculate avg, covariance matrix, c */
for( cls = 0; cls < nclasses; cls++ )
{
double det = 1;
int i, j;
CvMat* w = inv_eigen_values[cls];
int* count_data = count[cls]->data.i;
double* avg_data = avg[cls]->data.db;
double* sum1 = sum[cls]->data.db;
cvCompleteSymm( productsum[cls], 0 );
for( j = 0; j < _var_count; j++ )
{
int n = count_data[j];
avg_data[j] = n ? sum1[j] / n : 0.;
}
count_data = count[cls]->data.i;
avg_data = avg[cls]->data.db;
sum1 = sum[cls]->data.db;
for( i = 0; i < _var_count; i++ )
{
double* avg2_data = avg[cls]->data.db;
double* sum2 = sum[cls]->data.db;
double* prod_data = productsum[cls]->data.db + i*_var_count;
double* cov_data = cov->data.db + i*_var_count;
double s1val = sum1[i];
double avg1 = avg_data[i];
int _count = count_data[i];
for( j = 0; j <= i; j++ )
for( j = 0; j < nvars; j++ )
{
double avg2 = avg2_data[j];
double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count;
cov_val = (_count > 1) ? cov_val / (_count - 1) : cov_val;
cov_data[j] = cov_val;
int n = count_data[j];
avg_data[j] = n ? sum1[j] / n : 0.;
}
count_data = count[cls].ptr<int>();
avg_data = avg[cls].ptr<double>();
sum1 = sum[cls].ptr<double>();
for( i = 0; i < nvars; i++ )
{
double* avg2_data = avg[cls].ptr<double>();
double* sum2 = sum[cls].ptr<double>();
double* prod_data = productsum[cls].ptr<double>(i);
double* cov_data = cov.ptr<double>(i);
double s1val = sum1[i];
double avg1 = avg_data[i];
int _count = count_data[i];
for( j = 0; j <= i; j++ )
{
double avg2 = avg2_data[j];
double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count;
cov_val = (_count > 1) ? cov_val / (_count - 1) : cov_val;
cov_data[j] = cov_val;
}
}
completeSymm( cov, 1 );
SVD::compute(cov, w, cov_rotate_mats[cls], noArray());
transpose(cov_rotate_mats[cls], cov_rotate_mats[cls]);
cv::max(w, min_variation, w);
for( j = 0; j < nvars; j++ )
det *= w.at<double>(j);
divide(1., w, w);
c.at<double>(cls) = det > 0 ? log(det) : -700;
}
return true;
}
class NBPredictBody : public ParallelLoopBody
{
public:
NBPredictBody( const Mat& _c, const vector<Mat>& _cov_rotate_mats,
const vector<Mat>& _inv_eigen_values,
const vector<Mat>& _avg,
const Mat& _samples, const Mat& _vidx, const Mat& _cls_labels,
Mat& _results, Mat& _results_prob, bool _rawOutput )
{
c = &_c;
cov_rotate_mats = &_cov_rotate_mats;
inv_eigen_values = &_inv_eigen_values;
avg = &_avg;
samples = &_samples;
vidx = &_vidx;
cls_labels = &_cls_labels;
results = &_results;
results_prob = _results_prob.data ? &_results_prob : 0;
rawOutput = _rawOutput;
}
const Mat* c;
const vector<Mat>* cov_rotate_mats;
const vector<Mat>* inv_eigen_values;
const vector<Mat>* avg;
const Mat* samples;
const Mat* vidx;
const Mat* cls_labels;
Mat* results_prob;
Mat* results;
float* value;
bool rawOutput;
void operator()( const Range& range ) const
{
int cls = -1;
int rtype = 0, rptype = 0;
size_t rstep = 0, rpstep = 0;
int nclasses = (int)cls_labels->total();
int nvars = avg->at(0).cols;
double probability = 0;
const int* vptr = vidx && !vidx->empty() ? vidx->ptr<int>() : 0;
if (results)
{
rtype = results->type();
rstep = results->isContinuous() ? 1 : results->step/results->elemSize();
}
if (results_prob)
{
rptype = results_prob->type();
rpstep = results_prob->isContinuous() ? 1 : results_prob->step/results_prob->elemSize();
}
// allocate memory and initialize headers for the calculation
cv::AutoBuffer<double> _buffer(nvars*2);
double* _diffin = _buffer;
double* _diffout = _buffer + nvars;
Mat diffin( 1, nvars, CV_64FC1, _diffin );
Mat diffout( 1, nvars, CV_64FC1, _diffout );
for(int k = range.start; k < range.end; k++ )
{
double opt = FLT_MAX;
for(int i = 0; i < nclasses; i++ )
{
double cur = c->at<double>(i);
const Mat& u = cov_rotate_mats->at(i);
const Mat& w = inv_eigen_values->at(i);
const double* avg_data = avg->at(i).ptr<double>();
const float* x = samples->ptr<float>(k);
// cov = u w u' --> cov^(-1) = u w^(-1) u'
for(int j = 0; j < nvars; j++ )
_diffin[j] = avg_data[j] - x[vptr ? vptr[j] : j];
gemm( diffin, u, 1, noArray(), 0, diffout, GEMM_2_T );
for(int j = 0; j < nvars; j++ )
{
double d = _diffout[j];
cur += d*d*w.ptr<double>()[j];
}
if( cur < opt )
{
cls = i;
opt = cur;
}
probability = exp( -0.5 * cur );
if( results_prob )
{
if ( rptype == CV_32FC1 )
results_prob->ptr<float>()[k*rpstep + i] = (float)probability;
else
results_prob->ptr<double>()[k*rpstep + i] = probability;
}
}
int ival = rawOutput ? cls : cls_labels->at<int>(cls);
if( results )
{
if( rtype == CV_32SC1 )
results->ptr<int>()[k*rstep] = ival;
else
results->ptr<float>()[k*rstep] = (float)ival;
}
}
}
};
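// Up to an additive constant, `cur` above is the negative log-likelihood of the Gaussian
// class model: cur = log|S_i| + (x - mu_i)^T S_i^{-1} (x - mu_i), with the covariance
// factored as S_i = u w u^T so that S_i^{-1} = u w^{-1} u^T; the predicted class is
// argmin_i cur, and exp(-0.5 * cur) is reported as the (unnormalized) class probability.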
CV_CALL( cvCompleteSymm( cov, 1 ));
CV_CALL( cvSVD( cov, w, cov_rotate_mats[cls], 0, CV_SVD_U_T ));
CV_CALL( cvMaxS( w, min_variation, w ));
for( j = 0; j < _var_count; j++ )
det *= w->data.db[j];
CV_CALL( cvDiv( NULL, w, w ));
c->data.db[cls] = det > 0 ? log(det) : -700;
float predict( InputArray _samples, OutputArray _results, int flags ) const
{
return predictProb(_samples, _results, noArray(), flags);
}
result = true;
float predictProb( InputArray _samples, OutputArray _results, OutputArray _resultsProb, int flags ) const
{
int value=0;
Mat samples = _samples.getMat(), results, resultsProb;
int nsamples = samples.rows, nclasses = (int)cls_labels.total();
bool rawOutput = (flags & RAW_OUTPUT) != 0;
__END__;
if( samples.type() != CV_32F || samples.cols != nallvars )
CV_Error( CV_StsBadArg,
"The input samples must be 32f matrix with the number of columns = nallvars" );
if( !result || cvGetErrStatus() < 0 )
if( samples.rows > 1 && _results.needed() )
CV_Error( CV_StsNullPtr,
"When the number of input samples is >1, the output vector of results must be passed" );
if( _results.needed() )
{
_results.create(nsamples, 1, CV_32S);
results = _results.getMat();
}
else
results = Mat(1, 1, CV_32S, &value);
if( _resultsProb.needed() )
{
_resultsProb.create(nsamples, nclasses, CV_32F);
resultsProb = _resultsProb.getMat();
}
cv::parallel_for_(cv::Range(0, nsamples),
NBPredictBody(c, cov_rotate_mats, inv_eigen_values, avg, samples,
var_idx, cls_labels, results, resultsProb, rawOutput));
return (float)value;
}
void write( FileStorage& fs ) const
{
int nclasses = (int)cls_labels.total(), i;
fs << "var_count" << (var_idx.empty() ? nallvars : (int)var_idx.total());
fs << "var_all" << nallvars;
if( !var_idx.empty() )
fs << "var_idx" << var_idx;
fs << "cls_labels" << cls_labels;
fs << "count" << "[";
for( i = 0; i < nclasses; i++ )
fs << count[i];
fs << "]" << "sum" << "[";
for( i = 0; i < nclasses; i++ )
fs << sum[i];
fs << "]" << "productsum" << "[";
for( i = 0; i < nclasses; i++ )
fs << productsum[i];
fs << "]" << "avg" << "[";
for( i = 0; i < nclasses; i++ )
fs << avg[i];
fs << "]" << "inv_eigen_values" << "[";
for( i = 0; i < nclasses; i++ )
fs << inv_eigen_values[i];
fs << "]" << "cov_rotate_mats" << "[";
for( i = 0; i < nclasses; i++ )
fs << cov_rotate_mats[i];
fs << "]";
fs << "c" << c;
}
void read( const FileNode& fn )
{
clear();
cvReleaseMat( &cov );
cvReleaseMat( &__cls_labels );
cvReleaseMat( &__var_idx );
cvFree( &train_data );
fn["var_all"] >> nallvars;
return result;
}
if( nallvars <= 0 )
CV_Error( CV_StsParseError,
"The field \"var_count\" of NBayes classifier is missing or non-positive" );
struct predict_body : cv::ParallelLoopBody {
predict_body(CvMat* _c, CvMat** _cov_rotate_mats, CvMat** _inv_eigen_values, CvMat** _avg,
const CvMat* _samples, const int* _vidx, CvMat* _cls_labels,
CvMat* _results, float* _value, int _var_count1, CvMat* _results_prob
)
{
c = _c;
cov_rotate_mats = _cov_rotate_mats;
inv_eigen_values = _inv_eigen_values;
avg = _avg;
samples = _samples;
vidx = _vidx;
cls_labels = _cls_labels;
results = _results;
value = _value;
var_count1 = _var_count1;
results_prob = _results_prob;
}
fn["var_idx"] >> var_idx;
fn["cls_labels"] >> cls_labels;
CvMat* c;
CvMat** cov_rotate_mats;
CvMat** inv_eigen_values;
CvMat** avg;
const CvMat* samples;
const int* vidx;
CvMat* cls_labels;
int nclasses = (int)cls_labels.total(), i;
CvMat* results_prob;
CvMat* results;
float* value;
int var_count1;
if( cls_labels.empty() || nclasses < 1 )
CV_Error( CV_StsParseError, "No or invalid \"cls_labels\" in NBayes classifier" );
void operator()( const cv::Range& range ) const
{
FileNodeIterator
count_it = fn["count"].begin(),
sum_it = fn["sum"].begin(),
productsum_it = fn["productsum"].begin(),
avg_it = fn["avg"].begin(),
inv_eigen_values_it = fn["inv_eigen_values"].begin(),
cov_rotate_mats_it = fn["cov_rotate_mats"].begin();
int cls = -1;
int rtype = 0, rstep = 0, rptype = 0, rpstep = 0;
int nclasses = cls_labels->cols;
int _var_count = avg[0]->cols;
double probability = 0;
count.resize(nclasses);
sum.resize(nclasses);
productsum.resize(nclasses);
avg.resize(nclasses);
inv_eigen_values.resize(nclasses);
cov_rotate_mats.resize(nclasses);
if (results)
{
rtype = CV_MAT_TYPE(results->type);
rstep = CV_IS_MAT_CONT(results->type) ? 1 : results->step/CV_ELEM_SIZE(rtype);
}
if (results_prob)
{
rptype = CV_MAT_TYPE(results_prob->type);
rpstep = CV_IS_MAT_CONT(results_prob->type) ? 1 : results_prob->step/CV_ELEM_SIZE(rptype);
}
// allocate memory and initialize headers for the calculation
cv::AutoBuffer<double> buffer(nclasses + var_count1);
CvMat diff = cvMat( 1, var_count1, CV_64FC1, &buffer[0] );
for(int k = range.start; k < range.end; k += 1 )
{
int ival;
double opt = FLT_MAX;
for(int i = 0; i < nclasses; i++ )
for( i = 0; i < nclasses; i++, ++count_it, ++sum_it, ++productsum_it, ++avg_it,
++inv_eigen_values_it, ++cov_rotate_mats_it )
{
double cur = c->data.db[i];
CvMat* u = cov_rotate_mats[i];
CvMat* w = inv_eigen_values[i];
const double* avg_data = avg[i]->data.db;
const float* x = (const float*)(samples->data.ptr + samples->step*k);
// cov = u w u' --> cov^(-1) = u w^(-1) u'
for(int j = 0; j < _var_count; j++ )
diff.data.db[j] = avg_data[j] - x[vidx ? vidx[j] : j];
cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
for(int j = 0; j < _var_count; j++ )
{
double d = diff.data.db[j];
cur += d*d*w->data.db[j];
}
if( cur < opt )
{
cls = i;
opt = cur;
}
/* probability = exp( -0.5 * cur ) */
probability = exp( -0.5 * cur );
*count_it >> count[i];
*sum_it >> sum[i];
*productsum_it >> productsum[i];
*avg_it >> avg[i];
*inv_eigen_values_it >> inv_eigen_values[i];
*cov_rotate_mats_it >> cov_rotate_mats[i];
}
ival = cls_labels->data.i[cls];
if( results )
{
if( rtype == CV_32SC1 )
results->data.i[k*rstep] = ival;
else
results->data.fl[k*rstep] = (float)ival;
}
if ( results_prob )
{
if ( rptype == CV_32FC1 )
results_prob->data.fl[k*rpstep] = (float)probability;
else
results_prob->data.db[k*rpstep] = probability;
}
if( k == 0 )
*value = (float)ival;
fn["c"] >> c;
}
}
void clear()
{
count.clear();
sum.clear();
productsum.clear();
avg.clear();
inv_eigen_values.clear();
cov_rotate_mats.clear();
var_idx.release();
cls_labels.release();
c.release();
nallvars = 0;
}
bool isTrained() const { return !avg.empty(); }
bool isClassifier() const { return true; }
int getVarCount() const { return nallvars; }
String getDefaultModelName() const { return "opencv_ml_nbayes"; }
int nallvars;
Mat var_idx, cls_labels, c;
vector<Mat> count, sum, productsum, avg, inv_eigen_values, cov_rotate_mats;
};
float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results, CvMat* results_prob ) const
Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
{
float value = 0;
if( !CV_IS_MAT(samples) || CV_MAT_TYPE(samples->type) != CV_32FC1 || samples->cols != var_all )
CV_Error( CV_StsBadArg,
"The input samples must be 32f matrix with the number of columns = var_all" );
if( samples->rows > 1 && !results )
CV_Error( CV_StsNullPtr,
"When the number of input samples is >1, the output vector of results must be passed" );
if( results )
{
if( !CV_IS_MAT(results) || (CV_MAT_TYPE(results->type) != CV_32FC1 &&
CV_MAT_TYPE(results->type) != CV_32SC1) ||
(results->cols != 1 && results->rows != 1) ||
results->cols + results->rows - 1 != samples->rows )
CV_Error( CV_StsBadArg, "The output array must be integer or floating-point vector "
"with the number of elements = number of rows in the input matrix" );
}
if( results_prob )
{
if( !CV_IS_MAT(results_prob) || (CV_MAT_TYPE(results_prob->type) != CV_32FC1 &&
CV_MAT_TYPE(results_prob->type) != CV_64FC1) ||
(results_prob->cols != 1 && results_prob->rows != 1) ||
results_prob->cols + results_prob->rows - 1 != samples->rows )
CV_Error( CV_StsBadArg, "The output array must be double or float vector "
"with the number of elements = number of rows in the input matrix" );
}
const int* vidx = var_idx ? var_idx->data.i : 0;
cv::parallel_for_(cv::Range(0, samples->rows),
predict_body(c, cov_rotate_mats, inv_eigen_values, avg, samples,
vidx, cls_labels, results, &value, var_count, results_prob));
return value;
Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
return p;
}
void CvNormalBayesClassifier::write( CvFileStorage* fs, const char* name ) const
{
CV_FUNCNAME( "CvNormalBayesClassifier::write" );
__BEGIN__;
int nclasses, i;
nclasses = cls_labels->cols;
cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_ML_NBAYES );
CV_CALL( cvWriteInt( fs, "var_count", var_count ));
CV_CALL( cvWriteInt( fs, "var_all", var_all ));
if( var_idx )
CV_CALL( cvWrite( fs, "var_idx", var_idx ));
CV_CALL( cvWrite( fs, "cls_labels", cls_labels ));
CV_CALL( cvStartWriteStruct( fs, "count", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, count[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvStartWriteStruct( fs, "sum", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, sum[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvStartWriteStruct( fs, "productsum", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, productsum[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvStartWriteStruct( fs, "avg", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, avg[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvStartWriteStruct( fs, "inv_eigen_values", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, inv_eigen_values[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvStartWriteStruct( fs, "cov_rotate_mats", CV_NODE_SEQ ));
for( i = 0; i < nclasses; i++ )
CV_CALL( cvWrite( fs, NULL, cov_rotate_mats[i] ));
CV_CALL( cvEndWriteStruct( fs ));
CV_CALL( cvWrite( fs, "c", c ));
cvEndWriteStruct( fs );
__END__;
}
void CvNormalBayesClassifier::read( CvFileStorage* fs, CvFileNode* root_node )
{
bool ok = false;
CV_FUNCNAME( "CvNormalBayesClassifier::read" );
__BEGIN__;
int nclasses, i;
size_t data_size;
CvFileNode* node;
CvSeq* seq;
CvSeqReader reader;
clear();
CV_CALL( var_count = cvReadIntByName( fs, root_node, "var_count", -1 ));
CV_CALL( var_all = cvReadIntByName( fs, root_node, "var_all", -1 ));
CV_CALL( var_idx = (CvMat*)cvReadByName( fs, root_node, "var_idx" ));
CV_CALL( cls_labels = (CvMat*)cvReadByName( fs, root_node, "cls_labels" ));
if( !cls_labels )
CV_ERROR( CV_StsParseError, "No \"cls_labels\" in NBayes classifier" );
if( cls_labels->cols < 1 )
CV_ERROR( CV_StsBadArg, "Number of classes is less 1" );
if( var_count <= 0 )
CV_ERROR( CV_StsParseError,
"The field \"var_count\" of NBayes classifier is missing" );
nclasses = cls_labels->cols;
data_size = nclasses*6*sizeof(CvMat*);
CV_CALL( count = (CvMat**)cvAlloc( data_size ));
memset( count, 0, data_size );
sum = count + nclasses;
productsum = sum + nclasses;
avg = productsum + nclasses;
inv_eigen_values = avg + nclasses;
cov_rotate_mats = inv_eigen_values + nclasses;
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "count" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( count[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "sum" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( sum[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "productsum" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( productsum[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "avg" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( avg[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "inv_eigen_values" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( inv_eigen_values[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( node = cvGetFileNodeByName( fs, root_node, "cov_rotate_mats" ));
seq = node->data.seq;
if( !CV_NODE_IS_SEQ(node->tag) || seq->total != nclasses)
CV_ERROR( CV_StsBadArg, "" );
CV_CALL( cvStartReadSeq( seq, &reader, 0 ));
for( i = 0; i < nclasses; i++ )
{
CV_CALL( cov_rotate_mats[i] = (CvMat*)cvRead( fs, (CvFileNode*)reader.ptr ));
CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
}
CV_CALL( c = (CvMat*)cvReadByName( fs, root_node, "c" ));
ok = true;
__END__;
if( !ok )
clear();
}
using namespace cv;
CvNormalBayesClassifier::CvNormalBayesClassifier( const Mat& _train_data, const Mat& _responses,
const Mat& _var_idx, const Mat& _sample_idx )
{
var_count = var_all = 0;
var_idx = 0;
cls_labels = 0;
count = 0;
sum = 0;
productsum = 0;
avg = 0;
inv_eigen_values = 0;
cov_rotate_mats = 0;
c = 0;
default_model_name = "my_nb";
CvMat tdata = _train_data, responses = _responses, vidx = _var_idx, sidx = _sample_idx;
train(&tdata, &responses, vidx.data.ptr ? &vidx : 0,
sidx.data.ptr ? &sidx : 0);
}
bool CvNormalBayesClassifier::train( const Mat& _train_data, const Mat& _responses,
const Mat& _var_idx, const Mat& _sample_idx, bool update )
{
CvMat tdata = _train_data, responses = _responses, vidx = _var_idx, sidx = _sample_idx;
return train(&tdata, &responses, vidx.data.ptr ? &vidx : 0,
sidx.data.ptr ? &sidx : 0, update);
}
float CvNormalBayesClassifier::predict( const Mat& _samples, Mat* _results, Mat* _results_prob ) const
{
CvMat samples = _samples, results, *presults = 0, results_prob, *presults_prob = 0;
if( _results )
{
if( !(_results->data && _results->type() == CV_32F &&
(_results->cols == 1 || _results->rows == 1) &&
_results->cols + _results->rows - 1 == _samples.rows) )
_results->create(_samples.rows, 1, CV_32F);
presults = &(results = *_results);
}
if( _results_prob )
{
if( !(_results_prob->data && _results_prob->type() == CV_64F &&
(_results_prob->cols == 1 || _results_prob->rows == 1) &&
_results_prob->cols + _results_prob->rows - 1 == _samples.rows) )
_results_prob->create(_samples.rows, 1, CV_64F);
presults_prob = &(results_prob = *_results_prob);
}
return predict(&samples, presults, presults_prob);
}
/* End of file. */
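/* A minimal usage sketch of the refactored interface (illustration only; assumes
   cv::ml::TrainData::create from the same refactoring):

using namespace cv;
using namespace cv::ml;

Mat samples( 60, 2, CV_32F ), responses( 60, 1, CV_32S );
randu( samples, 0.f, 1.f );
for( int i = 0; i < 60; i++ )
    responses.at<int>(i) = i % 3;                     // three classes: 0, 1, 2
Ptr<NormalBayesClassifier> nb =
    NormalBayesClassifier::create( NormalBayesClassifier::Params() );
nb->train( TrainData::create(samples, ROW_SAMPLE, responses), 0 );
Mat results, probs;
nb->predictProb( samples, results, probs, 0 );  // per-class exp(-0.5*cur) in probs
*/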

View File

@@ -38,8 +38,8 @@
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#ifndef __OPENCV_ML_PRECOMP_HPP__
#define __OPENCV_ML_PRECOMP_HPP__
#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"
@@ -56,321 +56,218 @@
#include <stdio.h>
#include <string.h>
#include <time.h>
#define ML_IMPL CV_IMPL
#define __BEGIN__ __CV_BEGIN__
#define __END__ __CV_END__
#define EXIT __CV_EXIT__
#define CV_MAT_ELEM_FLAG( mat, type, comp, vect, tflag ) \
(( tflag == CV_ROW_SAMPLE ) \
? (CV_MAT_ELEM( mat, type, comp, vect )) \
: (CV_MAT_ELEM( mat, type, vect, comp )))
/* Convert matrix to vector */
#define ICV_MAT2VEC( mat, vdata, vstep, num ) \
if( MIN( (mat).rows, (mat).cols ) != 1 ) \
CV_ERROR( CV_StsBadArg, "" ); \
(vdata) = ((mat).data.ptr); \
if( (mat).rows == 1 ) \
{ \
(vstep) = CV_ELEM_SIZE( (mat).type ); \
(num) = (mat).cols; \
} \
else \
{ \
(vstep) = (mat).step; \
(num) = (mat).rows; \
}
/* get raw data */
#define ICV_RAWDATA( mat, flags, rdata, sstep, cstep, m, n ) \
(rdata) = (mat).data.ptr; \
if( CV_IS_ROW_SAMPLE( flags ) ) \
{ \
(sstep) = (mat).step; \
(cstep) = CV_ELEM_SIZE( (mat).type ); \
(m) = (mat).rows; \
(n) = (mat).cols; \
} \
else \
{ \
(cstep) = (mat).step; \
(sstep) = CV_ELEM_SIZE( (mat).type ); \
(n) = (mat).rows; \
(m) = (mat).cols; \
}
#define ICV_IS_MAT_OF_TYPE( mat, mat_type) \
(CV_IS_MAT( mat ) && CV_MAT_TYPE( mat->type ) == (mat_type) && \
(mat)->cols > 0 && (mat)->rows > 0)
/*
uchar* data; int sstep, cstep; - trainData->data
uchar* classes; int clstep; int ncl;- trainClasses
uchar* tmask; int tmstep; int ntm; - typeMask
uchar* missed;int msstep, mcstep; -missedMeasurements...
int mm, mn; == m,n == size,dim
uchar* sidx;int sistep; - sampleIdx
uchar* cidx;int cistep; - compIdx
int k, l; == n,m == dim,size (length of cidx, sidx)
int m, n; == size,dim
*/
#define ICV_DECLARE_TRAIN_ARGS() \
uchar* data; \
int sstep, cstep; \
uchar* classes; \
int clstep; \
int ncl; \
uchar* tmask; \
int tmstep; \
int ntm; \
uchar* missed; \
int msstep, mcstep; \
int mm, mn; \
uchar* sidx; \
int sistep; \
uchar* cidx; \
int cistep; \
int k, l; \
int m, n; \
\
data = classes = tmask = missed = sidx = cidx = NULL; \
sstep = cstep = clstep = ncl = tmstep = ntm = msstep = mcstep = mm = mn = 0; \
sistep = cistep = k = l = m = n = 0;
#define ICV_TRAIN_DATA_REQUIRED( param, flags ) \
if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_RAWDATA( *(param), (flags), data, sstep, cstep, m, n ); \
k = n; \
l = m; \
}
#define ICV_TRAIN_CLASSES_REQUIRED( param ) \
if( !ICV_IS_MAT_OF_TYPE( (param), CV_32FC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *(param), classes, clstep, ncl ); \
if( m != ncl ) \
{ \
CV_ERROR( CV_StsBadArg, "Unmatched sizes" ); \
} \
}
#define ICV_ARG_NULL( param ) \
if( (param) != NULL ) \
{ \
CV_ERROR( CV_StsBadArg, #param " parameter must be NULL" ); \
}
#define ICV_MISSED_MEASUREMENTS_OPTIONAL( param, flags ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_8UC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_RAWDATA( *(param), (flags), missed, msstep, mcstep, mm, mn ); \
if( mm != m || mn != n ) \
{ \
CV_ERROR( CV_StsBadArg, "Unmatched sizes" ); \
} \
} \
}
#define ICV_COMP_IDX_OPTIONAL( param ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *(param), cidx, cistep, k ); \
if( k > n ) \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
}
#define ICV_SAMPLE_IDX_OPTIONAL( param ) \
if( param ) \
{ \
if( !ICV_IS_MAT_OF_TYPE( param, CV_32SC1 ) ) \
{ \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
else \
{ \
ICV_MAT2VEC( *sampleIdx, sidx, sistep, l ); \
if( l > m ) \
CV_ERROR( CV_StsBadArg, "Invalid " #param " parameter" ); \
} \
}
/****************************************************************************************/
#define ICV_CONVERT_FLOAT_ARRAY_TO_MATRICE( array, matrice ) \
{ \
CvMat a, b; \
int dims = (matrice)->cols; \
int nsamples = (matrice)->rows; \
int type = CV_MAT_TYPE((matrice)->type); \
int i, offset = dims; \
\
CV_ASSERT( type == CV_32FC1 || type == CV_64FC1 ); \
offset *= ((type == CV_32FC1) ? sizeof(float) : sizeof(double));\
\
b = cvMat( 1, dims, CV_32FC1 ); \
cvGetRow( matrice, &a, 0 ); \
for( i = 0; i < nsamples; i++, a.data.ptr += offset ) \
{ \
b.data.fl = (float*)array[i]; \
CV_CALL( cvConvert( &b, &a ) ); \
} \
}
#include <vector>
/****************************************************************************************\
* Auxiliary functions declarations *
\****************************************************************************************/
* Main struct definitions *
\****************************************************************************************/
/* Generates <num_of_clusters> class centers as uniform random vectors inside the
parallelepiped in which <data> is concentrated. Vectors in <data> should be oriented
horizontally. If <centers> != NULL, the function doesn't allocate any memory, stores
the generated centers in <centers> and returns <centers>. If <centers> == NULL, the
function allocates the matrix itself. Centers are oriented horizontally. */
CvMat* icvGenerateRandomClusterCenters( int seed,
const CvMat* data,
int num_of_clusters,
CvMat* centers CV_DEFAULT(0));
/* Fills <labels> from <probs> by choosing the maximal probability. Outliers are
detected via <outlier_thresh> and get the cluster label (-1). The function also makes
sure no cluster stays "empty" by filling empty clusters with the maximal probability
vector. If probs_sums != NULL, it is filled with the sum of probabilities for each
sample (useful for normalizing the probability matrix of FCM) */
void icvFindClusterLabels( const CvMat* probs, float outlier_thresh, float r,
const CvMat* labels );
typedef struct CvSparseVecElem32f
{
int idx;
float val;
}
CvSparseVecElem32f;
/* Prepare training data and related parameters */
#define CV_TRAIN_STATMODEL_DEFRAGMENT_TRAIN_DATA 1
#define CV_TRAIN_STATMODEL_SAMPLES_AS_ROWS 2
#define CV_TRAIN_STATMODEL_SAMPLES_AS_COLUMNS 4
#define CV_TRAIN_STATMODEL_CATEGORICAL_RESPONSE 8
#define CV_TRAIN_STATMODEL_ORDERED_RESPONSE 16
#define CV_TRAIN_STATMODEL_RESPONSES_ON_OUTPUT 32
#define CV_TRAIN_STATMODEL_ALWAYS_COPY_TRAIN_DATA 64
#define CV_TRAIN_STATMODEL_SPARSE_AS_SPARSE 128
int
cvPrepareTrainData( const char* /*funcname*/,
const CvMat* train_data, int tflag,
const CvMat* responses, int response_type,
const CvMat* var_idx,
const CvMat* sample_idx,
bool always_copy_data,
const float*** out_train_samples,
int* _sample_count,
int* _var_count,
int* _var_all,
CvMat** out_responses,
CvMat** out_response_map,
CvMat** out_var_idx,
CvMat** out_sample_idx=0 );
void
cvSortSamplesByClasses( const float** samples, const CvMat* classes,
int* class_ranges, const uchar** mask CV_DEFAULT(0) );
void
cvCombineResponseMaps (CvMat* _responses,
const CvMat* old_response_map,
CvMat* new_response_map,
CvMat** out_response_map);
void
cvPreparePredictData( const CvArr* sample, int dims_all, const CvMat* comp_idx,
int class_count, const CvMat* prob, float** row_sample,
int as_sparse CV_DEFAULT(0) );
/* copies clustering [or batch "predict"] results
(labels and/or centers and/or probs) back to the output arrays */
void
cvWritebackLabels( const CvMat* labels, CvMat* dst_labels,
const CvMat* centers, CvMat* dst_centers,
const CvMat* probs, CvMat* dst_probs,
const CvMat* sample_idx, int samples_all,
const CvMat* comp_idx, int dims_all );
#define cvWritebackResponses cvWritebackLabels
#define XML_FIELD_NAME "_name"
CvFileNode* icvFileNodeGetChild(CvFileNode* father, const char* name);
CvFileNode* icvFileNodeGetChildArrayElem(CvFileNode* father, const char* name,int index);
CvFileNode* icvFileNodeGetNext(CvFileNode* n, const char* name);
void cvCheckTrainData( const CvMat* train_data, int tflag,
const CvMat* missing_mask,
int* var_all, int* sample_all );
CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, bool check_for_duplicates=false );
CvMat* cvPreprocessVarType( const CvMat* type_mask, const CvMat* var_idx,
int var_all, int* response_type );
CvMat* cvPreprocessOrderedResponses( const CvMat* responses,
const CvMat* sample_idx, int sample_all );
CvMat* cvPreprocessCategoricalResponses( const CvMat* responses,
const CvMat* sample_idx, int sample_all,
CvMat** out_response_map, CvMat** class_counts=0 );
const float** cvGetTrainSamples( const CvMat* train_data, int tflag,
const CvMat* var_idx, const CvMat* sample_idx,
int* _var_count, int* _sample_count,
bool always_copy_data=false );
/* log(2*PI) */
#define CV_LOG2PI (1.8378770664093454835606594728112)
namespace cv
{
struct DTreeBestSplitFinder
{
DTreeBestSplitFinder(){ splitSize = 0, tree = 0; node = 0; }
DTreeBestSplitFinder( CvDTree* _tree, CvDTreeNode* _node);
DTreeBestSplitFinder( const DTreeBestSplitFinder& finder, Split );
virtual ~DTreeBestSplitFinder() {}
virtual void operator()(const BlockedRange& range);
void join( DTreeBestSplitFinder& rhs );
Ptr<CvDTreeSplit> bestSplit;
Ptr<CvDTreeSplit> split;
int splitSize;
CvDTree* tree;
CvDTreeNode* node;
};
struct ForestTreeBestSplitFinder : DTreeBestSplitFinder
{
ForestTreeBestSplitFinder() : DTreeBestSplitFinder() {}
ForestTreeBestSplitFinder( CvForestTree* _tree, CvDTreeNode* _node );
ForestTreeBestSplitFinder( const ForestTreeBestSplitFinder& finder, Split );
virtual void operator()(const BlockedRange& range);
};
}
#endif /* __ML_H__ */
namespace ml
{
using std::vector;
#define CV_DTREE_CAT_DIR(idx,subset) \
(2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1)
template<typename _Tp> struct cmp_lt_idx
{
cmp_lt_idx(const _Tp* _arr) : arr(_arr) {}
bool operator ()(int a, int b) const { return arr[a] < arr[b]; }
const _Tp* arr;
};
template<typename _Tp> struct cmp_lt_ptr
{
cmp_lt_ptr() {}
bool operator ()(const _Tp* a, const _Tp* b) const { return *a < *b; }
};
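// Illustrative sketch (not part of the patch): CV_DTREE_CAT_DIR tests one bit of
// the packed 32-bit subset words and maps it to a split direction, -1 when the
// category's bit is set and +1 when it is clear.
//
//     int subset[2] = { 0, 0 };
//     subset[0] |= (1 << 3);                    // put category 3 into the subset
//     int dir3  = CV_DTREE_CAT_DIR(3, subset);  // == -1 (bit set)
//     int dir4  = CV_DTREE_CAT_DIR(4, subset);  // == +1 (bit clear)
//     int dir40 = CV_DTREE_CAT_DIR(40, subset); // reads bit 8 of subset[1]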
static inline void setRangeVector(std::vector<int>& vec, int n)
{
vec.resize(n);
for( int i = 0; i < n; i++ )
vec[i] = i;
}
static inline void writeTermCrit(FileStorage& fs, const TermCriteria& termCrit)
{
if( (termCrit.type & TermCriteria::EPS) != 0 )
fs << "epsilon" << termCrit.epsilon;
if( (termCrit.type & TermCriteria::COUNT) != 0 )
fs << "iterations" << termCrit.maxCount;
}
static inline TermCriteria readTermCrit(const FileNode& fn)
{
TermCriteria termCrit;
double epsilon = (double)fn["epsilon"];
if( epsilon > 0 )
{
termCrit.type |= TermCriteria::EPS;
termCrit.epsilon = epsilon;
}
int iters = (int)fn["iterations"];
if( iters > 0 )
{
termCrit.type |= TermCriteria::COUNT;
termCrit.maxCount = iters;
}
return termCrit;
}
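// Illustrative round-trip sketch (not part of the file; "tc.yml" is a made-up name):
// writeTermCrit() stores only the components whose flags are set, and readTermCrit()
// re-derives the flags from whichever keys are present and positive.
//
//     TermCriteria tc(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-6);
//     FileStorage fs("tc.yml", FileStorage::WRITE);
//     fs << "term_criteria" << "{";
//     writeTermCrit(fs, tc);               // writes "epsilon" and "iterations"
//     fs << "}";
//     fs.release();
//     FileStorage fs2("tc.yml", FileStorage::READ);
//     TermCriteria tc2 = readTermCrit(fs2["term_criteria"]);
//     // tc2 again has EPS|COUNT set, tc2.epsilon == 1e-6, tc2.maxCount == 100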
class DTreesImpl : public DTrees
{
public:
struct WNode
{
WNode()
{
class_idx = sample_count = depth = complexity = 0;
parent = left = right = split = defaultDir = -1;
Tn = INT_MAX;
value = maxlr = alpha = node_risk = tree_risk = tree_error = 0.;
}
int class_idx;
double Tn;
double value;
int parent;
int left;
int right;
int defaultDir;
int split;
int sample_count;
int depth;
double maxlr;
// global pruning data
int complexity;
double alpha;
double node_risk, tree_risk, tree_error;
};
struct WSplit
{
WSplit()
{
varIdx = next = 0;
inversed = false;
quality = c = 0.f;
subsetOfs = -1;
}
int varIdx;
bool inversed;
float quality;
int next;
float c;
int subsetOfs;
};
struct WorkData
{
WorkData(const Ptr<TrainData>& _data);
Ptr<TrainData> data;
vector<WNode> wnodes;
vector<WSplit> wsplits;
vector<int> wsubsets;
vector<double> cv_Tn;
vector<double> cv_node_risk;
vector<double> cv_node_error;
vector<int> cv_labels;
vector<double> sample_weights;
vector<int> cat_responses;
vector<double> ord_responses;
vector<int> sidx;
int maxSubsetSize;
};
DTreesImpl();
virtual ~DTreesImpl();
virtual void clear();
String getDefaultModelName() const { return "opencv_ml_dtree"; }
bool isTrained() const { return !roots.empty(); }
bool isClassifier() const { return _isClassifier; }
int getVarCount() const { return varType.empty() ? 0 : (int)(varType.size() - 1); }
int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }
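// note: categorical splits store one bit per category of variable vi, packed
// into 32-bit words, hence the (count + 31)/32 rounding in getSubsetSize() above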
virtual void setDParams(const Params& _params);
virtual Params getDParams() const;
virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
virtual void endTraining();
virtual void initCompVarIdx();
virtual bool train( const Ptr<TrainData>& trainData, int flags );
virtual int addTree( const vector<int>& sidx );
virtual int addNodeAndTrySplit( int parent, const vector<int>& sidx );
virtual const vector<int>& getActiveVars();
virtual int findBestSplit( const vector<int>& _sidx );
virtual void calcValue( int nidx, const vector<int>& _sidx );
virtual WSplit findSplitOrdClass( int vi, const vector<int>& _sidx, double initQuality );
// simple k-means, slightly modified to take into account the "weight" (L1-norm) of each vector.
virtual void clusterCategories( const double* vectors, int n, int m, double* csums, int k, int* labels );
virtual WSplit findSplitCatClass( int vi, const vector<int>& _sidx, double initQuality, int* subset );
virtual WSplit findSplitOrdReg( int vi, const vector<int>& _sidx, double initQuality );
virtual WSplit findSplitCatReg( int vi, const vector<int>& _sidx, double initQuality, int* subset );
virtual int calcDir( int splitidx, const vector<int>& _sidx, vector<int>& _sleft, vector<int>& _sright );
virtual int pruneCV( int root );
virtual double updateTreeRNC( int root, double T, int fold );
virtual bool cutTree( int root, double T, int fold, double min_alpha );
virtual float predictTrees( const Range& range, const Mat& sample, int flags ) const;
virtual float predict( InputArray inputs, OutputArray outputs, int flags ) const;
virtual void writeTrainingParams( FileStorage& fs ) const;
virtual void writeParams( FileStorage& fs ) const;
virtual void writeSplit( FileStorage& fs, int splitidx ) const;
virtual void writeNode( FileStorage& fs, int nidx, int depth ) const;
virtual void writeTree( FileStorage& fs, int root ) const;
virtual void write( FileStorage& fs ) const;
virtual void readParams( const FileNode& fn );
virtual int readSplit( const FileNode& fn );
virtual int readNode( const FileNode& fn );
virtual int readTree( const FileNode& fn );
virtual void read( const FileNode& fn );
virtual const std::vector<int>& getRoots() const { return roots; }
virtual const std::vector<Node>& getNodes() const { return nodes; }
virtual const std::vector<Split>& getSplits() const { return splits; }
virtual const std::vector<int>& getSubsets() const { return subsets; }
Params params0, params;
vector<int> varIdx;
vector<int> compVarIdx;
vector<uchar> varType;
vector<Vec2i> catOfs;
vector<int> catMap;
vector<int> roots;
vector<Node> nodes;
vector<Split> splits;
vector<int> subsets;
vector<int> classLabels;
vector<float> missingSubst;
bool _isClassifier;
Ptr<WorkData> w;
};
}}
#endif /* __OPENCV_ML_PRECOMP_HPP__ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -40,131 +40,74 @@
#include "precomp.hpp"
typedef struct CvDI
{
double d;
int i;
} CvDI;
static int CV_CDECL
icvCmpDI( const void* a, const void* b, void* )
{
const CvDI* e1 = (const CvDI*) a;
const CvDI* e2 = (const CvDI*) b;
return (e1->d < e2->d) ? -1 : (e1->d > e2->d);
}
namespace cv { namespace ml {
struct PairDI
{
double d;
int i;
};
struct CmpPairDI
{
bool operator ()(const PairDI& e1, const PairDI& e2) const
{
return (e1.d < e2.d) || (e1.d == e2.d && e1.i < e2.i);
}
};
CV_IMPL void
cvCreateTestSet( int type, CvMat** samples,
int num_samples,
int num_features,
CvMat** responses,
int num_classes, ... )
{
CvMat* mean = NULL;
CvMat* cov = NULL;
CvMemStorage* storage = NULL;
CV_FUNCNAME( "cvCreateTestSet" );
__BEGIN__;
if( samples )
*samples = NULL;
if( responses )
*responses = NULL;
if( type != CV_TS_CONCENTRIC_SPHERES )
CV_ERROR( CV_StsBadArg, "Invalid type parameter" );
if( !samples )
CV_ERROR( CV_StsNullPtr, "samples parameter must be not NULL" );
if( !responses )
CV_ERROR( CV_StsNullPtr, "responses parameter must be not NULL" );
if( num_samples < 1 )
CV_ERROR( CV_StsBadArg, "num_samples parameter must be positive" );
if( num_features < 1 )
CV_ERROR( CV_StsBadArg, "num_features parameter must be positive" );
if( num_classes < 1 )
CV_ERROR( CV_StsBadArg, "num_classes parameter must be positive" );
if( type == CV_TS_CONCENTRIC_SPHERES )
{
CvSeqWriter writer;
CvSeqReader reader;
CvMat sample;
CvDI elem;
CvSeq* seq = NULL;
int i, cur_class;
CV_CALL( *samples = cvCreateMat( num_samples, num_features, CV_32FC1 ) );
CV_CALL( *responses = cvCreateMat( 1, num_samples, CV_32SC1 ) );
CV_CALL( mean = cvCreateMat( 1, num_features, CV_32FC1 ) );
CV_CALL( cvSetZero( mean ) );
CV_CALL( cov = cvCreateMat( num_features, num_features, CV_32FC1 ) );
CV_CALL( cvSetIdentity( cov ) );
/* fill the feature values matrix with random numbers drawn from standard
normal distribution */
CV_CALL( cvRandMVNormal( mean, cov, *samples ) );
/* calculate distances from the origin to the samples and put them
into the sequence along with indices */
CV_CALL( storage = cvCreateMemStorage() );
CV_CALL( cvStartWriteSeq( 0, sizeof( CvSeq ), sizeof( CvDI ), storage, &writer ));
for( i = 0; i < (*samples)->rows; ++i )
{
CV_CALL( cvGetRow( *samples, &sample, i ));
elem.i = i;
CV_CALL( elem.d = cvNorm( &sample, NULL, CV_L2 ));
CV_WRITE_SEQ_ELEM( elem, writer );
}
CV_CALL( seq = cvEndWriteSeq( &writer ) );
/* sort the sequence in a distance ascending order */
CV_CALL( cvSeqSort( seq, icvCmpDI, NULL ) );
/* assign class labels */
num_classes = MIN( num_samples, num_classes );
CV_CALL( cvStartReadSeq( seq, &reader ) );
CV_READ_SEQ_ELEM( elem, reader );
for( i = 0, cur_class = 0; i < num_samples; ++cur_class )
{
int last_idx;
double max_dst;
last_idx = num_samples * (cur_class + 1) / num_classes - 1;
CV_CALL( max_dst = (*((CvDI*) cvGetSeqElem( seq, last_idx ))).d );
max_dst = MAX( max_dst, elem.d );
for( ; elem.d <= max_dst && i < num_samples; ++i )
{
CV_MAT_ELEM( **responses, int, 0, elem.i ) = cur_class;
if( i < num_samples - 1 )
{
CV_READ_SEQ_ELEM( elem, reader );
}
}
}
}
__END__;
if( cvGetErrStatus() < 0 )
{
if( samples )
cvReleaseMat( samples );
if( responses )
cvReleaseMat( responses );
}
cvReleaseMat( &mean );
cvReleaseMat( &cov );
cvReleaseMemStorage( &storage );
}
void createConcentricSpheresTestSet( int num_samples, int num_features, int num_classes,
OutputArray _samples, OutputArray _responses)
{
if( num_samples < 1 )
CV_Error( CV_StsBadArg, "num_samples parameter must be positive" );
if( num_features < 1 )
CV_Error( CV_StsBadArg, "num_features parameter must be positive" );
if( num_classes < 1 )
CV_Error( CV_StsBadArg, "num_classes parameter must be positive" );
int i, cur_class;
_samples.create( num_samples, num_features, CV_32F );
_responses.create( 1, num_samples, CV_32S );
Mat responses = _responses.getMat();
Mat mean = Mat::zeros(1, num_features, CV_32F);
Mat cov = Mat::eye(num_features, num_features, CV_32F);
// fill the feature values matrix with random numbers drawn from standard normal distribution
randMVNormal( mean, cov, num_samples, _samples );
Mat samples = _samples.getMat();
// calculate distances from the origin to the samples and put them
// into the sequence along with indices
std::vector<PairDI> dis(samples.rows);
for( i = 0; i < samples.rows; i++ )
{
PairDI& elem = dis[i];
elem.i = i;
elem.d = norm(samples.row(i), NORM_L2);
}
std::sort(dis.begin(), dis.end(), CmpPairDI());
// assign class labels
num_classes = std::min( num_samples, num_classes );
for( i = 0, cur_class = 0; i < num_samples; ++cur_class )
{
int last_idx = num_samples * (cur_class + 1) / num_classes - 1;
double max_dst = dis[last_idx].d;
max_dst = std::max( max_dst, dis[i].d );
for( ; i < num_samples && dis[i].d <= max_dst; ++i )
responses.at<int>(i) = cur_class;
}
}
}}
/* End of file. */
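/* Hedged usage sketch (not part of the patch; assumes the internal declaration of
createConcentricSpheresTestSet is visible, e.g. via the module's private header):

    Mat samples, responses;
    cv::ml::createConcentricSpheresTestSet( 90, 2, 3, samples, responses );
    // samples: 90x2 CV_32F drawn from N(0, I); responses: 1x90 CV_32S with
    // labels 0..2 assigned by distance rank, so each class forms a roughly
    // equally populated concentric shell around the origin
*/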

File diff suppressed because it is too large

@ -43,6 +43,9 @@
using namespace std;
using namespace cv;
using cv::ml::TrainData;
using cv::ml::EM;
using cv::ml::KNearest;
static
void defaultDistribs( Mat& means, vector<Mat>& covs, int type=CV_32FC1 )
@ -309,9 +312,9 @@ void CV_KNearestTest::run( int /*start_from*/ )
generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32FC1 );
int code = cvtest::TS::OK;
KNearest knearest;
knearest.train( trainData, trainLabels );
knearest.find_nearest( testData, 4, &bestLabels );
Ptr<KNearest> knearest = KNearest::create(true);
knearest->train(trainData, cv::ml::ROW_SAMPLE, trainLabels);
knearest->findNearest( testData, 4, bestLabels);
float err;
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
{
@ -373,13 +376,16 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
cv::Mat labels;
float err;
cv::EM em(params.nclusters, params.covMatType, params.termCrit);
Ptr<EM> em;
EM::Params emp(params.nclusters, params.covMatType, params.termCrit);
if( params.startStep == EM::START_AUTO_STEP )
em.train( trainData, noArray(), labels );
em = EM::train( trainData, noArray(), labels, noArray(), emp );
else if( params.startStep == EM::START_E_STEP )
em.trainE( trainData, *params.means, *params.covs, *params.weights, noArray(), labels );
em = EM::train_startWithE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray(), emp );
else if( params.startStep == EM::START_M_STEP )
em.trainM( trainData, *params.probs, noArray(), labels );
em = EM::train_startWithM( trainData, *params.probs,
noArray(), labels, noArray(), emp );
// check train error
if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
@ -399,7 +405,7 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
{
Mat sample = testData.row(i);
Mat probs;
labels.at<int>(i) = static_cast<int>(em.predict( sample, probs )[1]);
labels.at<int>(i) = static_cast<int>(em->predict2( sample, probs )[1]);
}
if( !calcErr( labels, testLabels, sizes, err, false, false ) )
{
@ -446,56 +452,56 @@ void CV_EMTest::run( int /*start_from*/ )
int code = cvtest::TS::OK;
int caseIndex = 0;
{
params.startStep = cv::EM::START_AUTO_STEP;
params.covMatType = cv::EM::COV_MAT_GENERIC;
params.startStep = EM::START_AUTO_STEP;
params.covMatType = EM::COV_MAT_GENERIC;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_AUTO_STEP;
params.covMatType = cv::EM::COV_MAT_DIAGONAL;
params.startStep = EM::START_AUTO_STEP;
params.covMatType = EM::COV_MAT_DIAGONAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_AUTO_STEP;
params.covMatType = cv::EM::COV_MAT_SPHERICAL;
params.startStep = EM::START_AUTO_STEP;
params.covMatType = EM::COV_MAT_SPHERICAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_M_STEP;
params.covMatType = cv::EM::COV_MAT_GENERIC;
params.startStep = EM::START_M_STEP;
params.covMatType = EM::COV_MAT_GENERIC;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_M_STEP;
params.covMatType = cv::EM::COV_MAT_DIAGONAL;
params.startStep = EM::START_M_STEP;
params.covMatType = EM::COV_MAT_DIAGONAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_M_STEP;
params.covMatType = cv::EM::COV_MAT_SPHERICAL;
params.startStep = EM::START_M_STEP;
params.covMatType = EM::COV_MAT_SPHERICAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_E_STEP;
params.covMatType = cv::EM::COV_MAT_GENERIC;
params.startStep = EM::START_E_STEP;
params.covMatType = EM::COV_MAT_GENERIC;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_E_STEP;
params.covMatType = cv::EM::COV_MAT_DIAGONAL;
params.startStep = EM::START_E_STEP;
params.covMatType = EM::COV_MAT_DIAGONAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
{
params.startStep = cv::EM::START_E_STEP;
params.covMatType = cv::EM::COV_MAT_SPHERICAL;
params.startStep = EM::START_E_STEP;
params.covMatType = EM::COV_MAT_SPHERICAL;
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
@ -511,7 +517,6 @@ protected:
{
int code = cvtest::TS::OK;
const int nclusters = 2;
cv::EM em(nclusters);
Mat samples = Mat(3,1,CV_64FC1);
samples.at<double>(0,0) = 1;
@ -520,11 +525,11 @@ protected:
Mat labels;
em.train(samples, labels);
Ptr<EM> em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters));
Mat firstResult(samples.rows, 1, CV_32SC1);
for( int i = 0; i < samples.rows; i++)
firstResult.at<int>(i) = static_cast<int>(em.predict(samples.row(i))[1]);
firstResult.at<int>(i) = static_cast<int>(em->predict2(samples.row(i), noArray())[1]);
// Write out
string filename = cv::tempfile(".xml");
@ -533,7 +538,7 @@ protected:
try
{
fs << "em" << "{";
em.write(fs);
em->write(fs);
fs << "}";
}
catch(...)
@ -543,29 +548,24 @@ protected:
}
}
em.clear();
em.release();
// Read in
try
{
FileStorage fs = FileStorage(filename, FileStorage::READ);
CV_Assert(fs.isOpened());
FileNode fn = fs["em"];
try
{
em.read(fn);
}
catch(...)
{
ts->printf( cvtest::TS::LOG, "Crash in read method.\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_EXCEPTION );
}
em = StatModel::load<EM>(filename);
}
catch(...)
{
ts->printf( cvtest::TS::LOG, "Crash in read method.\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_EXCEPTION );
}
remove( filename.c_str() );
int errCaseCount = 0;
for( int i = 0; i < samples.rows; i++)
errCaseCount = std::abs(em.predict(samples.row(i))[1] - firstResult.at<int>(i)) < FLT_EPSILON ? 0 : 1;
errCaseCount = std::abs(em->predict2(samples.row(i), noArray())[1] - firstResult.at<int>(i)) < FLT_EPSILON ? 0 : 1;
if( errCaseCount > 0 )
{
@ -588,21 +588,18 @@ protected:
// 1. estimates distributions of "spam" / "not spam"
// 2. predict classID using Bayes classifier for estimated distributions.
CvMLData data;
string dataFilename = string(ts->get_data_path()) + "spambase.data";
Ptr<TrainData> data = TrainData::loadFromCSV(dataFilename, 0);
if(data.read_csv(dataFilename.c_str()) != 0)
if( data.empty() )
{
ts->printf(cvtest::TS::LOG, "File with spambase dataset cann't be read.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
}
Mat values = cv::cvarrToMat(data.get_values());
CV_Assert(values.cols == 58);
int responseIndex = 57;
Mat samples = values.colRange(0, responseIndex);
Mat responses = values.col(responseIndex);
Mat samples = data->getSamples();
CV_Assert(samples.cols == 57);
Mat responses = data->getResponses();
vector<int> trainSamplesMask(samples.rows, 0);
int trainSamplesCount = (int)(0.5f * samples.rows);
@ -616,7 +613,6 @@ protected:
std::swap(trainSamplesMask[i1], trainSamplesMask[i2]);
}
EM model0(3), model1(3);
Mat samples0, samples1;
for(int i = 0; i < samples.rows; i++)
{
@ -630,8 +626,8 @@ protected:
samples1.push_back(sample);
}
}
model0.train(samples0);
model1.train(samples1);
Ptr<EM> model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3));
Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
testConfusionMat(2, 2, CV_32SC1, Scalar(0));
@ -639,8 +635,8 @@ protected:
for(int i = 0; i < samples.rows; i++)
{
Mat sample = samples.row(i);
double sampleLogLikelihoods0 = model0.predict(sample)[0];
double sampleLogLikelihoods1 = model1.predict(sample)[0];
double sampleLogLikelihoods0 = model0->predict2(sample, noArray())[0];
double sampleLogLikelihoods1 = model1->predict2(sample, noArray())[0];
int classID = sampleLogLikelihoods0 >= lambda * sampleLogLikelihoods1 ? 0 : 1;


@ -1,6 +1,8 @@
#include "test_precomp.hpp"
#if 0
#include <string>
#include <fstream>
#include <iostream>
@ -284,3 +286,5 @@ void CV_GBTreesTest::run(int)
/////////////////////////////////////////////////////////////////////////////
TEST(ML_GBTrees, regression) { CV_GBTreesTest test; test.safe_run(); }
#endif


@ -65,7 +65,7 @@ int CV_AMLTest::run_test_case( int testCaseIdx )
for (int k = 0; k < icount; k++)
{
#endif
data.mix_train_and_test_idx();
data->shuffleTrainTest();
code = train( testCaseIdx );
#ifdef GET_STAT
float case_result = get_error();
@ -101,9 +101,10 @@ int CV_AMLTest::validate_test_results( int testCaseIdx )
{
resultNode["mean"] >> mean;
resultNode["sigma"] >> sigma;
float curErr = get_error( testCaseIdx, CV_TEST_ERROR );
model->save(format("/Users/vp/tmp/dtree/testcase_%02d.cur.yml", testCaseIdx));
float curErr = get_test_error( testCaseIdx );
const int coeff = 4;
ts->printf( cvtest::TS::LOG, "Test case = %d; test error = %f; mean error = %f (diff=%f), %d*sigma = %f",
ts->printf( cvtest::TS::LOG, "Test case = %d; test error = %f; mean error = %f (diff=%f), %d*sigma = %f\n",
testCaseIdx, curErr, mean, abs( curErr - mean), coeff, coeff*sigma );
if ( abs( curErr - mean) > coeff*sigma )
{
@ -125,6 +126,6 @@ int CV_AMLTest::validate_test_results( int testCaseIdx )
TEST(ML_DTree, regression) { CV_AMLTest test( CV_DTREE ); test.safe_run(); }
TEST(ML_Boost, regression) { CV_AMLTest test( CV_BOOST ); test.safe_run(); }
TEST(ML_RTrees, regression) { CV_AMLTest test( CV_RTREES ); test.safe_run(); }
TEST(ML_ERTrees, regression) { CV_AMLTest test( CV_ERTREES ); test.safe_run(); }
TEST(DISABLED_ML_ERTrees, regression) { CV_AMLTest test( CV_ERTREES ); test.safe_run(); }
/* End of file. */


@ -44,257 +44,49 @@
using namespace cv;
using namespace std;
// auxiliary functions
// 1. nbayes
void nbayes_check_data( CvMLData* _data )
{
if( _data->get_missing() )
CV_Error( CV_StsBadArg, "missing values are not supported" );
const CvMat* var_types = _data->get_var_types();
bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
Mat _var_types = cvarrToMat(var_types);
if( ( fabs( cvtest::norm( _var_types, Mat::zeros(_var_types.dims, _var_types.size, _var_types.type()), CV_L1 ) -
(var_types->rows + var_types->cols - 2)*CV_VAR_ORDERED - CV_VAR_CATEGORICAL ) > FLT_EPSILON ) ||
!is_classifier )
CV_Error( CV_StsBadArg, "incorrect types of predictors or responses" );
}
bool nbayes_train( CvNormalBayesClassifier* nbayes, CvMLData* _data )
{
nbayes_check_data( _data );
const CvMat* values = _data->get_values();
const CvMat* responses = _data->get_responses();
const CvMat* train_sidx = _data->get_train_sample_idx();
const CvMat* var_idx = _data->get_var_idx();
return nbayes->train( values, responses, var_idx, train_sidx );
}
float nbayes_calc_error( CvNormalBayesClassifier* nbayes, CvMLData* _data, int type, vector<float> *resp )
{
float err = 0;
nbayes_check_data( _data );
const CvMat* values = _data->get_values();
const CvMat* response = _data->get_responses();
const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
int* sidx = sample_idx ? sample_idx->data.i : 0;
int r_step = CV_IS_MAT_CONT(response->type) ?
1 : response->step / CV_ELEM_SIZE(response->type);
int sample_count = sample_idx ? sample_idx->cols : 0;
sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? values->rows : sample_count;
float* pred_resp = 0;
if( resp && (sample_count > 0) )
{
resp->resize( sample_count );
pred_resp = &((*resp)[0]);
}
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( values, &sample, si );
float r = (float)nbayes->predict( &sample, 0 );
if( pred_resp )
pred_resp[i] = r;
int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
err += d;
}
err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
return err;
}
// 2. knearest
void knearest_check_data_and_get_predictors( CvMLData* _data, CvMat* _predictors )
{
const CvMat* values = _data->get_values();
const CvMat* var_idx = _data->get_var_idx();
if( var_idx->cols + var_idx->rows != values->cols )
CV_Error( CV_StsBadArg, "var_idx is not supported" );
if( _data->get_missing() )
CV_Error( CV_StsBadArg, "missing values are not supported" );
int resp_idx = _data->get_response_idx();
if( resp_idx == 0)
cvGetCols( values, _predictors, 1, values->cols );
else if( resp_idx == values->cols - 1 )
cvGetCols( values, _predictors, 0, values->cols - 1 );
else
CV_Error( CV_StsBadArg, "responses must be in the first or last column; other cases are not supported" );
}
bool knearest_train( CvKNearest* knearest, CvMLData* _data )
{
const CvMat* responses = _data->get_responses();
const CvMat* train_sidx = _data->get_train_sample_idx();
bool is_regression = _data->get_var_type( _data->get_response_idx() ) == CV_VAR_ORDERED;
CvMat predictors;
knearest_check_data_and_get_predictors( _data, &predictors );
return knearest->train( &predictors, responses, train_sidx, is_regression );
}
float knearest_calc_error( CvKNearest* knearest, CvMLData* _data, int k, int type, vector<float> *resp )
{
float err = 0;
const CvMat* response = _data->get_responses();
const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
int* sidx = sample_idx ? sample_idx->data.i : 0;
int r_step = CV_IS_MAT_CONT(response->type) ?
1 : response->step / CV_ELEM_SIZE(response->type);
bool is_regression = _data->get_var_type( _data->get_response_idx() ) == CV_VAR_ORDERED;
CvMat predictors;
knearest_check_data_and_get_predictors( _data, &predictors );
int sample_count = sample_idx ? sample_idx->cols : 0;
sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? predictors.rows : sample_count;
float* pred_resp = 0;
if( resp && (sample_count > 0) )
{
resp->resize( sample_count );
pred_resp = &((*resp)[0]);
}
if ( !is_regression )
{
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( &predictors, &sample, si );
float r = knearest->find_nearest( &sample, k );
if( pred_resp )
pred_resp[i] = r;
int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
err += d;
}
err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
}
else
{
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( &predictors, &sample, si );
float r = knearest->find_nearest( &sample, k );
if( pred_resp )
pred_resp[i] = r;
float d = r - response->data.fl[si*r_step];
err += d*d;
}
err = sample_count ? err / (float)sample_count : -FLT_MAX;
}
return err;
}
// 3. svm
int str_to_svm_type(String& str)
{
if( !str.compare("C_SVC") )
return CvSVM::C_SVC;
return SVM::C_SVC;
if( !str.compare("NU_SVC") )
return CvSVM::NU_SVC;
return SVM::NU_SVC;
if( !str.compare("ONE_CLASS") )
return CvSVM::ONE_CLASS;
return SVM::ONE_CLASS;
if( !str.compare("EPS_SVR") )
return CvSVM::EPS_SVR;
return SVM::EPS_SVR;
if( !str.compare("NU_SVR") )
return CvSVM::NU_SVR;
return SVM::NU_SVR;
CV_Error( CV_StsBadArg, "incorrect svm type string" );
return -1;
}
int str_to_svm_kernel_type( String& str )
{
if( !str.compare("LINEAR") )
return CvSVM::LINEAR;
return SVM::LINEAR;
if( !str.compare("POLY") )
return CvSVM::POLY;
return SVM::POLY;
if( !str.compare("RBF") )
return CvSVM::RBF;
return SVM::RBF;
if( !str.compare("SIGMOID") )
return CvSVM::SIGMOID;
return SVM::SIGMOID;
CV_Error( CV_StsBadArg, "incorrect svm kernel type string" );
return -1;
}
void svm_check_data( CvMLData* _data )
Ptr<SVM> svm_train_auto( Ptr<TrainData> _data, SVM::Params _params,
int k_fold, ParamGrid C_grid, ParamGrid gamma_grid,
ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid,
ParamGrid degree_grid )
{
if( _data->get_missing() )
CV_Error( CV_StsBadArg, "missing values are not supported" );
const CvMat* var_types = _data->get_var_types();
for( int i = 0; i < var_types->cols-1; i++ )
if (var_types->data.ptr[i] == CV_VAR_CATEGORICAL)
{
char msg[50];
sprintf( msg, "incorrect type of %d-predictor", i );
CV_Error( CV_StsBadArg, msg );
}
}
bool svm_train( CvSVM* svm, CvMLData* _data, CvSVMParams _params )
{
svm_check_data(_data);
const CvMat* _train_data = _data->get_values();
const CvMat* _responses = _data->get_responses();
const CvMat* _var_idx = _data->get_var_idx();
const CvMat* _sample_idx = _data->get_train_sample_idx();
return svm->train( _train_data, _responses, _var_idx, _sample_idx, _params );
}
bool svm_train_auto( CvSVM* svm, CvMLData* _data, CvSVMParams _params,
int k_fold, CvParamGrid C_grid, CvParamGrid gamma_grid,
CvParamGrid p_grid, CvParamGrid nu_grid, CvParamGrid coef_grid,
CvParamGrid degree_grid )
{
svm_check_data(_data);
const CvMat* _train_data = _data->get_values();
const CvMat* _responses = _data->get_responses();
const CvMat* _var_idx = _data->get_var_idx();
const CvMat* _sample_idx = _data->get_train_sample_idx();
return svm->train_auto( _train_data, _responses, _var_idx,
_sample_idx, _params, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid );
}
float svm_calc_error( CvSVM* svm, CvMLData* _data, int type, vector<float> *resp )
{
svm_check_data(_data);
float err = 0;
const CvMat* values = _data->get_values();
const CvMat* response = _data->get_responses();
const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
const CvMat* var_types = _data->get_var_types();
int* sidx = sample_idx ? sample_idx->data.i : 0;
int r_step = CV_IS_MAT_CONT(response->type) ?
1 : response->step / CV_ELEM_SIZE(response->type);
bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
int sample_count = sample_idx ? sample_idx->cols : 0;
sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? values->rows : sample_count;
float* pred_resp = 0;
if( resp && (sample_count > 0) )
{
resp->resize( sample_count );
pred_resp = &((*resp)[0]);
}
if ( is_classifier )
{
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( values, &sample, si );
float r = svm->predict( &sample );
if( pred_resp )
pred_resp[i] = r;
int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
err += d;
}
err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
}
else
{
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( values, &sample, si );
float r = svm->predict( &sample );
if( pred_resp )
pred_resp[i] = r;
float d = r - response->data.fl[si*r_step];
err += d*d;
}
err = sample_count ? err / (float)sample_count : -FLT_MAX;
}
return err;
Mat _train_data = _data->getSamples();
Mat _responses = _data->getResponses();
Mat _var_idx = _data->getVarIdx();
Mat _sample_idx = _data->getTrainSampleIdx();
Ptr<SVM> svm = SVM::create(_params);
if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) )
return svm;
return Ptr<SVM>();
}
// 4. em
@ -302,79 +94,66 @@ float svm_calc_error( CvSVM* svm, CvMLData* _data, int type, vector<float> *resp
int str_to_ann_train_method( String& str )
{
if( !str.compare("BACKPROP") )
return CvANN_MLP_TrainParams::BACKPROP;
return ANN_MLP::Params::BACKPROP;
if( !str.compare("RPROP") )
return CvANN_MLP_TrainParams::RPROP;
return ANN_MLP::Params::RPROP;
CV_Error( CV_StsBadArg, "incorrect ann train method string" );
return -1;
}
void ann_check_data_and_get_predictors( CvMLData* _data, CvMat* _inputs )
void ann_check_data( Ptr<TrainData> _data )
{
const CvMat* values = _data->get_values();
const CvMat* var_idx = _data->get_var_idx();
if( var_idx->cols + var_idx->rows != values->cols )
Mat values = _data->getSamples();
Mat var_idx = _data->getVarIdx();
int nvars = (int)var_idx.total();
if( nvars != 0 && nvars != values.cols )
CV_Error( CV_StsBadArg, "var_idx is not supported" );
if( _data->get_missing() )
if( !_data->getMissing().empty() )
CV_Error( CV_StsBadArg, "missing values are not supported" );
int resp_idx = _data->get_response_idx();
if( resp_idx == 0)
cvGetCols( values, _inputs, 1, values->cols );
else if( resp_idx == values->cols - 1 )
cvGetCols( values, _inputs, 0, values->cols - 1 );
else
CV_Error( CV_StsBadArg, "outputs must be in the first or last column; other cases are not supported" );
}
void ann_get_new_responses( CvMLData* _data, Mat& new_responses, map<int, int>& cls_map )
// unroll the categorical responses to binary vectors
Mat ann_get_new_responses( Ptr<TrainData> _data, map<int, int>& cls_map )
{
const CvMat* train_sidx = _data->get_train_sample_idx();
int* train_sidx_ptr = train_sidx->data.i;
const CvMat* responses = _data->get_responses();
float* responses_ptr = responses->data.fl;
int r_step = CV_IS_MAT_CONT(responses->type) ?
1 : responses->step / CV_ELEM_SIZE(responses->type);
Mat train_sidx = _data->getTrainSampleIdx();
int* train_sidx_ptr = train_sidx.ptr<int>();
Mat responses = _data->getResponses();
int cls_count = 0;
// construct cls_map
cls_map.clear();
for( int si = 0; si < train_sidx->cols; si++ )
int nresponses = (int)responses.total();
int si, n = !train_sidx.empty() ? (int)train_sidx.total() : nresponses;
for( si = 0; si < n; si++ )
{
int sidx = train_sidx_ptr[si];
int r = cvRound(responses_ptr[sidx*r_step]);
CV_DbgAssert( fabs(responses_ptr[sidx*r_step]-r) < FLT_EPSILON );
int cls_map_size = (int)cls_map.size();
cls_map[r];
if ( (int)cls_map.size() > cls_map_size )
int sidx = train_sidx_ptr ? train_sidx_ptr[si] : si;
int r = cvRound(responses.at<float>(sidx));
CV_DbgAssert( fabs(responses.at<float>(sidx) - r) < FLT_EPSILON );
map<int,int>::iterator it = cls_map.find(r);
if( it == cls_map.end() )
cls_map[r] = cls_count++;
}
new_responses.create( responses->rows, cls_count, CV_32F );
new_responses.setTo( 0 );
for( int si = 0; si < train_sidx->cols; si++ )
Mat new_responses = Mat::zeros( nresponses, cls_count, CV_32F );
for( si = 0; si < n; si++ )
{
int sidx = train_sidx_ptr[si];
int r = cvRound(responses_ptr[sidx*r_step]);
int sidx = train_sidx_ptr ? train_sidx_ptr[si] : si;
int r = cvRound(responses.at<float>(sidx));
int cidx = cls_map[r];
new_responses.ptr<float>(sidx)[cidx] = 1;
new_responses.at<float>(sidx, cidx) = 1.f;
}
return new_responses;
}
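// e.g. float responses (3, 7, 3) yield cls_map {3 -> 0, 7 -> 1} and
// new_responses rows [1 0], [0 1], [1 0]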
int ann_train( CvANN_MLP* ann, CvMLData* _data, Mat& new_responses, CvANN_MLP_TrainParams _params, int flags = 0 )
{
const CvMat* train_sidx = _data->get_train_sample_idx();
CvMat predictors;
ann_check_data_and_get_predictors( _data, &predictors );
CvMat _new_responses = CvMat( new_responses );
return ann->train( &predictors, &_new_responses, 0, train_sidx, _params, flags );
}
float ann_calc_error( CvANN_MLP* ann, CvMLData* _data, map<int, int>& cls_map, int type , vector<float> *resp_labels )
float ann_calc_error( Ptr<StatModel> ann, Ptr<TrainData> _data, map<int, int>& cls_map, int type, vector<float> *resp_labels )
{
float err = 0;
const CvMat* responses = _data->get_responses();
const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
int* sidx = sample_idx ? sample_idx->data.i : 0;
int r_step = CV_IS_MAT_CONT(responses->type) ?
1 : responses->step / CV_ELEM_SIZE(responses->type);
CvMat predictors;
ann_check_data_and_get_predictors( _data, &predictors );
int sample_count = sample_idx ? sample_idx->cols : 0;
sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? predictors.rows : sample_count;
Mat samples = _data->getSamples();
Mat responses = _data->getResponses();
Mat sample_idx = (type == CV_TEST_ERROR) ? _data->getTestSampleIdx() : _data->getTrainSampleIdx();
int* sidx = !sample_idx.empty() ? sample_idx.ptr<int>() : 0;
ann_check_data( _data );
int sample_count = (int)sample_idx.total();
sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? samples.rows : sample_count;
float* pred_resp = 0;
vector<float> innresp;
if( sample_count > 0 )
@ -392,17 +171,16 @@ float ann_calc_error( CvANN_MLP* ann, CvMLData* _data, map<int, int>& cls_map, i
}
int cls_count = (int)cls_map.size();
Mat output( 1, cls_count, CV_32FC1 );
CvMat _output = CvMat(output);
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
int si = sidx ? sidx[i] : i;
cvGetRow( &predictors, &sample, si );
ann->predict( &sample, &_output );
CvPoint best_cls;
cvMinMaxLoc( &_output, 0, 0, 0, &best_cls, 0 );
int r = cvRound(responses->data.fl[si*r_step]);
CV_DbgAssert( fabs(responses->data.fl[si*r_step]-r) < FLT_EPSILON );
Mat sample = samples.row(si);
ann->predict( sample, output );
Point best_cls;
minMaxLoc(output, 0, 0, 0, &best_cls, 0);
int r = cvRound(responses.at<float>(si));
CV_DbgAssert( fabs(responses.at<float>(si) - r) < FLT_EPSILON );
r = cls_map[r];
int d = best_cls.x == r ? 0 : 1;
err += d;
@ -417,13 +195,13 @@ float ann_calc_error( CvANN_MLP* ann, CvMLData* _data, map<int, int>& cls_map, i
int str_to_boost_type( String& str )
{
if ( !str.compare("DISCRETE") )
return CvBoost::DISCRETE;
return Boost::DISCRETE;
if ( !str.compare("REAL") )
return CvBoost::REAL;
return Boost::REAL;
if ( !str.compare("LOGIT") )
return CvBoost::LOGIT;
return Boost::LOGIT;
if ( !str.compare("GENTLE") )
return CvBoost::GENTLE;
return Boost::GENTLE;
CV_Error( CV_StsBadArg, "incorrect boost type string" );
return -1;
}
@ -446,76 +224,37 @@ CV_MLBaseTest::CV_MLBaseTest(const char* _modelName)
RNG& rng = theRNG();
initSeed = rng.state;
rng.state = seeds[rng(seedCount)];
modelName = _modelName;
nbayes = 0;
knearest = 0;
svm = 0;
ann = 0;
dtree = 0;
boost = 0;
rtrees = 0;
ertrees = 0;
if( !modelName.compare(CV_NBAYES) )
nbayes = new CvNormalBayesClassifier;
else if( !modelName.compare(CV_KNEAREST) )
knearest = new CvKNearest;
else if( !modelName.compare(CV_SVM) )
svm = new CvSVM;
else if( !modelName.compare(CV_ANN) )
ann = new CvANN_MLP;
else if( !modelName.compare(CV_DTREE) )
dtree = new CvDTree;
else if( !modelName.compare(CV_BOOST) )
boost = new CvBoost;
else if( !modelName.compare(CV_RTREES) )
rtrees = new CvRTrees;
else if( !modelName.compare(CV_ERTREES) )
ertrees = new CvERTrees;
}
CV_MLBaseTest::~CV_MLBaseTest()
{
if( validationFS.isOpened() )
validationFS.release();
if( nbayes )
delete nbayes;
if( knearest )
delete knearest;
if( svm )
delete svm;
if( ann )
delete ann;
if( dtree )
delete dtree;
if( boost )
delete boost;
if( rtrees )
delete rtrees;
if( ertrees )
delete ertrees;
theRNG().state = initSeed;
}
int CV_MLBaseTest::read_params( CvFileStorage* _fs )
int CV_MLBaseTest::read_params( CvFileStorage* __fs )
{
if( !_fs )
FileStorage _fs(__fs, false);
if( !_fs.isOpened() )
test_case_count = -1;
else
{
CvFileNode* fn = cvGetRootFileNode( _fs, 0 );
fn = (CvFileNode*)cvGetSeqElem( fn->data.seq, 0 );
fn = cvGetFileNodeByName( _fs, fn, "run_params" );
CvSeq* dataSetNamesSeq = cvGetFileNodeByName( _fs, fn, modelName.c_str() )->data.seq;
test_case_count = dataSetNamesSeq ? dataSetNamesSeq->total : -1;
FileNode fn = _fs.getFirstTopLevelNode()["run_params"][modelName];
test_case_count = (int)fn.size();
if( test_case_count <= 0 )
test_case_count = -1;
if( test_case_count > 0 )
{
dataSetNames.resize( test_case_count );
vector<string>::iterator it = dataSetNames.begin();
for( int i = 0; i < test_case_count; i++, it++ )
*it = ((CvFileNode*)cvGetSeqElem( dataSetNamesSeq, i ))->data.str.ptr;
FileNodeIterator it = fn.begin();
for( int i = 0; i < test_case_count; i++, ++it )
{
dataSetNames[i] = (string)*it;
}
}
}
return cvtest::TS::OK;
@ -547,8 +286,6 @@ void CV_MLBaseTest::run( int )
int CV_MLBaseTest::prepare_test_case( int test_case_idx )
{
int trainSampleCount, respIdx;
String varTypes;
clear();
string dataPath = ts->get_data_path();
@ -560,30 +297,27 @@ int CV_MLBaseTest::prepare_test_case( int test_case_idx )
string dataName = dataSetNames[test_case_idx],
filename = dataPath + dataName + ".data";
if ( data.read_csv( filename.c_str() ) != 0)
{
char msg[100];
sprintf( msg, "file %s can not be read", filename.c_str() );
ts->printf( cvtest::TS::LOG, msg );
return cvtest::TS::FAIL_INVALID_TEST_DATA;
}
FileNode dataParamsNode = validationFS.getFirstTopLevelNode()["validation"][modelName][dataName]["data_params"];
CV_DbgAssert( !dataParamsNode.empty() );
CV_DbgAssert( !dataParamsNode["LS"].empty() );
dataParamsNode["LS"] >> trainSampleCount;
CvTrainTestSplit spl( trainSampleCount );
data.set_train_test_split( &spl );
int trainSampleCount = (int)dataParamsNode["LS"];
CV_DbgAssert( !dataParamsNode["resp_idx"].empty() );
dataParamsNode["resp_idx"] >> respIdx;
data.set_response_idx( respIdx );
int respIdx = (int)dataParamsNode["resp_idx"];
CV_DbgAssert( !dataParamsNode["types"].empty() );
dataParamsNode["types"] >> varTypes;
data.set_var_types( varTypes.c_str() );
String varTypes = (String)dataParamsNode["types"];
data = TrainData::loadFromCSV(filename, 0, respIdx, respIdx+1, varTypes);
if( data.empty() )
{
ts->printf( cvtest::TS::LOG, "file %s can not be read\n", filename.c_str() );
return cvtest::TS::FAIL_INVALID_TEST_DATA;
}
data->setTrainTestSplit(trainSampleCount);
return cvtest::TS::OK;
}
@ -598,114 +332,98 @@ int CV_MLBaseTest::train( int testCaseIdx )
FileNode modelParamsNode =
validationFS.getFirstTopLevelNode()["validation"][modelName][dataSetNames[testCaseIdx]]["model_params"];
if( !modelName.compare(CV_NBAYES) )
is_trained = nbayes_train( nbayes, &data );
else if( !modelName.compare(CV_KNEAREST) )
if( modelName == CV_NBAYES )
model = NormalBayesClassifier::create();
else if( modelName == CV_KNEAREST )
{
assert( 0 );
//is_trained = knearest->train( &data );
model = KNearest::create();
}
else if( !modelName.compare(CV_SVM) )
else if( modelName == CV_SVM )
{
String svm_type_str, kernel_type_str;
modelParamsNode["svm_type"] >> svm_type_str;
modelParamsNode["kernel_type"] >> kernel_type_str;
CvSVMParams params;
params.svm_type = str_to_svm_type( svm_type_str );
params.kernel_type = str_to_svm_kernel_type( kernel_type_str );
SVM::Params params;
params.svmType = str_to_svm_type( svm_type_str );
params.kernelType = str_to_svm_kernel_type( kernel_type_str );
modelParamsNode["degree"] >> params.degree;
modelParamsNode["gamma"] >> params.gamma;
modelParamsNode["coef0"] >> params.coef0;
modelParamsNode["C"] >> params.C;
modelParamsNode["nu"] >> params.nu;
modelParamsNode["p"] >> params.p;
is_trained = svm_train( svm, &data, params );
model = SVM::create(params);
}
else if( !modelName.compare(CV_EM) )
else if( modelName == CV_EM )
{
assert( 0 );
}
else if( !modelName.compare(CV_ANN) )
else if( modelName == CV_ANN )
{
String train_method_str;
double param1, param2;
modelParamsNode["train_method"] >> train_method_str;
modelParamsNode["param1"] >> param1;
modelParamsNode["param2"] >> param2;
Mat new_responses;
ann_get_new_responses( &data, new_responses, cls_map );
int layer_sz[] = { data.get_values()->cols - 1, 100, 100, (int)cls_map.size() };
CvMat layer_sizes =
cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
ann->create( &layer_sizes );
is_trained = ann_train( ann, &data, new_responses, CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
str_to_ann_train_method(train_method_str), param1, param2) ) >= 0;
Mat new_responses = ann_get_new_responses( data, cls_map );
// binarize the responses
data = TrainData::create(data->getSamples(), data->getLayout(), new_responses,
data->getVarIdx(), data->getTrainSampleIdx());
int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() };
Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0,
TermCriteria(TermCriteria::COUNT,300,0.01),
str_to_ann_train_method(train_method_str), param1, param2));
}
else if( !modelName.compare(CV_DTREE) )
else if( modelName == CV_DTREE )
{
int MAX_DEPTH, MIN_SAMPLE_COUNT, MAX_CATEGORIES, CV_FOLDS;
float REG_ACCURACY = 0;
bool USE_SURROGATE, IS_PRUNED;
bool USE_SURROGATE = false, IS_PRUNED;
modelParamsNode["max_depth"] >> MAX_DEPTH;
modelParamsNode["min_sample_count"] >> MIN_SAMPLE_COUNT;
modelParamsNode["use_surrogate"] >> USE_SURROGATE;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
is_trained = dtree->train( &data,
CvDTreeParams(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, 0 )) != 0;
model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() ));
}
else if( !modelName.compare(CV_BOOST) )
else if( modelName == CV_BOOST )
{
int BOOST_TYPE, WEAK_COUNT, MAX_DEPTH;
float WEIGHT_TRIM_RATE;
bool USE_SURROGATE;
bool USE_SURROGATE = false;
String typeStr;
modelParamsNode["type"] >> typeStr;
BOOST_TYPE = str_to_boost_type( typeStr );
modelParamsNode["weak_count"] >> WEAK_COUNT;
modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE;
modelParamsNode["max_depth"] >> MAX_DEPTH;
modelParamsNode["use_surrogate"] >> USE_SURROGATE;
is_trained = boost->train( &data,
CvBoostParams(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, 0) ) != 0;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) );
}
else if( !modelName.compare(CV_RTREES) )
else if( modelName == CV_RTREES )
{
int MAX_DEPTH, MIN_SAMPLE_COUNT, MAX_CATEGORIES, CV_FOLDS, NACTIVE_VARS, MAX_TREES_NUM;
float REG_ACCURACY = 0, OOB_EPS = 0.0;
bool USE_SURROGATE, IS_PRUNED;
bool USE_SURROGATE = false, IS_PRUNED;
modelParamsNode["max_depth"] >> MAX_DEPTH;
modelParamsNode["min_sample_count"] >> MIN_SAMPLE_COUNT;
modelParamsNode["use_surrogate"] >> USE_SURROGATE;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
is_trained = rtrees->train( &data, CvRTParams( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, 0, true, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, MAX_TREES_NUM, OOB_EPS, CV_TERMCRIT_ITER)) != 0;
}
else if( !modelName.compare(CV_ERTREES) )
{
int MAX_DEPTH, MIN_SAMPLE_COUNT, MAX_CATEGORIES, CV_FOLDS, NACTIVE_VARS, MAX_TREES_NUM;
float REG_ACCURACY = 0, OOB_EPS = 0.0;
bool USE_SURROGATE, IS_PRUNED;
modelParamsNode["max_depth"] >> MAX_DEPTH;
modelParamsNode["min_sample_count"] >> MIN_SAMPLE_COUNT;
modelParamsNode["use_surrogate"] >> USE_SURROGATE;
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
is_trained = ertrees->train( &data, CvRTParams( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, 0, false, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, MAX_TREES_NUM, OOB_EPS, CV_TERMCRIT_ITER)) != 0;
model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)));
}
if( !model.empty() )
is_trained = model->train(data, 0);
if( !is_trained )
{
ts->printf( cvtest::TS::LOG, "in test case %d model training was failed", testCaseIdx );
@ -714,78 +432,46 @@ int CV_MLBaseTest::train( int testCaseIdx )
return cvtest::TS::OK;
}
float CV_MLBaseTest::get_error( int /*testCaseIdx*/, int type, vector<float> *resp )
float CV_MLBaseTest::get_test_error( int /*testCaseIdx*/, vector<float> *resp )
{
int type = CV_TEST_ERROR;
float err = 0;
if( !modelName.compare(CV_NBAYES) )
err = nbayes_calc_error( nbayes, &data, type, resp );
else if( !modelName.compare(CV_KNEAREST) )
{
Mat _resp;
if( modelName == CV_EM )
assert( 0 );
/*testCaseIdx = 0;
int k = 2;
validationFS.getFirstTopLevelNode()["validation"][modelName][dataSetNames[testCaseIdx]]["model_params"]["k"] >> k;
err = knearest->calc_error( &data, k, type, resp );*/
}
else if( !modelName.compare(CV_SVM) )
err = svm_calc_error( svm, &data, type, resp );
else if( !modelName.compare(CV_EM) )
assert( 0 );
else if( !modelName.compare(CV_ANN) )
err = ann_calc_error( ann, &data, cls_map, type, resp );
else if( !modelName.compare(CV_DTREE) )
err = dtree->calc_error( &data, type, resp );
else if( !modelName.compare(CV_BOOST) )
err = boost->calc_error( &data, type, resp );
else if( !modelName.compare(CV_RTREES) )
err = rtrees->calc_error( &data, type, resp );
else if( !modelName.compare(CV_ERTREES) )
err = ertrees->calc_error( &data, type, resp );
else if( modelName == CV_ANN )
err = ann_calc_error( model, data, cls_map, type, resp );
else if( modelName == CV_DTREE || modelName == CV_BOOST || modelName == CV_RTREES ||
modelName == CV_SVM || modelName == CV_NBAYES || modelName == CV_KNEAREST )
err = model->calcError( data, true, _resp );
if( !_resp.empty() && resp )
_resp.convertTo(*resp, CV_32F);
return err;
}
void CV_MLBaseTest::save( const char* filename )
{
if( !modelName.compare(CV_NBAYES) )
nbayes->save( filename );
else if( !modelName.compare(CV_KNEAREST) )
knearest->save( filename );
else if( !modelName.compare(CV_SVM) )
svm->save( filename );
else if( !modelName.compare(CV_ANN) )
ann->save( filename );
else if( !modelName.compare(CV_DTREE) )
dtree->save( filename );
else if( !modelName.compare(CV_BOOST) )
boost->save( filename );
else if( !modelName.compare(CV_RTREES) )
rtrees->save( filename );
else if( !modelName.compare(CV_ERTREES) )
ertrees->save( filename );
model->save( filename );
}
void CV_MLBaseTest::load( const char* filename )
{
if( !modelName.compare(CV_NBAYES) )
nbayes->load( filename );
else if( !modelName.compare(CV_KNEAREST) )
knearest->load( filename );
else if( !modelName.compare(CV_SVM) )
{
delete svm;
svm = new CvSVM;
svm->load( filename );
}
else if( !modelName.compare(CV_ANN) )
ann->load( filename );
else if( !modelName.compare(CV_DTREE) )
dtree->load( filename );
else if( !modelName.compare(CV_BOOST) )
boost->load( filename );
else if( !modelName.compare(CV_RTREES) )
rtrees->load( filename );
else if( !modelName.compare(CV_ERTREES) )
ertrees->load( filename );
if( modelName == CV_NBAYES )
model = StatModel::load<NormalBayesClassifier>( filename );
else if( modelName == CV_KNEAREST )
model = StatModel::load<KNearest>( filename );
else if( modelName == CV_SVM )
model = StatModel::load<SVM>( filename );
else if( modelName == CV_ANN )
model = StatModel::load<ANN_MLP>( filename );
else if( modelName == CV_DTREE )
model = StatModel::load<DTrees>( filename );
else if( modelName == CV_BOOST )
model = StatModel::load<Boost>( filename );
else if( modelName == CV_RTREES )
model = StatModel::load<RTrees>( filename );
else
CV_Error( CV_StsNotImplemented, "invalid stat model name");
}
/* End of file. */


@ -25,6 +25,20 @@
#define CV_RTREES "rtrees"
#define CV_ERTREES "ertrees"
enum { CV_TRAIN_ERROR=0, CV_TEST_ERROR=1 };
using cv::Ptr;
using cv::ml::StatModel;
using cv::ml::TrainData;
using cv::ml::NormalBayesClassifier;
using cv::ml::SVM;
using cv::ml::KNearest;
using cv::ml::ParamGrid;
using cv::ml::ANN_MLP;
using cv::ml::DTrees;
using cv::ml::Boost;
using cv::ml::RTrees;
class CV_MLBaseTest : public cvtest::BaseTest
{
public:
@ -39,24 +53,16 @@ protected:
virtual int validate_test_results( int testCaseIdx ) = 0;
int train( int testCaseIdx );
float get_error( int testCaseIdx, int type, std::vector<float> *resp = 0 );
float get_test_error( int testCaseIdx, std::vector<float> *resp = 0 );
void save( const char* filename );
void load( const char* filename );
CvMLData data;
Ptr<TrainData> data;
std::string modelName, validationFN;
std::vector<std::string> dataSetNames;
cv::FileStorage validationFS;
// MLL models
CvNormalBayesClassifier* nbayes;
CvKNearest* knearest;
CvSVM* svm;
CvANN_MLP* ann;
CvDTree* dtree;
CvBoost* boost;
CvRTrees* rtrees;
CvERTrees* ertrees;
Ptr<StatModel> model;
std::map<int, int> cls_map;
@ -67,6 +73,7 @@ class CV_AMLTest : public CV_MLBaseTest
{
public:
CV_AMLTest( const char* _modelName );
virtual ~CV_AMLTest() {}
protected:
virtual int run_test_case( int testCaseIdx );
virtual int validate_test_results( int testCaseIdx );
@ -76,6 +83,7 @@ class CV_SLMLTest : public CV_MLBaseTest
{
public:
CV_SLMLTest( const char* _modelName );
virtual ~CV_SLMLTest() {}
protected:
virtual int run_test_case( int testCaseIdx );
virtual int validate_test_results( int testCaseIdx );


@ -59,20 +59,20 @@ int CV_SLMLTest::run_test_case( int testCaseIdx )
if( code == cvtest::TS::OK )
{
data.mix_train_and_test_idx();
code = train( testCaseIdx );
if( code == cvtest::TS::OK )
{
get_error( testCaseIdx, CV_TEST_ERROR, &test_resps1 );
fname1 = tempfile(".yml.gz");
save( fname1.c_str() );
load( fname1.c_str() );
get_error( testCaseIdx, CV_TEST_ERROR, &test_resps2 );
fname2 = tempfile(".yml.gz");
save( fname2.c_str() );
}
else
ts->printf( cvtest::TS::LOG, "model can not be trained" );
data->setTrainTestSplit(data->getNTrainSamples(), true);
code = train( testCaseIdx );
if( code == cvtest::TS::OK )
{
get_test_error( testCaseIdx, &test_resps1 );
fname1 = tempfile(".yml.gz");
save( fname1.c_str() );
load( fname1.c_str() );
get_test_error( testCaseIdx, &test_resps2 );
fname2 = tempfile(".yml.gz");
save( fname2.c_str() );
}
else
ts->printf( cvtest::TS::LOG, "model can not be trained" );
}
return code;
}
@ -130,15 +130,19 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx )
remove( fname2.c_str() );
}
// 2. compare responses
CV_Assert( test_resps1.size() == test_resps2.size() );
vector<float>::const_iterator it1 = test_resps1.begin(), it2 = test_resps2.begin();
for( ; it1 != test_resps1.end(); ++it1, ++it2 )
{
if( fabs(*it1 - *it2) > FLT_EPSILON )
{
ts->printf( cvtest::TS::LOG, "in test case %d responses predicted before saving and after loading are different", testCaseIdx );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
}
}
if( code >= 0 )
{
// 2. compare responses
CV_Assert( test_resps1.size() == test_resps2.size() );
vector<float>::const_iterator it1 = test_resps1.begin(), it2 = test_resps2.begin();
for( ; it1 != test_resps1.end(); ++it1, ++it2 )
{
if( fabs(*it1 - *it2) > FLT_EPSILON )
{
ts->printf( cvtest::TS::LOG, "in test case %d responses predicted before saving and after loading are different", testCaseIdx );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
break;
}
}
}
return code;
@ -152,40 +156,41 @@ TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); }
TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); }
TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); }
TEST(ML_RTrees, save_load) { CV_SLMLTest test( CV_RTREES ); test.safe_run(); }
TEST(ML_ERTrees, save_load) { CV_SLMLTest test( CV_ERTREES ); test.safe_run(); }
TEST(DISABLED_ML_ERTrees, save_load) { CV_SLMLTest test( CV_ERTREES ); test.safe_run(); }
TEST(ML_SVM, throw_exception_when_save_untrained_model)
/*TEST(ML_SVM, throw_exception_when_save_untrained_model)
{
SVM svm;
Ptr<cv::ml::SVM> svm;
string filename = tempfile("svm.xml");
ASSERT_THROW(svm.save(filename.c_str()), Exception);
remove(filename.c_str());
}
}*/
TEST(DISABLED_ML_SVM, linear_save_load)
{
CvSVM svm1, svm2, svm3;
svm1.load("SVM45_X_38-1.xml");
svm2.load("SVM45_X_38-2.xml");
Ptr<cv::ml::SVM> svm1, svm2, svm3;
svm1 = StatModel::load<SVM>("SVM45_X_38-1.xml");
svm2 = StatModel::load<SVM>("SVM45_X_38-2.xml");
string tname = tempfile("a.xml");
svm2.save(tname.c_str());
svm3.load(tname.c_str());
svm2->save(tname);
svm3 = StatModel::load<SVM>(tname);
ASSERT_EQ(svm1.get_var_count(), svm2.get_var_count());
ASSERT_EQ(svm1.get_var_count(), svm3.get_var_count());
ASSERT_EQ(svm1->getVarCount(), svm2->getVarCount());
ASSERT_EQ(svm1->getVarCount(), svm3->getVarCount());
int m = 10000, n = svm1.get_var_count();
int m = 10000, n = svm1->getVarCount();
Mat samples(m, n, CV_32F), r1, r2, r3;
randu(samples, 0., 1.);
svm1.predict(samples, r1);
svm2.predict(samples, r2);
svm3.predict(samples, r3);
svm1->predict(samples, r1);
svm2->predict(samples, r2);
svm3->predict(samples, r3);
double eps = 1e-4;
EXPECT_LE(cvtest::norm(r1, r2, NORM_INF), eps);
EXPECT_LE(cvtest::norm(r1, r3, NORM_INF), eps);
EXPECT_LE(norm(r1, r2, NORM_INF), eps);
EXPECT_LE(norm(r1, r3, NORM_INF), eps);
remove(tname.c_str());
}


@ -7,7 +7,7 @@ seamlessClone
-------------
Image editing tasks concern either global changes (color/intensity corrections, filters, deformations) or local changes confined to a selection.
Here we are interested in achieving local changes, ones that are restricted to a manually selected region (ROI), in a seamless and effortless manner.
The extent of the changes ranges from slight distortions to complete replacement by novel content.
The extent of the changes ranges from slight distortions to complete replacement by novel content [PM03]_.
.. ocv:function:: void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p, OutputArray blend, int flags)
@@ -25,13 +25,9 @@ The extent of the changes ranges from slight distortions to complete replacement
* **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with complex outlines into a new background
* **MIXED_CLONE** The classic method, color-based selection and alpha
masking might be time consuming and often leaves an undesirable halo. Seamless
cloning, even averaged with the original image, is not effective. Mixed seamless
cloning based on a loose selection proves effective.
* **MIXED_CLONE** The classic method of color-based selection and alpha masking can be time-consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the original image, is not effective; mixed seamless cloning based on a loose selection proves effective.
* **FEATURE_EXCHANGE** Feature exchange allows the user to replace easily certain
features of one object by alternative features.
* **FEATURE_EXCHANGE** Feature exchange allows the user to easily replace certain features of one object with alternative features.
@@ -97,3 +93,5 @@ region, giving its contents a flat aspect. Here Canny Edge Detector is used.
**NOTE:**
The algorithm assumes that the color of the source image is close to that of the destination. This assumption means that when the colors don't match, the source image color gets tinted toward the color of the destination image.
.. [PM03] Patrick Perez, Michel Gangnet, Andrew Blake, "Poisson image editing", ACM Transactions on Graphics (SIGGRAPH), 2003.
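As a usage illustration for the signature documented above, a minimal sketch (the file names and the whole-source mask are placeholders):

// seamlessClone sketch: insert src into dst around `center`, blending the
// seam via Poisson editing. Input file names are placeholders.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    cv::Mat src = cv::imread("object.png");   // patch to insert
    cv::Mat dst = cv::imread("scene.png");    // destination image
    cv::Mat mask(src.size(), CV_8UC1, cv::Scalar(255)); // use all of src
    cv::Point center(dst.cols / 2, dst.rows / 2); // where the patch lands

    cv::Mat blend;
    cv::seamlessClone(src, dst, mask, center, blend, cv::NORMAL_CLONE);
    cv::imwrite("blend.png", blend);
    return 0;
}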


@@ -6,7 +6,7 @@ Decolorization
decolor
-------
Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized black-and-white photograph rendering, and in many single channel image processing applications.
Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized black-and-white photograph rendering, and in many single-channel image processing applications [CL12]_.
.. ocv:function:: void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost )
@@ -17,3 +17,5 @@ Transforms a color image to a grayscale image. It is a basic tool in digital pri
:param color_boost: Output 8-bit 3-channel image.
This function is to be applied to color images.
.. [CL12] Cewu Lu, Li Xu, Jiaya Jia, "Contrast Preserving Decolorization", IEEE International Conference on Computational Photography (ICCP), 2012.
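A minimal call sketch for the signature above (the input file name is a placeholder):

// decolor sketch: produce the contrast-preserving grayscale image and the
// color-boosted image from an 8-bit 3-channel input.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    cv::Mat src = cv::imread("photo.png");    // 8-bit, 3-channel input
    cv::Mat gray, colorBoost;
    cv::decolor(src, gray, colorBoost);
    cv::imwrite("gray.png", gray);
    cv::imwrite("boost.png", colorBoost);
    return 0;
}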


@@ -356,7 +356,7 @@ Creates MergeRobertson object
.. ocv:function:: Ptr<MergeRobertson> createMergeRobertson()
References
==========
---------------------------
.. [DM03] F. Drago, K. Myszkowski, T. Annen, N. Chiba, "Adaptive Logarithmic Mapping For Displaying High Contrast Scenes", Computer Graphics Forum, 2003, 22, 419 - 426.
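For context, a minimal sketch of driving the factory documented above; the exposure stack and times are placeholders, and MergeRobertson::process is assumed to follow the common MergeExposures interface (images plus one exposure time per image):

// MergeRobertson sketch: merge an exposure stack into a single HDR image.
#include <opencv2/core.hpp>
#include <opencv2/photo.hpp>
#include <vector>

void mergeStack(const std::vector<cv::Mat>& images,
                const std::vector<float>& times, cv::Mat& hdr)
{
    cv::Ptr<cv::MergeRobertson> merge = cv::createMergeRobertson();
    merge->process(images, hdr, times);   // times: one exposure per image
}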


@@ -6,7 +6,7 @@ Non-Photorealistic Rendering
edgePreservingFilter
--------------------
Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing filters are used in many different applications.
Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing filters are used in many different applications [EM11]_.
.. ocv:function:: void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1, float sigma_s = 60, float sigma_r = 0.4f)
@@ -16,9 +16,9 @@ Filtering is the fundamental operation in image and video processing. Edge-prese
:param flags: Edge preserving filters:
* **RECURS_FILTER**
* **RECURS_FILTER** = 1
* **NORMCONV_FILTER**
* **NORMCONV_FILTER** = 2
:param sigma_s: Range between 0 and 200.
@@ -72,3 +72,5 @@ Stylization aims to produce digital imagery with a wide variety of effects not f
:param sigma_s: Range between 0 and 200.
:param sigma_r: Range between 0 and 1.
.. [EM11] Eduardo S. L. Gastal, Manuel M. Oliveira, "Domain transform for edge-aware image and video processing", ACM Trans. Graph. 30(4): 69, 2011.
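A minimal sketch exercising both documented flag values (the input file name is a placeholder):

// edgePreservingFilter sketch with the two documented flags.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png");    // placeholder file name
    cv::Mat recursive, normconv;
    cv::edgePreservingFilter(src, recursive, cv::RECURS_FILTER, 60, 0.4f);
    cv::edgePreservingFilter(src, normconv, cv::NORMCONV_FILTER, 60, 0.4f);
    cv::imwrite("recursive.png", recursive);
    cv::imwrite("normconv.png", normconv);
    return 0;
}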


@@ -173,6 +173,7 @@ void Domain_Filter::compute_Rfilter(Mat &output, Mat &hz, float sigma_h)
{
int h = output.rows;
int w = output.cols;
int channel = output.channels();
float a = (float) exp((-1.0 * sqrt(2.0)) / sigma_h);
@@ -185,11 +186,15 @@ void Domain_Filter::compute_Rfilter(Mat &output, Mat &hz, float sigma_h)
for(int j=0;j<w;j++)
V.at<float>(i,j) = pow(a,hz.at<float>(i,j));
for(int i=0; i<h; i++)
for(int i=0; i<h; i++)
{
for(int j =1; j < w; j++)
{
temp.at<float>(i,j) = temp.at<float>(i,j) + (temp.at<float>(i,j-1) - temp.at<float>(i,j)) * V.at<float>(i,j);
for(int c = 0; c<channel; c++)
{
temp.at<float>(i,j*channel+c) = temp.at<float>(i,j*channel+c) +
(temp.at<float>(i,(j-1)*channel+c) - temp.at<float>(i,j*channel+c)) * V.at<float>(i,j);
}
}
}
@@ -197,7 +202,11 @@ void Domain_Filter::compute_Rfilter(Mat &output, Mat &hz, float sigma_h)
{
for(int j =w-2; j >= 0; j--)
{
temp.at<float>(i,j) = temp.at<float>(i,j) + (temp.at<float>(i,j+1) - temp.at<float>(i,j)) * V.at<float>(i,j+1);
for(int c = 0; c<channel; c++)
{
temp.at<float>(i,j*channel+c) = temp.at<float>(i,j*channel+c) +
(temp.at<float>(i,(j+1)*channel+c) - temp.at<float>(i,j*channel+c))*V.at<float>(i,j+1);
}
}
}
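The hunks above generalize the recursive pass from single-channel to interleaved multi-channel data: channel c of pixel (i, j) in a CV_32FC3 Mat sits at flat column j*channel + c. A small self-contained illustration of that layout (not OpenCV internals):

// Interleaved layout check: viewed as single-channel floats, channel c of
// pixel (i, j) lives at column j*channels + c.
#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    cv::Mat img(2, 3, CV_32FC3, cv::Scalar(0, 0, 0));
    img.at<cv::Vec3f>(1, 2) = cv::Vec3f(10.f, 20.f, 30.f);

    int cn = img.channels();
    cv::Mat flat = img.reshape(1);   // 2 x (3*cn), single channel, same data
    assert(flat.at<float>(1, 2*cn + 0) == 10.f);
    assert(flat.at<float>(1, 2*cn + 1) == 20.f);
    assert(flat.at<float>(1, 2*cn + 2) == 30.f);
    return 0;
}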


@@ -108,6 +108,7 @@ void cv::seamlessClone(InputArray _src, InputArray _dst, InputArray _mask, Point
Cloning obj;
obj.normal_clone(dest,cd_mask,dst_mask,blend,flags);
}
void cv::colorChange(InputArray _src, InputArray _mask, OutputArray _dst, float r, float g, float b)
@@ -136,7 +137,6 @@ void cv::colorChange(InputArray _src, InputArray _mask, OutputArray _dst, float
obj.local_color_change(src,cs_mask,gray,blend,red,green,blue);
}
void cv::illuminationChange(InputArray _src, InputArray _mask, OutputArray _dst, float a, float b)
{


@@ -455,6 +455,8 @@ void Cloning::normal_clone(Mat &I, Mat &mask, Mat &wmask, Mat &cloned, int num)
{
int w = I.size().width;
int h = I.size().height;
int channel = I.channels();
initialization(I,mask,wmask);
@@ -466,20 +468,33 @@ void Cloning::normal_clone(Mat &I, Mat &mask, Mat &wmask, Mat &cloned, int num)
}
else if(num == 2)
{
for(int i=0;i < h; i++)
for(int j=0; j < w; j++)
{
for(int j=0; j < w; j++)
{
if(abs(sgx.at<float>(i,j) - sgy.at<float>(i,j)) > abs(grx.at<float>(i,j) - gry.at<float>(i,j)))
for(int c=0;c<channel;++c)
{
srx32.at<float>(i,j) = sgx.at<float>(i,j) * smask.at<float>(i,j);
sry32.at<float>(i,j) = sgy.at<float>(i,j) * smask.at<float>(i,j);
}
else
{
srx32.at<float>(i,j) = grx.at<float>(i,j) * smask.at<float>(i,j);
sry32.at<float>(i,j) = gry.at<float>(i,j) * smask.at<float>(i,j);
if(abs(sgx.at<float>(i,j*channel+c) - sgy.at<float>(i,j*channel+c)) >
abs(grx.at<float>(i,j*channel+c) - gry.at<float>(i,j*channel+c)))
{
srx32.at<float>(i,j*channel+c) = sgx.at<float>(i,j*channel+c)
* smask.at<float>(i,j);
sry32.at<float>(i,j*channel+c) = sgy.at<float>(i,j*channel+c)
* smask.at<float>(i,j);
}
else
{
srx32.at<float>(i,j*channel+c) = grx.at<float>(i,j*channel+c)
* smask.at<float>(i,j);
sry32.at<float>(i,j*channel+c) = gry.at<float>(i,j*channel+c)
* smask.at<float>(i,j);
}
}
}
}
}
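/* Editorial note on the num == 2 (MIXED_CLONE) branch above: per channel it
 * keeps whichever of the source gradient (sgx, sgy) and destination gradient
 * (grx, gry) varies more strongly, weighted by smask -- the mixed-gradient
 * guidance field of [PM03]:
 *
 *     v(x) = grad(f*)(x)   if |grad(f*)(x)| > |grad(g)(x)|
 *            grad(g)(x)    otherwise
 *
 * where g is the source patch and f* the destination; note the code
 * approximates the magnitude comparison by |g_x - g_y|, the difference of
 * the x and y gradient components.
 */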
else if(num == 3)
{


@@ -33,7 +33,8 @@ endforeach(m)
# header blacklist
ocv_list_filterout(opencv_hdrs ".h$")
ocv_list_filterout(opencv_hdrs "opencv2/core/cuda")
ocv_list_filterout(opencv_hdrs "cuda")
ocv_list_filterout(opencv_hdrs "cudev")
ocv_list_filterout(opencv_hdrs "opencv2/objdetect/detection_based_tracker.hpp")
ocv_list_filterout(opencv_hdrs "opencv2/optim.hpp")


@@ -10,6 +10,7 @@
#include <numpy/ndarrayobject.h>
#include "pyopencv_generated_include.h"
#include "opencv2/core/types_c.h"
#include "opencv2/opencv_modules.hpp"
@@ -375,6 +376,12 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
return true;
}
template<>
bool pyopencv_to(PyObject* o, Mat& m, const char* name)
{
return pyopencv_to(o, m, ArgInfo(name, 0));
}
template<>
PyObject* pyopencv_from(const Mat& m)
{
@@ -1089,14 +1096,6 @@ bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name)
return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
}
template<>
PyObject* pyopencv_from(CvDTreeNode* const & node)
{
double value = node->value;
int ivalue = cvRound(value);
return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void OnMouse(int event, int x, int y, int flags, void* param)


@@ -267,7 +267,7 @@ class ClassInfo(object):
#return sys.exit(-1)
if self.bases and self.bases[0].startswith("cv::"):
self.bases[0] = self.bases[0][4:]
if self.bases and self.bases[0] == "Algorithm":
if self.bases and self.bases[0] == "cv::Algorithm":
self.isalgorithm = True
for m in decl[2]:
if m.startswith("="):
@@ -286,7 +286,7 @@ class ClassInfo(object):
code = "static bool pyopencv_to(PyObject* src, %s& dst, const char* name)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname)
code += "".join([gen_template_set_prop_from_map.substitute(propname=p.name,proptype=p.tp) for p in self.props])
if self.bases:
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.bases[0]].cname
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.bases[0].replace("::", "_")].cname
else:
code += "\n return true;\n}\n"
return code
@@ -761,7 +761,7 @@ class PythonWrapperGenerator(object):
sys.exit(-1)
self.classes[classinfo.name] = classinfo
if classinfo.bases and not classinfo.isalgorithm:
classinfo.isalgorithm = self.classes[classinfo.bases[0]].isalgorithm
classinfo.isalgorithm = self.classes[classinfo.bases[0].replace("::", "_")].isalgorithm
def add_const(self, name, decl):
constinfo = ConstInfo(name, decl[1])


@@ -582,6 +582,7 @@ class CppHeaderParser(object):
return name
if name.startswith("cv."):
return name
qualified_name = (("." in name) or ("::" in name))
n = ""
for b in self.block_stack:
block_type, block_name = b[self.BLOCK_TYPE], b[self.BLOCK_NAME]
@@ -590,9 +591,12 @@
if block_type not in ["struct", "class", "namespace"]:
print("Error at %d: there are non-valid entries in the current block stack " % (self.lineno, self.block_stack))
sys.exit(-1)
if block_name:
if block_name and (block_type == "namespace" or not qualified_name):
n += block_name + "."
return n + name.replace("::", ".")
n += name.replace("::", ".")
if n.endswith(".Algorithm"):
n = "cv.Algorithm"
return n
def parse_stmt(self, stmt, end_token):
"""
@@ -643,7 +647,7 @@ class CppHeaderParser(object):
classname = classname[1:]
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("class") or stmt.startswith("struct"):
@@ -658,7 +662,7 @@ class CppHeaderParser(object):
if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)):
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
decl[1] = ": " + ", ".join([self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("enum"):

Some files were not shown because too many files have changed in this diff.