Set stricter warning rules for gcc

Andrey Kamaev 2012-06-07 17:21:29 +00:00
parent 0395f7c63f
commit 49a1ba6038
241 changed files with 9054 additions and 8947 deletions
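The recurring source change in this diff is driven by the new warning flags added to the CMake file at the bottom: -Wmissing-declarations warns whenever a global function is defined without a previous declaration, so file-local helpers that are not declared in any header are now marked static. A minimal sketch of that pattern (the function name below is invented for illustration, it is not taken from the diff):

    /* before: external linkage and no prior declaration -> -Wmissing-declarations fires */
    int icvExampleHelper( int x ) { return 2 * x; }

    /* after: internal linkage, so the warning (and accidental external use) goes away */
    static int icvExampleHelper( int x ) { return 2 * x; }

Most of the other hunks are whitespace-only cleanups and new opencv2/core includes at the top of the affected files.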

View File

@ -42,6 +42,9 @@
#ifndef __CVCOMMON_H_
#define __CVCOMMON_H_
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "cxcore.h"
#include "cv.h"
#include "cxmisc.h"

File diff suppressed because it is too large

View File

@ -394,7 +394,7 @@ void icvSaveStageHaarClassifier( CvIntHaarClassifier* classifier, FILE* file )
-CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
+static CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
{
CvStageHaarClassifier* ptr = NULL;

View File

@ -108,7 +108,7 @@ CvBackgroundData* cvbgdata = NULL;
/*
* get sum image offsets for <rect> corner points
* step - row step (measured in image pixels!) of sum image
*/
#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step ) \
@ -122,7 +122,7 @@ CvBackgroundData* cvbgdata = NULL;
(p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);
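/* Illustrative note, not part of this commit: for a window of width W the sum
   (integral) image has row step W + 1, and for rect = {x, y, w, h} the macro yields
   p0 = x     + step * y         (top-left corner)
   p1 = x + w + step * y         (top-right corner)
   p2 = x     + step * (y + h)   (bottom-left corner)
   p3 = x + w + step * (y + h)   (bottom-right corner)
   so the pixel sum over the rectangle is sum[p0] - sum[p1] - sum[p2] + sum[p3],
   exactly as used in icvGetAuxImages() further down in this diff. */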
/*
* get tilted image offsets for <rect> corner points
* step - row step (measured in image pixels!) of tilted image
*/
#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step ) \
@ -154,7 +154,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
{
CvIntHaarFeatures* features = NULL;
CvTHaarFeature haarFeature;
CvMemStorage* storage = NULL;
CvSeq* seq = NULL;
CvSeqWriter writer;
@ -172,7 +172,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
float factor = 1.0F;
factor = ((float) winsize.width) * winsize.height / (24 * 24);
#if 0
s0 = (int) (s0 * factor);
s1 = (int) (s1 * factor);
s2 = (int) (s2 * factor);
@ -252,7 +252,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// haar_y4
if ( (x+dx <= winsize.width ) && (y+dy*4 <= winsize.height) ) {
if (dx*4*dy < s0) continue;
@ -277,7 +277,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
}
}
if (mode != 0 /*BASIC*/) {
// point
if ( (x+dx*3 <= winsize.width) && (y+dy*3 <= winsize.height) ) {
if (dx*9*dy < s0) continue;
@ -289,12 +289,12 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
}
}
}
if (mode == 2 /*ALL*/) {
// tilted haar_x2 (x, y, w, h, b, weight)
if ( (x+2*dx <= winsize.width) && (y+2*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*2*dy < s1) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x2",
x, y, dx*2, dy, -1,
@ -302,11 +302,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y2 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+2*dy <= winsize.height) && (x-2*dy>= 0) ) {
if (dx*2*dy < s1) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y2",
x, y, dx, 2*dy, -1,
@ -314,11 +314,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_x3 (x, y, w, h, b, weight)
if ( (x+3*dx <= winsize.width) && (y+3*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*3*dy < s2) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x3",
x, y, dx*3, dy, -1,
@ -326,11 +326,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y3 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+3*dy <= winsize.height) && (x-3*dy>= 0) ) {
if (dx*3*dy < s2) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y3",
x, y, dx, 3*dy, -1,
@ -338,12 +338,12 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_x4 (x, y, w, h, b, weight)
if ( (x+4*dx <= winsize.width) && (y+4*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*4*dy < s3) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x4",
@ -353,11 +353,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y4 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+4*dy <= winsize.height) && (x-4*dy>= 0) ) {
if (dx*4*dy < s3) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y4",
x, y, dx, 4*dy, -1,
@ -365,10 +365,10 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
/*
// tilted point
if ( (x+dx*3 <= winsize.width - 1) && (y+dy*3 <= winsize.height - 1) && (x-3*dy>= 0)) {
if (dx*9*dy < 36) continue;
@ -395,10 +395,10 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
features->winsize = winsize;
cvCvtSeqToArray( seq, (CvArr*) features->feature );
cvReleaseMemStorage( &storage );
icvConvertToFastHaarFeature( features->feature, features->fastfeature,
features->count, (winsize.width + 1) );
return features;
}
@ -438,7 +438,7 @@ void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,
fastHaarFeature[i].rect[j].p3,
haarFeature[i].rect[j].r, step )
}
}
else
{
@ -469,15 +469,15 @@ static
CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
{
CvHaarTrainigData* data;
CV_FUNCNAME( "icvCreateHaarTrainingData" );
__BEGIN__;
data = NULL;
uchar* ptr = NULL;
size_t datasize = 0;
datasize = sizeof( CvHaarTrainigData ) +
/* sum and tilted */
( 2 * (winsize.width + 1) * (winsize.height + 1) * sizeof( sum_type ) +
@ -548,7 +548,7 @@ void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,
int j = 0;
float val = 0.0F;
float normfactor = 0.0F;
CvHaarTrainingData* training_data;
CvIntHaarFeatures* haar_features;
@ -639,7 +639,7 @@ void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,
#if 0 /*def CV_VERBOSE*/
if( first % 5000 == 0 )
{
fprintf( stderr, "%3d%%\r", (int) (100.0 * first /
haar_features->count) );
fflush( stderr );
}
@ -692,7 +692,7 @@ void icvPrecalculate( CvHaarTrainingData* data, CvIntHaarFeatures* haarFeatures,
t_data = *data->valcache;
t_idx = *data->idxcache;
t_portion = MIN( portion, (numprecalculated - first) );
/* indices */
t_idx.rows = t_portion;
t_idx.data.ptr = data->idxcache->data.ptr + first * ((size_t)t_idx.step);
@ -766,7 +766,7 @@ void icvSplitIndicesCallback( int compidx, float threshold,
{
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + i * data->sum.step),
(sum_type*) (data->tilted.data.ptr + i * data->tilted.step) )
< threshold * data->normfactor.data.fl[i] )
{
(*left)->data.fl[(*left)->cols++] = (float) i;
@ -792,7 +792,7 @@ void icvSplitIndicesCallback( int compidx, float threshold,
index = (int) *((float*) (idxdata + i * idxstep));
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + index * data->sum.step),
(sum_type*) (data->tilted.data.ptr + index * data->tilted.step) )
< threshold * data->normfactor.data.fl[index] )
{
(*left)->data.fl[(*left)->cols++] = (float) index;
@ -858,7 +858,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
float sum_stage = 0.0F;
float threshold = 0.0F;
float falsealarm = 0.0F;
//CvMat* sampleIdx = NULL;
CvMat* trimmedIdx;
//float* idxdata = NULL;
@ -871,7 +871,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
int idx;
int numsamples;
int numtrimmed;
CvCARTHaarClassifier* classifier;
CvSeq* seq = NULL;
CvMemStorage* storage = NULL;
@ -885,7 +885,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif /* CV_VERBOSE */
n = haarFeatures->count;
m = data->sum.rows;
numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
@ -909,7 +909,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
trainParams.userdata = &userdata;
eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
@ -919,7 +919,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
num_splits = 0;
sumalpha = 0.0F;
do
{
#ifdef CV_VERBOSE
int v_wt = 0;
@ -947,12 +947,12 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
num_splits += classifier->count;
cart->release( (CvClassifier**) &cart );
if( symmetric && (seq->total % 2) )
{
float normfactor = 0.0F;
CvStumpClassifier* stump;
/* flip haar features */
for( i = 0; i < classifier->count; i++ )
{
@ -961,9 +961,9 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x -
classifier->feature[i].rect[j].r.width;
}
}
else
@ -975,7 +975,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x;
CV_SWAP( classifier->feature[i].rect[j].r.width,
classifier->feature[i].rect[j].r.height, tmp );
@ -1010,7 +1010,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
trainParams.stumpTrainParams );
classifier->threshold[i] = stump->threshold;
if( classifier->left[i] <= 0 )
{
@ -1021,8 +1021,8 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
classifier->val[-classifier->right[i]] = stump->right;
}
stump->release( (CvClassifier**) &stump );
}
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
@ -1040,7 +1040,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
cvReleaseMat( &trimmedIdx );
trimmedIdx = NULL;
}
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
@ -1054,10 +1054,10 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
sumalpha += alpha;
for( i = 0; i <= classifier->count; i++ )
{
if( boosttype == CV_RABCLASS )
{
classifier->val[i] = cvLogRatio( classifier->val[i] );
}
@ -1077,7 +1077,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
eval.data.fl[numpos] += classifier->eval(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
@ -1163,7 +1163,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
fflush( stdout );
}
#endif /* CV_VERBOSE */
} while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
cvBoostEndTraining( &trainer );
@ -1177,12 +1177,12 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
threshold );
cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
}
/* CLEANUP */
cvReleaseMemStorage( &storage );
cvReleaseMat( &weakTrainVals );
cvFree( &(eval.data.ptr) );
return (CvIntHaarClassifier*) stage;
}
@ -1192,7 +1192,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
{
CvBackgroundData* data = NULL;
const char* dir = NULL;
char full[PATH_MAX];
char* imgfilename = NULL;
size_t datasize = 0;
@ -1202,7 +1202,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
int len = 0;
assert( filename != NULL );
dir = strrchr( filename, '\\' );
if( dir == NULL )
{
@ -1223,7 +1223,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
{
count = 0;
datasize = 0;
/* count */
while( !feof( input ) )
{
@ -1257,11 +1257,11 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
while( !feof( input ) )
{
*imgfilename = '\0';
if( !fgets( imgfilename, PATH_MAX - (int)(imgfilename - full) - 1, input ))
break;
len = (int)strlen( imgfilename );
if( len > 0 && imgfilename[len-1] == '\n' )
imgfilename[len-1] = 0, len--;
if( len > 0 )
{
if( (*imgfilename) == '#' ) continue; /* comment */
@ -1351,14 +1351,14 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
{
round = data->round;
//#ifdef CV_VERBOSE
// printf( "Open background image: %s\n", data->filename[data->last] );
//#endif /* CV_VERBOSE */
data->last = rand() % data->count;
data->last %= data->count;
img = cvLoadImage( data->filename[data->last], 0 );
if( !img )
continue;
data->round += data->last / data->count;
data->round = data->round % (data->winsize.width * data->winsize.height);
@ -1368,7 +1368,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
offset.x = MIN( offset.x, img->width - data->winsize.width );
offset.y = MIN( offset.y, img->height - data->winsize.height );
if( img != NULL && img->depth == IPL_DEPTH_8U && img->nChannels == 1 &&
offset.x >= 0 && offset.y >= 0 )
{
@ -1403,7 +1403,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
reader->scale = MAX(
((float) data->winsize.width + reader->point.x) / ((float) reader->src.cols),
((float) data->winsize.height + reader->point.y) / ((float) reader->src.rows) );
reader->img = cvMat( (int) (reader->scale * reader->src.rows + 0.5F),
(int) (reader->scale * reader->src.cols + 0.5F),
CV_8UC1, (void*) cvAlloc( datasize ) );
@ -1576,11 +1576,11 @@ void icvGetAuxImages( CvMat* img, CvMat* sum, CvMat* tilted,
sum_type valsum = 0;
sqsum_type valsqsum = 0;
double area = 0.0;
cvIntegral( img, sum, sqsum, tilted );
normrect = cvRect( 1, 1, img->cols - 2, img->rows - 2 );
CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, img->cols + 1 )
area = normrect.width * normrect.height;
valsum = ((sum_type*) (sum->data.ptr))[p0] - ((sum_type*) (sum->data.ptr))[p1]
- ((sum_type*) (sum->data.ptr))[p2] + ((sum_type*) (sum->data.ptr))[p3];
@ -1621,28 +1621,28 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
int i = 0;
ccounter_t getcount = 0;
ccounter_t thread_getcount = 0;
ccounter_t consumed_count;
ccounter_t thread_consumed_count;
/* private variables */
CvMat img;
CvMat sum;
CvMat tilted;
CvMat sqsum;
sum_type* sumdata;
sum_type* tilteddata;
float* normfactor;
/* end private variables */
assert( data != NULL );
assert( first + count <= data->maxnum );
assert( cascade != NULL );
assert( callback != NULL );
// if( !cvbgdata ) return 0; this check needs to be done in the callback for BG
CCOUNTER_SET_ZERO(getcount);
CCOUNTER_SET_ZERO(thread_getcount);
CCOUNTER_SET_ZERO(consumed_count);
@ -1691,14 +1691,14 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
normfactor = data->normfactor.data.fl + i;
sum.data.ptr = (uchar*) sumdata;
tilted.data.ptr = (uchar*) tilteddata;
icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
{
CCOUNTER_INC(thread_getcount);
break;
}
}
#ifdef CV_VERBOSE
if( (i - first) % 500 == 0 )
{
@ -1720,7 +1720,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
CCOUNTER_ADD(consumed_count, thread_consumed_count);
}
} /* omp parallel */
if( consumed != NULL )
{
*consumed = (int)consumed_count;
@ -1731,7 +1731,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
/* *acceptance_ratio = ((double) count) / consumed_count; */
*acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
}
return static_cast<int>(getcount);
}
@ -1791,7 +1791,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
// CV_SQSUM_MAT_TYPE,
// cvAlloc( sizeof( sqsum_type ) * (data->winsize.height + 1)
// * (data->winsize.width + 1) ) );
//
// #ifdef CV_OPENMP
// #pragma omp for schedule(static, 1)
// #endif /* CV_OPENMP */
@ -1800,7 +1800,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
// for( ; ; )
// {
// icvGetBackgroundImage( cvbgdata, cvbgreader, &img );
//
// CCOUNTER_INC(thread_consumed_count);
//
// sumdata = (sum_type*) (data->sum.data.ptr + i * data->sum.step);
@ -1808,7 +1808,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
// normfactor = data->normfactor.data.fl + i;
// sum.data.ptr = (uchar*) sumdata;
// tilted.data.ptr = (uchar*) tilteddata;
// icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
// if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
// {
// break;
@ -1822,7 +1822,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
// fflush( stderr );
// }
//#endif /* CV_VERBOSE */
//
// }
//
// cvFree( &(img.data.ptr) );
@ -1842,7 +1842,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
// /* *acceptance_ratio = ((double) count) / consumed_count; */
// *acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
// }
//
// return count;
//}
@ -1853,24 +1853,24 @@ int icvGetHaarTraininDataFromVecCallback( CvMat* img, void* userdata )
int c = 0;
assert( img->rows * img->cols == ((CvVecFile*) userdata)->vecsize );
size_t elements_read = fread( &tmp, sizeof( tmp ), 1, ((CvVecFile*) userdata)->input );
CV_Assert(elements_read == 1);
elements_read = fread( ((CvVecFile*) userdata)->vector, sizeof( short ),
((CvVecFile*) userdata)->vecsize, ((CvVecFile*) userdata)->input );
CV_Assert(elements_read == (size_t)((CvVecFile*) userdata)->vecsize);
if( feof( ((CvVecFile*) userdata)->input ) ||
(((CvVecFile*) userdata)->last)++ >= ((CvVecFile*) userdata)->count )
{
return 0;
}
for( r = 0; r < img->rows; r++ )
{
for( c = 0; c < img->cols; c++ )
{
CV_MAT_ELEM( *img, uchar, r, c ) =
(uchar) ( ((CvVecFile*) userdata)->vector[r * img->cols + c] );
}
}
@ -1878,14 +1878,14 @@ int icvGetHaarTraininDataFromVecCallback( CvMat* img, void* userdata )
return 1;
}
-int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
+static int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
{
if (! cvbgdata)
return 0;
if (! cvbgreader)
return 0;
// just in case icvGetBackgroundImage is not thread-safe ...
#ifdef CV_OPENMP
#pragma omp critical (get_background_image_callback)
@ -1893,7 +1893,7 @@ int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
{
icvGetBackgroundImage( cvbgdata, cvbgreader, img );
}
return 1;
}
@ -1902,7 +1902,7 @@ int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
* Get training data from .vec file
*/
static
int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,
CvIntHaarClassifier* cascade,
const char* filename,
int* consumed )
@ -1914,8 +1914,8 @@ int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int coun
__BEGIN__;
CvVecFile file;
short tmp = 0;
file.input = NULL;
if( filename ) file.input = fopen( filename, "rb" );
@ -1967,8 +1967,8 @@ int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count
if (filename)
{
CvVecFile file;
short tmp = 0;
file.input = NULL;
if( filename ) file.input = fopen( filename, "rb" );
@ -2009,7 +2009,7 @@ int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count
void cvCreateCascadeClassifier( const char* dirname,
const char* vecfilename,
const char* bgfilename,
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
@ -2048,7 +2048,7 @@ void cvCreateCascadeClassifier( const char* dirname,
cascade = (CvCascadeHaarClassifier*) icvCreateCascadeHaarClassifier( nstages );
cascade->count = 0;
if( icvInitBackgroundReaders( bgfilename, winsize ) )
{
data = icvCreateHaarTrainingData( winsize, npos + nneg );
@ -2061,7 +2061,7 @@ void cvCreateCascadeClassifier( const char* dirname,
for( i = 0; i < nstages; i++, cascade->count++ )
{
sprintf( stagename, "%s%d/%s", dirname, i, CV_STAGE_CART_FILE_NAME );
cascade->classifier[i] =
icvLoadCARTStageHaarClassifier( stagename, winsize.width + 1 );
if( !icvMkDir( stagename ) )
@ -2129,7 +2129,7 @@ void cvCreateCascadeClassifier( const char* dirname,
data->sum.rows = data->tilted.rows = poscount + negcount;
data->normfactor.cols = data->weights.cols = data->cls.cols =
poscount + negcount;
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / negcount);
for( j = 0; j < poscount; j++ )
@ -2169,7 +2169,7 @@ void cvCreateCascadeClassifier( const char* dirname,
file = fopen( stagename, "w" );
if( file != NULL )
{
cascade->classifier[i]->save(
(CvIntHaarClassifier*) cascade->classifier[i], file );
fclose( file );
}
@ -2207,7 +2207,7 @@ void cvCreateCascadeClassifier( const char* dirname,
printf( "FAILED TO INITIALIZE BACKGROUND READERS\n" );
#endif /* CV_VERBOSE */
}
/* CLEAN UP */
icvDestroyBackgroundReaders();
cascade->release( (CvIntHaarClassifier**) &cascade );
@ -2215,7 +2215,7 @@ void cvCreateCascadeClassifier( const char* dirname,
/* tree cascade classifier */
-int icvNumSplits( CvStageHaarClassifier* stage )
+static int icvNumSplits( CvStageHaarClassifier* stage )
{
int i;
int num;
@ -2229,7 +2229,7 @@ int icvNumSplits( CvStageHaarClassifier* stage )
return num;
}
-void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
+static void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
{
assert( num <= training_data->maxnum );
@ -2238,7 +2238,7 @@ void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
training_data->cls.cols = training_data->weights.cols = num;
}
-void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
+static void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
int num1, float weight1, float cls1,
int num2, float weight2, float cls2 )
{
@ -2258,7 +2258,7 @@ void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
}
}
-CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
+static CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
int start, int num,
CvIntHaarFeatures* haar_features,
CvStageHaarClassifier* stage )
@ -2302,7 +2302,7 @@ CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
}
total = last + 1;
CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) );
#ifdef CV_OPENMP
#pragma omp parallel for
@ -2351,7 +2351,7 @@ typedef struct CvSplit
void cvCreateTreeCascadeClassifier( const char* dirname,
const char* vecfilename,
const char* bgfilename,
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
@ -2425,11 +2425,11 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
if (! bg_vecfile)
if( !icvInitBackgroundReaders( bgfilename, winsize ) && nstages > 0 )
CV_ERROR( CV_StsError, "Unable to read negative images" );
if( nstages > 0 )
{
/* width-first search in the tree */
@ -2438,7 +2438,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
CvSplit* first_split;
CvSplit* last_split;
CvSplit* cur_split;
CvTreeCascadeNode* parent;
CvTreeCascadeNode* cur_node;
CvTreeCascadeNode* last_node;
@ -2447,7 +2447,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
parent = leaves;
leaves = NULL;
do
{
int best_clusters; /* best selected number of clusters */
float posweight, negweight;
double leaf_fa_rate;
@ -2536,7 +2536,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
multiple_clusters = NULL;
printf( "Number of used features: %d\n", single_num );
if( maxtreesplits >= 0 )
{
max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 );
@ -2594,7 +2594,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
printf( "Clusters are too small. Clustering aborted.\n" );
break;
}
cur_num = 0;
cur_node = last_node = NULL;
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
@ -2676,7 +2676,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
CV_CALL( cur_split = (CvSplit*) cvAlloc( sizeof( *cur_split ) ) );
CV_ZERO_OBJ( cur_split );
if( last_split ) last_split->next = cur_split;
else first_split = cur_split;
last_split = cur_split;
@ -2734,7 +2734,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
? last_split->multiple_clusters : last_split->single_cluster;
parent = last_split->parent;
if( parent ) parent->child = cur_node;
/* connect leaves via next_same_level and save them */
for( ; cur_node; cur_node = cur_node->next )
{
@ -2768,14 +2768,14 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
printf( "\nParent node: %s\n", buf );
printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters)
? (last_split->num_clusters - 1) : 0 );
cur_split = last_split;
last_split = last_split->next;
cvFree( &cur_split );
} /* for each split point */
printf( "Total number of splits: %d\n", total_splits );
if( !(tcc->root) ) tcc->root = leaves;
CV_CALL( icvPrintTreeCascade( tcc->root ) );
@ -2903,7 +2903,7 @@ void cvCreateTrainingSamples( const char* filename,
inverse = (rand() > (RAND_MAX/2));
}
icvPlaceDistortedSample( &sample, inverse, maxintensitydev,
maxxangle, maxyangle, maxzangle,
0 /* nonzero means placing image without cut offs */,
0.0 /* nozero adds random shifting */,
0.0 /* nozero adds random scaling */,
@ -2931,13 +2931,13 @@ void cvCreateTrainingSamples( const char* filename,
cvFree( &(sample.data.ptr) );
fclose( output );
} /* if( output != NULL ) */
icvEndSampleDistortion( &data );
}
#ifdef CV_VERBOSE
printf( "\r \r" );
#endif /* CV_VERBOSE */
}
@ -2986,7 +2986,7 @@ void cvCreateTestSamples( const char* infoname,
{
cvNamedWindow( "Image", CV_WINDOW_AUTOSIZE );
}
info = fopen( infoname, "w" );
strcpy( fullname, infoname );
filename = strrchr( fullname, '\\' );
@ -3008,7 +3008,7 @@ void cvCreateTestSamples( const char* infoname,
for( i = 0; i < count; i++ )
{
icvGetNextFromBackgroundData( cvbgdata, cvbgreader );
maxscale = MIN( 0.7F * cvbgreader->src.cols / winwidth,
0.7F * cvbgreader->src.rows / winheight );
if( maxscale < 1.0F ) continue;
@ -3025,14 +3025,14 @@ void cvCreateTestSamples( const char* infoname,
inverse = (rand() > (RAND_MAX/2));
}
icvPlaceDistortedSample( &win, inverse, maxintensitydev,
maxxangle, maxyangle, maxzangle,
1, 0.0, 0.0, &data );
sprintf( filename, "%04d_%04d_%04d_%04d_%04d.jpg",
(i + 1), x, y, width, height );
if( info )
{
fprintf( info, "%s %d %d %d %d %d\n",
filename, 1, x, y, width, height );

View File

@ -83,7 +83,7 @@
* cij - coeffs[i][j], coeffs[2][2] = 1
* (ui, vi) - rectangle vertices
*/
-void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
+static void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
double coeffs[3][3] )
{
//CV_FUNCNAME( "cvWarpPerspective" );
@ -130,7 +130,7 @@ void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
}
/* Warps source into destination by a perspective transform */
-void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
+static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
{
CV_FUNCNAME( "cvWarpPerspective" );

View File

@ -44,6 +44,9 @@
*
* Measure performance of classifier
*/
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "cv.h"
#include "highgui.h"

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "HOGfeatures.h"
#include "cascadeclassifier.h"
@ -54,7 +57,7 @@ void CvHOGEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) con
features[featIdx].write( fs, componentIdx );
fs << "}";
}
fs << "]";
fs << "]";
}
void CvHOGEvaluator::generateFeatures()
@ -85,11 +88,11 @@ void CvHOGEvaluator::generateFeatures()
}
}
w = 4*t;
h = 2*t;
for (x = 0; x <= winSize.width - w; x += blockStep.width)
{
for (y = 0; y <= winSize.height - h; y += blockStep.height)
{
features.push_back(Feature(offset, x, y, 2*t, t));
}
}
@ -136,7 +139,7 @@ void CvHOGEvaluator::Feature::write(FileStorage &fs) const
// int cellIdx = featComponent / N_BINS;
// int binIdx = featComponent % N_BINS;
//
// fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y <<
// rect[cellIdx].width << rect[cellIdx].height << binIdx << "]";
//}
@ -144,7 +147,7 @@ void CvHOGEvaluator::Feature::write(FileStorage &fs) const
//All block is nessesary for block normalization
void CvHOGEvaluator::Feature::write(FileStorage &fs, int featComponentIdx) const
{
fs << CC_RECT << "[:" << rect[0].x << rect[0].y <<
rect[0].width << rect[0].height << featComponentIdx << "]";
}
@ -228,7 +231,7 @@ void CvHOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, M
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );
histBuf += histStep + 1;
for( y = 0; y < qangle.rows; y++ )
{
histBuf[-1] = 0.f;
float strSum = 0.f;
for( x = 0; x < qangle.cols; x++ )

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "boost.h"
#include "cascadeclassifier.h"
#include <queue>
@ -139,7 +142,7 @@ static CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, b
//----------------------------- CascadeBoostParams -------------------------------------------------
CvCascadeBoostParams::CvCascadeBoostParams() : minHitRate( 0.995F), maxFalseAlarm( 0.5F )
{
boost_type = CvBoost::GENTLE;
use_surrogates = use_1se_rule = truncate_pruned_tree = false;
}
@ -157,7 +160,7 @@ CvCascadeBoostParams::CvCascadeBoostParams( int _boostType,
void CvCascadeBoostParams::write( FileStorage &fs ) const
{
String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
boost_type == CvBoost::REAL ? CC_REAL_BOOST :
boost_type == CvBoost::LOGIT ? CC_LOGIT_BOOST :
boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
@ -197,7 +200,7 @@ bool CvCascadeBoostParams::read( const FileNode &node )
void CvCascadeBoostParams::printDefaults() const
{
cout << "--boostParams--" << endl;
cout << " [-bt <{" << CC_DISCRETE_BOOST << ", "
cout << " [-bt <{" << CC_DISCRETE_BOOST << ", "
<< CC_REAL_BOOST << ", "
<< CC_LOGIT_BOOST ", "
<< CC_GENTLE_BOOST << "(default)}>]" << endl;
@ -210,7 +213,7 @@ void CvCascadeBoostParams::printDefaults() const
void CvCascadeBoostParams::printAttrs() const
{
String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
boost_type == CvBoost::REAL ? CC_REAL_BOOST :
boost_type == CvBoost::LOGIT ? CC_LOGIT_BOOST :
boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
@ -259,7 +262,7 @@ bool CvCascadeBoostParams::scanAttr( const String prmName, const String val)
else
res = false;
return res;
}
CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_idx )
@ -440,7 +443,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
set_params( _params );
max_c_count = MAX( 2, featureEvaluator->getMaxCatCount() );
var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
if ( featureEvaluator->getMaxCatCount() > 0 )
{
numPrecalcIdx = 0;
cat_var_count = var_count;
@ -448,7 +451,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
for( int vi = 0; vi < var_count; vi++ )
{
var_type->data.i[vi] = vi;
}
}
else
{
@ -457,8 +460,8 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
for( int vi = 1; vi <= var_count; vi++ )
{
var_type->data.i[vi-1] = -vi;
}
}
var_type->data.i[var_count] = cat_var_count;
var_type->data.i[var_count+1] = cat_var_count+1;
@ -467,7 +470,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
treeBlockSize = MAX(treeBlockSize + BlockSizeDelta, MinBlockSize);
tree_storage = cvCreateMemStorage( treeBlockSize );
node_heap = cvCreateSet( 0, sizeof(node_heap[0]), sizeof(CvDTreeNode), tree_storage );
split_heap = cvCreateSet( 0, sizeof(split_heap[0]), maxSplitSize, tree_storage );
}
CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _featureEvaluator,
@ -477,15 +480,15 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
{
setData( _featureEvaluator, _numSamples, _precalcValBufSize, _precalcIdxBufSize, _params );
}
void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluator,
int _numSamples,
int _precalcValBufSize, int _precalcIdxBufSize,
const CvDTreeParams& _params )
{
int* idst = 0;
unsigned short* udst = 0;
clear();
shared = true;
have_labels = true;
@ -503,16 +506,16 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
_resp = featureEvaluator->getCls();
responses = &_resp;
// TODO: check responses: elements must be 0 or 1
if( _precalcValBufSize < 0 || _precalcIdxBufSize < 0)
CV_Error( CV_StsOutOfRange, "_numPrecalcVal and _numPrecalcIdx must be positive or 0" );
var_count = var_all = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
sample_count = _numSamples;
is_buf_16u = false;
if (sample_count < 65536)
is_buf_16u = true;
numPrecalcVal = min( cvRound((double)_precalcValBufSize*1048576. / (sizeof(float)*sample_count)), var_count );
numPrecalcIdx = min( cvRound((double)_precalcIdxBufSize*1048576. /
@ -522,8 +525,8 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
valCache.create( numPrecalcVal, sample_count, CV_32FC1 );
var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
if ( featureEvaluator->getMaxCatCount() > 0 )
{
numPrecalcIdx = 0;
cat_var_count = var_count;
@ -531,7 +534,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
for( int vi = 0; vi < var_count; vi++ )
{
var_type->data.i[vi] = vi;
}
}
else
{
@ -540,14 +543,14 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
for( int vi = 1; vi <= var_count; vi++ )
{
var_type->data.i[vi-1] = -vi;
}
}
var_type->data.i[var_count] = cat_var_count;
var_type->data.i[var_count+1] = cat_var_count+1;
work_var_count = ( cat_var_count ? 0 : numPrecalcIdx ) + 1/*cv_lables*/;
buf_size = (work_var_count + 1) * sample_count/*sample_indices*/;
buf_count = 2;
if ( is_buf_16u )
buf = cvCreateMat( buf_count, buf_size, CV_16UC1 );
else
@ -556,7 +559,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
cat_count = cvCreateMat( 1, cat_var_count + 1, CV_32SC1 );
// precalculate valCache and set indices in buf
precalculate();
// now calculate the maximum size of split,
// create memory storage that will keep nodes and splits of the decision tree
@ -574,7 +577,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
tempBlockSize = MAX( tempBlockSize + BlockSizeDelta, MinBlockSize );
temp_storage = cvCreateMemStorage( tempBlockSize );
nv_heap = cvCreateSet( 0, sizeof(*nv_heap), nvSize, temp_storage );
data_root = new_node( 0, sample_count, 0, 0 );
// set sample labels
@ -617,7 +620,7 @@ void CvCascadeBoostTrainData::free_train_data()
const int* CvCascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* labelsBuf)
{
int nodeSampleCount = n->sample_count;
int rStep = CV_IS_MAT_CONT( responses->type ) ? 1 : responses->step / CV_ELEM_SIZE( responses->type );
int* sampleIndicesBuf = labelsBuf; //
@ -626,7 +629,7 @@ const int* CvCascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* label
{
int sidx = sampleIndices[si];
labelsBuf[si] = (int)responses->data.fl[sidx*rStep];
}
return labelsBuf;
}
@ -643,9 +646,9 @@ const int* CvCascadeBoostTrainData::get_cv_labels( CvDTreeNode* n, int* labels_b
void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ordValuesBuf, int* sortedIndicesBuf,
const float** ordValues, const int** sortedIndices, int* sampleIndicesBuf )
{
int nodeSampleCount = n->sample_count;
const int* sampleIndices = get_sample_indices(n, sampleIndicesBuf);
if ( vi < numPrecalcIdx )
{
if( !is_buf_16u )
@ -659,7 +662,7 @@ void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* o
*sortedIndices = sortedIndicesBuf;
}
if( vi < numPrecalcVal )
{
for( int i = 0; i < nodeSampleCount; i++ )
@ -705,10 +708,10 @@ void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* o
ordValuesBuf[i] = (&sampleValues[0])[sortedIndicesBuf[i]];
*sortedIndices = sortedIndicesBuf;
}
*ordValues = ordValuesBuf;
}
const int* CvCascadeBoostTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* catValuesBuf )
{
int nodeSampleCount = n->sample_count;
@ -739,8 +742,8 @@ const int* CvCascadeBoostTrainData::get_cat_var_data( CvDTreeNode* n, int vi, in
float CvCascadeBoostTrainData::getVarValue( int vi, int si )
{
if ( vi < numPrecalcVal && !valCache.empty() )
return valCache.at<float>( vi, si );
return (*featureEvaluator)( vi, si );
}
@ -858,7 +861,7 @@ CvDTreeNode* CvCascadeBoostTree::predict( int sampleIdx ) const
CvDTreeNode* node = root;
if( !node )
CV_Error( CV_StsError, "The tree has not been trained yet" );
if ( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getMaxCatCount() == 0 ) // ordered
{
while( node->left )
@ -946,7 +949,7 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
int maxCatCount = ((CvCascadeBoostTrainData*)_data)->featureEvaluator->getMaxCatCount();
int subsetN = (maxCatCount + 31)/32;
int step = 3 + ( maxCatCount>0 ? subsetN : 1 );
queue<CvDTreeNode*> internalNodesQueue;
FileNodeIterator internalNodesIt, leafValsuesIt;
CvDTreeNode* prntNode, *cldNode;
@ -986,11 +989,11 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
{
prntNode->right = cldNode = data->new_node( 0, 0, 0, 0 );
*leafValsuesIt >> cldNode->value; leafValsuesIt--;
cldNode->parent = prntNode;
}
else
{
prntNode->right = internalNodesQueue.front();
prntNode->right->parent = prntNode;
internalNodesQueue.pop();
}
@ -999,7 +1002,7 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
{
prntNode->left = cldNode = data->new_node( 0, 0, 0, 0 );
*leafValsuesIt >> cldNode->value; leafValsuesIt--;
cldNode->parent = prntNode;
}
else
{
@ -1089,7 +1092,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
}
CV_Assert( n1 == n );
}
else
{
int *ldst, *rdst;
@ -1116,7 +1119,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
}
CV_Assert( n1 == n );
}
}
// split cv_labels using newIdx relocation table
@ -1171,7 +1174,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
}
}
// split sample indices
int *sampleIdx_src_buf = tempBuf + n;
const int* sampleIdx_src = data->get_sample_indices(node, sampleIdx_src_buf);
@ -1181,9 +1184,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
if (data->is_buf_16u)
{
unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
workVarCount*scount + left->offset);
unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
workVarCount*scount + right->offset);
for (int i = 0; i < n; i++)
{
@ -1202,9 +1205,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
else
{
int* ldst = buf->data.i + left->buf_idx*buf->cols +
workVarCount*scount + left->offset;
int* rdst = buf->data.i + right->buf_idx*buf->cols +
workVarCount*scount + right->offset;
for (int i = 0; i < n; i++)
{
@ -1229,10 +1232,10 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
// deallocate the parent node data that is not needed anymore
data->free_node_data(node);
}
-void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
+static void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
{
if ( node && node->split )
{
@ -1265,7 +1268,7 @@ bool CvCascadeBoost::train( const CvFeatureEvaluator* _featureEvaluator,
set_params( _params );
if ( (_params.boost_type == LOGIT) || (_params.boost_type == GENTLE) )
data->do_responses_copy();
update_weights( 0 );
cout << "+----+---------+---------+" << endl;
@ -1316,7 +1319,7 @@ bool CvCascadeBoost::set_params( const CvBoostParams& _params )
minHitRate = ((CvCascadeBoostParams&)_params).minHitRate;
maxFalseAlarm = ((CvCascadeBoostParams&)_params).maxFalseAlarm;
return ( ( minHitRate > 0 ) && ( minHitRate < 1) &&
( maxFalseAlarm > 0 ) && ( maxFalseAlarm < 1) &&
CvBoost::set_params( _params ));
}
@ -1364,7 +1367,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
if (data->is_buf_16u)
{
unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols +
data->data_root->offset + (data->work_var_count-1)*data->sample_count);
for( int i = 0; i < n; i++ )
{
@ -1382,7 +1385,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
}
else
{
int* labels = buf->data.i + data->data_root->buf_idx*buf->cols +
data->data_root->offset + (data->work_var_count-1)*data->sample_count;
for( int i = 0; i < n; i++ )
@ -1425,7 +1428,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
{
// invert the subsample mask
cvXorS( subsample_mask, cvScalar(1.), subsample_mask );
// run tree through all the non-processed samples
for( int i = 0; i < n; i++ )
if( subsample_mask->data.ptr[i] )
@ -1565,7 +1568,7 @@ bool CvCascadeBoost::isErrDesired()
int sCount = data->sample_count,
numPos = 0, numNeg = 0, numFalse = 0, numPosTrue = 0;
vector<float> eval(sCount);
for( int i = 0; i < sCount; i++ )
if( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getCls( i ) == 1.0F )
eval[numPos++] = predict( i, true );
@ -1625,7 +1628,7 @@ bool CvCascadeBoost::read( const FileNode &node,
set_params( _params );
node[CC_STAGE_THRESHOLD] >> threshold;
FileNode rnode = node[CC_WEAK_CLASSIFIERS];
storage = cvCreateMemStorage();
weak = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvBoostTree*), storage );

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "cascadeclassifier.h"
#include <queue>
@ -6,14 +9,14 @@ using namespace std;
static const char* stageTypes[] = { CC_BOOST };
static const char* featureTypes[] = { CC_HAAR, CC_LBP, CC_HOG };
CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ),
featureType( defaultFeatureType ), winSize( cvSize(24, 24) )
{
name = CC_CASCADE_PARAMS;
}
CvCascadeParams::CvCascadeParams( int _stageType, int _featureType ) : stageType( _stageType ),
featureType( _featureType ), winSize( cvSize(24, 24) )
{
name = CC_CASCADE_PARAMS;
}
@ -25,7 +28,7 @@ void CvCascadeParams::write( FileStorage &fs ) const
CV_Assert( !stageTypeStr.empty() );
fs << CC_STAGE_TYPE << stageTypeStr;
String featureTypeStr = featureType == CvFeatureParams::HAAR ? CC_HAAR :
featureType == CvFeatureParams::LBP ? CC_LBP :
featureType == CvFeatureParams::HOG ? CC_HOG :
0;
CV_Assert( !stageTypeStr.empty() );
@ -51,7 +54,7 @@ bool CvCascadeParams::read( const FileNode &node )
return false;
rnode >> featureTypeStr;
featureType = !featureTypeStr.compare( CC_HAAR ) ? CvFeatureParams::HAAR :
!featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP :
!featureTypeStr.compare( CC_HOG ) ? CvFeatureParams::HOG :
-1;
if (featureType == -1)
@ -125,15 +128,15 @@ bool CvCascadeParams::scanAttr( const String prmName, const String val )
bool CvCascadeClassifier::train( const String _cascadeDirName,
const String _posFilename,
const String _negFilename,
int _numPos, int _numNeg,
int _precalcValBufSize, int _precalcIdxBufSize,
int _numStages,
const CvCascadeParams& _cascadeParams,
const CvFeatureParams& _featureParams,
const CvCascadeBoostParams& _stageParams,
bool baseFormatSave )
{
if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() )
CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" );
@ -181,17 +184,17 @@ bool CvCascadeClassifier::train( const String _cascadeDirName,
cout << endl << "Stages 0-" << startNumStages-1 << " are loaded" << endl;
else if ( startNumStages == 1)
cout << endl << "Stage 0 is loaded" << endl;
double requiredLeafFARate = pow( (double) stageParams->maxFalseAlarm, (double) numStages ) /
(double)stageParams->max_depth;
double tempLeafFARate;
for( int i = startNumStages; i < numStages; i++ )
{
cout << endl << "===== TRAINING " << i << "-stage =====" << endl;
cout << "<BEGIN" << endl;
if ( !updateTrainingSet( tempLeafFARate ) )
{
cout << "Train dataset for temp stage can not be filled. "
"Branch training terminated." << endl;
@ -211,10 +214,10 @@ bool CvCascadeClassifier::train( const String _cascadeDirName,
stageClassifiers.push_back( tempStage );
cout << "END>" << endl;
// save params
String filename;
if ( i == 0)
{
filename = dirName + CC_PARAMS_FILENAME;
FileStorage fs( filename, FileStorage::WRITE);
@ -289,7 +292,7 @@ int CvCascadeClassifier::fillPassedSamples( int first, int count, bool isPositiv
{
bool isGetImg = isPositive ? imgReader.getPos( img ) :
imgReader.getNeg( img );
if( !isGetImg )
return getcount;
consumed++;
@ -313,14 +316,14 @@ void CvCascadeClassifier::writeParams( FileStorage &fs ) const
void CvCascadeClassifier::writeFeatures( FileStorage &fs, const Mat& featureMap ) const
{
((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap );
}
void CvCascadeClassifier::writeStages( FileStorage &fs, const Mat& featureMap ) const
{
char cmnt[30];
int i = 0;
fs << CC_STAGES << "[";
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
it != stageClassifiers.end(); it++, i++ )
{
@ -337,17 +340,17 @@ bool CvCascadeClassifier::readParams( const FileNode &node )
{
if ( !node.isMap() || !cascadeParams.read( node ) )
return false;
stageParams = new CvCascadeBoostParams;
FileNode rnode = node[CC_STAGE_PARAMS];
if ( !stageParams->read( rnode ) )
return false;
featureParams = CvFeatureParams::create(cascadeParams.featureType);
rnode = node[CC_FEATURE_PARAMS];
if ( !featureParams->read( rnode ) )
return false;
return true;
}
bool CvCascadeClassifier::readStages( const FileNode &node)
@ -396,7 +399,7 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
fs << FileStorage::getDefaultObjectName(filename) << "{";
if ( !baseFormat )
{
Mat featureMap;
getUsedFeaturesIdxMap( featureMap );
writeParams( fs );
fs << CC_STAGE_NUM << (int)stageClassifiers.size();
@ -409,7 +412,7 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
CvSeq* weak;
if ( cascadeParams.featureType != CvFeatureParams::HAAR )
CV_Error( CV_StsBadFunc, "old file format is used for Haar-like features only");
fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width <<
cascadeParams.winSize.height << "]";
fs << ICV_HAAR_STAGES_NAME << "[";
for( size_t si = 0; si < stageClassifiers.size(); si++ )
@ -424,16 +427,16 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
int inner_node_idx = -1, total_inner_node_idx = -1;
queue<const CvDTreeNode*> inner_nodes_queue;
CvCascadeBoostTree* tree = *((CvCascadeBoostTree**) cvGetSeqElem( weak, wi ));
fs << "[";
/*sprintf( buf, "tree %d", wi );
CV_CALL( cvWriteComment( fs, buf, 1 ) );*/
const CvDTreeNode* tempNode;
inner_nodes_queue.push( tree->get_root() );
total_inner_node_idx++;
while (!inner_nodes_queue.empty())
{
tempNode = inner_nodes_queue.front();
@ -498,7 +501,7 @@ bool CvCascadeClassifier::load( const String cascadeDirName )
node = fs.getFirstTopLevelNode();
if ( !fs.isOpened() )
break;
CvCascadeBoost *tempStage = new CvCascadeBoost;
if ( !tempStage->read( node, (CvFeatureEvaluator*)featureEvaluator, *((CvCascadeBoostParams*)stageParams )) )
{
@ -516,11 +519,11 @@ void CvCascadeClassifier::getUsedFeaturesIdxMap( Mat& featureMap )
int varCount = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
featureMap.create( 1, varCount, CV_32SC1 );
featureMap.setTo(Scalar(-1));
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
it != stageClassifiers.end(); it++ )
((CvCascadeBoost*)((Ptr<CvCascadeBoost>)(*it)))->markUsedFeaturesInMap( featureMap );
for( int fi = 0, idx = 0; fi < varCount; fi++ )
if ( featureMap.at<int>(0, fi) >= 0 )
featureMap.ptr<int>(0)[fi] = idx++;

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "traincascade_features.h"
#include "cascadeclassifier.h"
@ -28,7 +31,7 @@ bool CvParams::scanAttr( const String prmName, const String val ) { return false
CvFeatureParams::CvFeatureParams() : maxCatCount( 0 ), featSize( 1 )
{
name = CC_FEATURE_PARAMS;
}
void CvFeatureParams::init( const CvFeatureParams& fp )
@ -55,7 +58,7 @@ bool CvFeatureParams::read( const FileNode &node )
Ptr<CvFeatureParams> CvFeatureParams::create( int featureType )
{
return featureType == HAAR ? Ptr<CvFeatureParams>(new CvHaarFeatureParams) :
featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) :
featureType == HOG ? Ptr<CvFeatureParams>(new CvHOGFeatureParams) :
Ptr<CvFeatureParams>();
}
@ -84,7 +87,7 @@ void CvFeatureEvaluator::setImage(const Mat &img, uchar clsLabel, int idx)
Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create(int type)
{
return type == CvFeatureParams::HAAR ? Ptr<CvFeatureEvaluator>(new CvHaarEvaluator) :
type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) :
type == CvFeatureParams::HOG ? Ptr<CvFeatureEvaluator>(new CvHOGEvaluator) :
Ptr<CvFeatureEvaluator>();
}

View File

@ -1,16 +1,19 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "haarfeatures.h"
#include "cascadeclassifier.h"
using namespace std;
CvHaarFeatureParams::CvHaarFeatureParams() : mode(BASIC)
{
name = HFP_NAME;
}
CvHaarFeatureParams::CvHaarFeatureParams( int _mode ) : mode( _mode )
{
name = HFP_NAME;
}
void CvHaarFeatureParams::init( const CvFeatureParams& fp )
@ -22,7 +25,7 @@ void CvHaarFeatureParams::init( const CvFeatureParams& fp )
void CvHaarFeatureParams::write( FileStorage &fs ) const
{
CvFeatureParams::write( fs );
String modeStr = mode == BASIC ? CC_MODE_BASIC :
mode == CORE ? CC_MODE_CORE :
mode == ALL ? CC_MODE_ALL : String();
CV_Assert( !modeStr.empty() );
@ -55,7 +58,7 @@ void CvHaarFeatureParams::printDefaults() const
void CvHaarFeatureParams::printAttrs() const
{
CvFeatureParams::printAttrs();
String mode_str = mode == BASIC ? CC_MODE_BASIC :
mode == CORE ? CC_MODE_CORE :
mode == ALL ? CC_MODE_ALL : 0;
cout << "mode: " << mode_str << endl;
@ -156,7 +159,7 @@ void CvHaarEvaluator::generateFeatures()
if( mode != CvHaarFeatureParams::BASIC )
{
// haar_x4
if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) )
{
features.push_back( Feature( offset, false,
x, y, dx*4, dy, -1,
@ -171,61 +174,61 @@ void CvHaarEvaluator::generateFeatures()
}
}
// x2_y2
if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) )
{
features.push_back( Feature( offset, false,
x, y, dx*2, dy*2, -1,
x, y, dx, dy, +2,
x+dx, y+dy, dx, dy, +2 ) );
}
if (mode != CvHaarFeatureParams::BASIC)
{
if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) )
{
features.push_back( Feature( offset, false,
x , y , dx*3, dy*3, -1,
x+dx, y+dy, dx , dy , +9) );
}
}
if (mode == CvHaarFeatureParams::ALL)
{
// tilted haar_x2
if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*2, dy, -1,
x, y, dx, dy, +2 ) );
}
// tilted haar_y2
if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 2*dy, -1,
x, y, dx, dy, +2 ) );
}
// tilted haar_x3
if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*3, dy, -1,
x+dx, y+dx, dx, dy, +3 ) );
}
// tilted haar_y3
if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 3*dy, -1,
x-dy, y+dy, dx, dy, +3 ) );
}
// tilted haar_x4
if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*4, dy, -1,
x+dx, y+dx, dx*2, dy, +2 ) );
}
// tilted haar_y4
if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 4*dy, -1,
@ -296,7 +299,7 @@ void CvHaarEvaluator::Feature::write( FileStorage &fs ) const
fs << CC_RECTS << "[";
for( int ri = 0; ri < CV_HAAR_FEATURE_MAX && rect[ri].r.width != 0; ++ri )
{
fs << "[:" << rect[ri].r.x << rect[ri].r.y <<
rect[ri].r.width << rect[ri].r.height << rect[ri].weight << "]";
}
fs << "]" << CC_TILTED << tilted;

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "cv.h"
#include "imagestorage.h"
#include <stdio.h>
@ -55,7 +58,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
for( size_t i = 0; i < count; i++ )
{
src = imread( imgFilenames[last++], 0 );
if( src.empty() )
continue;
round += last / count;
round = round % (winSize.width * winSize.height);
@ -63,7 +66,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
_offset.x = min( (int)round % winSize.width, src.cols - winSize.width );
_offset.y = min( (int)round / winSize.width, src.rows - winSize.height );
if( !src.empty() && src.type() == CV_8UC1
&& offset.x >= 0 && offset.y >= 0 )
break;
}
@ -73,7 +76,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
point = offset = _offset;
scale = max( ((float)winSize.width + point.x) / ((float)src.cols),
((float)winSize.height + point.y) / ((float)src.rows) );
Size sz( (int)(scale*src.cols + 0.5F), (int)(scale*src.rows + 0.5F) );
resize( src, img, sz );
return true;
@ -87,7 +90,7 @@ bool CvCascadeImageReader::NegReader::get( Mat& _img )
CV_Assert( _img.rows == winSize.height );
if( img.empty() )
if ( !nextImg() )
return false;
Mat mat( winSize.height, winSize.width, CV_8UC1,
@ -109,7 +112,7 @@ bool CvCascadeImageReader::NegReader::get( Mat& _img )
resize( src, img, Size( (int)(scale*src.cols), (int)(scale*src.rows) ) );
else
{
if ( !nextImg() )
return false;
}
}
@ -131,7 +134,7 @@ bool CvCascadeImageReader::PosReader::create( const String _filename )
if( !file )
return false;
short tmp = 0;
if( fread( &count, sizeof( count ), 1, file ) != 1 ||
fread( &vecSize, sizeof( vecSize ), 1, file ) != 1 ||
fread( &tmp, sizeof( tmp ), 1, file ) != 1 ||

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "lbpfeatures.h"
#include "cascadeclassifier.h"

View File

@ -1,3 +1,6 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/internal.hpp"
#include "cv.h"
#include "cascadeclassifier.h"
@ -13,11 +16,11 @@ int main( int argc, char* argv[] )
int precalcValBufSize = 256,
precalcIdxBufSize = 256;
bool baseFormatSave = false;
CvCascadeParams cascadeParams;
CvCascadeBoostParams stageParams;
Ptr<CvFeatureParams> featureParams[] = { Ptr<CvFeatureParams>(new CvHaarFeatureParams),
Ptr<CvFeatureParams>(new CvLBPFeatureParams),
Ptr<CvFeatureParams>(new CvHOGFeatureParams)
};
int fc = sizeof(featureParams)/sizeof(featureParams[0]);
@ -85,7 +88,7 @@ int main( int argc, char* argv[] )
{
for( int fi = 0; fi < fc; fi++ )
{
set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);
if ( !set )
{
i++;
@ -94,11 +97,11 @@ int main( int argc, char* argv[] )
}
}
}
classifier.train( cascadeDirName,
vecName,
bgName,
numPos, numNeg,
precalcValBufSize, precalcIdxBufSize,
numStages,
cascadeParams,

View File

@ -47,6 +47,9 @@ if(CMAKE_COMPILER_IS_GNUCXX)
# High level of warnings.
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wall")
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Werror=format-security")
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wmissing-declarations -Wcast-align -Wundef -Winit-self -Wpointer-arith") #-Wstrict-aliasing=2
# The -Wno-long-long is required in 64bit systems when including system headers.
if(X86_64)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wno-long-long")
@ -171,18 +174,18 @@ if(MSVC)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE2")
endif()
endif()
if(ENABLE_SSE3)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE3")
endif()
if(ENABLE_SSE4_1)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE4.1")
endif()
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Oi")
endif()
if(X86 OR X86_64)
if(CMAKE_SIZEOF_VOID_P EQUAL 4 AND ENABLE_SSE2)
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /fp:fast")# !! important - be on the same wave with x64 compilers
@ -217,6 +220,10 @@ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OPENCV_EXTRA_EXE_LINKER_
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE}")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG}")
if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wmissing-prototypes -Wstrict-prototypes")
endif()
if(MSVC)
# avoid warnings from MSVC about overriding the /W* option
# we replace /W3 with /W4 only for C++ files,
@ -225,12 +232,12 @@ if(MSVC)
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
# allow extern "C" functions throw exceptions
foreach(flags CMAKE_C_FLAGS CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
string(REPLACE "/EHsc-" "/EHs" ${flags} "${${flags}}")
string(REPLACE "/EHsc" "/EHs" ${flags} "${${flags}}")
string(REPLACE "/Zm1000" "" ${flags} "${${flags}}")
endforeach()

View File

@ -72,6 +72,7 @@ MACRO(_PCH_WRITE_PCHDEP_CXX _targetName _include_file _dephelp)
ADD_CUSTOM_COMMAND(
OUTPUT "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "#include \\\"${_include_file}\\\"" > "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction();" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction()" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "{" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo " return 0;" >> "${${_dephelp}}"
@ -82,6 +83,7 @@ MACRO(_PCH_WRITE_PCHDEP_CXX _targetName _include_file _dephelp)
ADD_CUSTOM_COMMAND(
OUTPUT "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${_include_file}\\\"" > "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)\\;" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "{" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo " \\return 0\\;" >> "${${_dephelp}}"

View File

@ -6,7 +6,7 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -55,12 +55,12 @@
# endif
#endif
void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
static void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
{
const float min_aspect_ratio = 0.3f;
const float max_aspect_ratio = 3.0f;
const float min_box_size = 10.0f;
for(CvSeq* seq = contours; seq != NULL; seq = seq->h_next)
{
CvBox2D box = cvMinAreaRect2(seq);
@ -75,12 +75,12 @@ void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, in
{
continue;
}
quads.push_back(std::pair<float, int>(box_size, class_id));
}
}
void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
static void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
{
counts.assign(2, 0);
for(size_t i = idx1; i != idx2; i++)
@ -89,36 +89,36 @@ void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1,
}
}
bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
inline bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
{
return p1.first < p2.first;
}
// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
if(src->nChannels > 1)
{
cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
__FILE__, __LINE__);
}
if(src->depth != 8)
{
cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
__FILE__, __LINE__);
}
const int erosion_count = 1;
const float black_level = 20.f;
const float white_level = 130.f;
const float black_white_gap = 70.f;
#if defined(DEBUG_WINDOWS)
cvNamedWindow("1", 1);
cvShowImage("1", src);
@ -126,46 +126,46 @@ int cvCheckChessboard(IplImage* src, CvSize size)
#endif //DEBUG_WINDOWS
CvMemStorage* storage = cvCreateMemStorage();
IplImage* white = cvCloneImage(src);
IplImage* black = cvCloneImage(src);
cvErode(white, white, NULL, erosion_count);
cvDilate(black, black, NULL, erosion_count);
IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
int result = 0;
for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
{
cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
CvSeq* first = 0;
std::vector<std::pair<float, int> > quads;
cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 1);
cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 0);
const size_t min_quads_count = size.width*size.height/2;
std::sort(quads.begin(), quads.end(), less_pred);
// now check if there are many hypotheses with similar sizes
// do this by floodfill-style algorithm
const float size_rel_dev = 0.4f;
for(size_t i = 0; i < quads.size(); i++)
{
size_t j = i + 1;
@ -176,7 +176,7 @@ int cvCheckChessboard(IplImage* src, CvSize size)
break;
}
}
if(j + 1 > min_quads_count + i)
{
// check the number of black and white squares
@ -194,12 +194,12 @@ int cvCheckChessboard(IplImage* src, CvSize size)
}
}
}
cvReleaseImage(&thresh);
cvReleaseImage(&white);
cvReleaseImage(&black);
cvReleaseMemStorage(&storage);
return result;
}
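A minimal usage sketch, not part of this patch, of how the fast pre-test above is meant to be combined with the regular corner search; the file name and the 9x6 pattern size are assumptions for illustration, and the cvCheckChessboard declaration is assumed to be reachable through the calib3d headers of this era.

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

static bool findCornersIfLikely(std::vector<cv::Point2f>& corners)
{
    cv::Mat gray = cv::imread("board.png", 0);   // hypothetical grayscale input
    if (gray.empty())
        return false;
    IplImage ipl = gray;                         // C API view of the same data
    // cheap rejection test: 1 = maybe a board, 0 = no board, -1 = error
    if (cvCheckChessboard(&ipl, cvSize(9, 6)) <= 0)
        return false;
    return cv::findChessboardCorners(gray, cv::Size(9, 6), corners);
}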

View File

@ -1223,7 +1223,7 @@ void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessor
}
}
void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
{
if (predecessorMatrix.at<int> ((int)v1, (int)v2) < 0)
{
@ -1403,7 +1403,7 @@ void CirclesGridFinder::getHoles(vector<Point2f> &outHoles) const
}
}
bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
static bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
{
if (pos.y < 0 || pos.x < 0)
return false;

View File

@ -42,11 +42,11 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif

View File

@ -52,41 +52,41 @@
#undef max
namespace cv {
void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
{
for(size_t i = 0; i < corners.size(); i++)
{
circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
}
}
int histQuantile(const Mat& hist, float quantile)
{
if(hist.dims > 1) return -1; // works for 1D histograms only
float cur_sum = 0;
float total_sum = (float)sum(hist).val[0];
float quantile_sum = total_sum*quantile;
for(int j = 0; j < hist.size[0]; j++)
{
cur_sum += (float)hist.at<float>(j);
if(cur_sum > quantile_sum)
{
return j;
}
}
return hist.size[0] - 1;
}
bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
// static void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
// {
// for(size_t i = 0; i < corners.size(); i++)
// {
// circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
// }
// }
// static int histQuantile(const Mat& hist, float quantile)
// {
// if(hist.dims > 1) return -1; // works for 1D histograms only
// float cur_sum = 0;
// float total_sum = (float)sum(hist).val[0];
// float quantile_sum = total_sum*quantile;
// for(int j = 0; j < hist.size[0]; j++)
// {
// cur_sum += (float)hist.at<float>(j);
// if(cur_sum > quantile_sum)
// {
// return j;
// }
// }
// return hist.size[0] - 1;
// }
inline bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
{
return p1.second < p2.second;
}
void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
static void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
{
order.clear();
size_t i, j, n = contours.size();
@ -101,58 +101,58 @@ void orderContours(const vector<vector<Point> >& contours, Point2f point, vector
}
order.push_back(std::pair<int, float>((int)i, (float)min_dist));
}
std::sort(order.begin(), order.end(), is_smaller);
}
// fit second order curve to a set of 2D points
void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
inline void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
{
// TBD
}
void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
inline void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
{
}
void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
static void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
{
float det = dir2.x*dir1.y - dir2.y*dir1.x;
Point2f offset = origin2 - origin1;
float alpha = (dir2.x*offset.y - dir2.y*offset.x)/det;
cross_point = origin1 + dir1*alpha;
}
void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
{
// find the nearest point
double min_dist = std::numeric_limits<double>::max();
int min_idx = -1;
// find corner idx
for(size_t i = 0; i < contour.size(); i++)
{
double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
if(dist < min_dist)
{
min_dist = dist;
min_idx = (int)i;
}
}
assert(min_idx >= 0);
// temporary solution, have to make something more precise
corner = contour[min_idx];
return;
}
void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
// static void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
// {
// // find the nearest point
// double min_dist = std::numeric_limits<double>::max();
// int min_idx = -1;
// // find corner idx
// for(size_t i = 0; i < contour.size(); i++)
// {
// double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
// if(dist < min_dist)
// {
// min_dist = dist;
// min_idx = (int)i;
// }
// }
// assert(min_idx >= 0);
// // temporary solution, have to make something more precise
// corner = contour[min_idx];
// return;
// }
static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
{
// find the nearest point
double min_dist = std::numeric_limits<double>::max();
int min_idx = -1;
// find corner idx
for(size_t i = 0; i < contour.size(); i++)
{
@ -164,23 +164,23 @@ void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
}
}
assert(min_idx >= 0);
// temporary solution, have to make something more precise
corner = contour[min_idx];
return;
}
int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
static int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
{
Mat bw;
//const double max_bell_width = 20; // we expect two bells with width bounded above
//const double min_bell_width = 5; // and below
double total_sum = sum(hist).val[0];
//double thresh = total_sum/(2*max_bell_width)*0.25f; // quarter of a bar inside a bell
// threshold(hist, bw, thresh, 255.0, CV_THRESH_BINARY);
double quantile_sum = 0.0;
//double min_quantile = 0.2;
double low_sum = 0;
@ -193,7 +193,7 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
{
quantile_sum += hist.at<float>(x);
if(quantile_sum < 0.2*total_sum) continue;
if(quantile_sum - low_sum > out_of_bells_fraction*total_sum)
{
if(max_segment_length < x - start_x)
@ -207,7 +207,7 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
start_x = x;
}
}
if(start_x == -1)
{
return 0;
@ -219,9 +219,9 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
return 1;
}
}
}
bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size region_size)
{
Mat img = _img.getMat(), cornersM = _corners.getMat();
@ -232,22 +232,22 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
float ranges[] = {0, 256};
const float* _ranges = ranges;
Mat hist;
#if defined(_SUBPIX_VERBOSE)
vector<float> radius;
radius.assign(corners.size(), 0.0f);
#endif //_SUBPIX_VERBOSE
Mat black_comp, white_comp;
for(int i = 0; i < ncorners; i++)
{
int channels = 0;
Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
region_size.width*2 + 1, region_size.height*2 + 1);
Mat img_roi = img(roi);
calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);
#if 0
int black_thresh = histQuantile(hist, 0.45f);
int white_thresh = histQuantile(hist, 0.55f);
@ -255,10 +255,10 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
int black_thresh, white_thresh;
segment_hist_max(hist, black_thresh, white_thresh);
#endif
threshold(img, black_comp, black_thresh, 255.0, CV_THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, CV_THRESH_BINARY);
const int erode_count = 1;
erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);
@ -275,28 +275,28 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
imwrite("black.jpg", black_comp);
imwrite("white.jpg", white_comp);
#endif
vector<vector<Point> > white_contours, black_contours;
vector<Vec4i> white_hierarchy, black_hierarchy;
findContours(black_comp, black_contours, black_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
if(black_contours.size() < 5 || white_contours.size() < 5) continue;
// find two white and black blobs that are close to the input point
vector<std::pair<int, float> > white_order, black_order;
orderContours(black_contours, corners[i], black_order);
orderContours(white_contours, corners[i], white_order);
const float max_dist = 10.0f;
if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
white_order[0].second > max_dist || white_order[1].second > max_dist)
{
continue; // there will be no improvement in this corner position
}
const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
&white_contours[white_order[0].first], &white_contours[white_order[1].first]};
vector<Point2f> quads_approx[4];
Point2f quad_corners[4];
@ -306,14 +306,14 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
vector<Point2f> temp;
for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);
findCorner(quads_approx[k], corners[i], quad_corners[k]);
#else
findCorner(*quads[k], corners[i], quad_corners[k]);
#endif
quad_corners[k] += Point2f(0.5f, 0.5f);
}
// cross two lines
Point2f origin1 = quad_corners[0];
Point2f dir1 = quad_corners[1] - quad_corners[0];
@ -321,12 +321,12 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
Point2f dir2 = quad_corners[3] - quad_corners[2];
double angle = acos(dir1.dot(dir2)/(norm(dir1)*norm(dir2)));
if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;
findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
#if defined(_SUBPIX_VERBOSE)
radius[i] = norm(corners[i] - ground_truth_corners[ground_truth_idx])*6;
#if 1
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
@ -349,9 +349,9 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
waitKey(0);
#endif
#endif //_SUBPIX_VERBOSE
}
#if defined(_SUBPIX_VERBOSE)
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
@ -361,6 +361,6 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
imshow("corners", test);
waitKey();
#endif //_SUBPIX_VERBOSE
return true;
}
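A short usage sketch, not part of the commit: the quad-based refinement above is normally run on the corners returned by findChessboardCorners. The image path, pattern size and 3x3 search region are illustrative assumptions.

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

static void refineCornersSketch()
{
    cv::Mat gray = cv::imread("board.png", 0);        // hypothetical image
    std::vector<cv::Point2f> corners;
    if (cv::findChessboardCorners(gray, cv::Size(9, 6), corners))
        cv::find4QuadCornerSubpix(gray, corners, cv::Size(3, 3));   // refines in place
}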

View File

@ -52,48 +52,48 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
_rvec.create(3, 1, CV_64F);
_tvec.create(3, 1, CV_64F);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
if (flags == CV_EPNP)
{
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
return true;
}
else if (flags == CV_P3P)
{
CV_Assert( npoints == 4);
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
p3p P3Psolver(cameraMatrix);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
cv::Rodrigues(R, rvec);
return result;
}
else if (flags == CV_ITERATIVE)
{
CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
return true;
}
else
CV_Error(CV_StsBadArg, "The flags argument must be one of CV_ITERATIVE or CV_EPNP");
return false;
}
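A hedged sketch, not in the patch, of the typical call to the function above: pass matched 3D/2D point sets and pick one of the flags handled by the branches shown. The function name and the placeholder inputs are assumptions for illustration.

#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

static void estimatePoseSketch(const std::vector<cv::Point3f>& objPts,   // >= 4 model points
                               const std::vector<cv::Point2f>& imgPts,   // their projections
                               const cv::Mat& K)                         // 3x3 camera matrix
{
    cv::Mat rvec, tvec;
    // CV_ITERATIVE is the default; CV_EPNP and CV_P3P select the other branches above
    cv::solvePnP(objPts, imgPts, K, cv::Mat(), rvec, tvec, false, CV_ITERATIVE);
}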
namespace cv
@ -101,8 +101,8 @@ namespace cv
namespace pnpransac
{
const int MIN_POINTS_COUNT = 4;
void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
static void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
{
modif_points.create(1, points.cols, CV_32FC3);
Mat R(3, 3, CV_64FC1);
@ -114,32 +114,32 @@ namespace cv
tvec.copyTo(t);
transform(points, modif_points, transformation);
}
class Mutex
{
public:
Mutex() {
}
void lock()
{
#ifdef HAVE_TBB
resultsMutex.lock();
#endif
}
void unlock()
{
#ifdef HAVE_TBB
resultsMutex.unlock();
#endif
}
private:
#ifdef HAVE_TBB
tbb::mutex resultsMutex;
#endif
};
struct CameraParameters
{
void init(Mat _intrinsics, Mat _distCoeffs)
@ -147,22 +147,22 @@ namespace cv
_intrinsics.copyTo(intrinsics);
_distCoeffs.copyTo(distortion);
}
Mat intrinsics;
Mat distortion;
};
struct Parameters
{
int iterationsCount;
float reprojectionError;
int minInliersCount;
bool useExtrinsicGuess;
int flags;
CameraParameters camera;
};
void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
static void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
{
@ -178,7 +178,7 @@ namespace cv
colIndex = colIndex+1;
}
}
//filter same 3d points, hang in solvePnP
double eps = 1e-10;
int num_same_points = 0;
@ -190,22 +190,22 @@ namespace cv
}
if (num_same_points > 0)
return;
Mat localRvec, localTvec;
rvecInit.copyTo(localRvec);
tvecInit.copyTo(localTvec);
solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
params.useExtrinsicGuess, params.flags);
vector<Point2f> projected_points;
projected_points.resize(objectPoints.cols);
projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
Mat rotatedPoints;
project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
vector<int> localInliers;
for (int i = 0; i < objectPoints.cols; i++)
{
@ -216,21 +216,21 @@ namespace cv
localInliers.push_back(i);
}
}
if (localInliers.size() > inliers.size())
{
resultsMutex.lock();
inliers.clear();
inliers.resize(localInliers.size());
memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
resultsMutex.unlock();
}
}
class PnPSolver
{
public:
@ -262,18 +262,18 @@ namespace cv
tvec.copyTo(initTvec);
}
private:
PnPSolver& operator=(const PnPSolver&);
const Mat& objectPoints;
const Mat& imagePoints;
const Parameters& parameters;
Mat &rvec, &tvec;
vector<int>& inliers;
Mat initRvec, initTvec;
static RNG generator;
static Mutex syncMutex;
void generateVar(vector<char>& mask) const
{
int size = (int)mask.size();
@ -287,10 +287,10 @@ namespace cv
}
}
};
Mutex PnPSolver::syncMutex;
RNG PnPSolver::generator;
}
}
@ -302,21 +302,21 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
CV_Assert(opoints.isContinuous());
CV_Assert(opoints.depth() == CV_32F);
CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
CV_Assert(ipoints.isContinuous());
CV_Assert(ipoints.depth() == CV_32F);
CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
_rvec.create(3, 1, CV_64FC1);
_tvec.create(3, 1, CV_64FC1);
Mat rvec = _rvec.getMat();
Mat tvec = _tvec.getMat();
Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
if (minInliersCount <= 0)
minInliersCount = objectPoints.cols;
cv::pnpransac::Parameters params;
@ -325,36 +325,36 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
params.reprojectionError = reprojectionError;
params.useExtrinsicGuess = useExtrinsicGuess;
params.camera.init(cameraMatrix, distCoeffs);
params.flags = flags;
vector<int> localInliers;
Mat localRvec, localTvec;
rvec.copyTo(localRvec);
tvec.copyTo(localTvec);
if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
{
parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
localRvec, localTvec, localInliers));
}
if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
{
if (flags != CV_P3P)
{
int i, pointsCount = (int)localInliers.size();
Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
for (i = 0; i < pointsCount; i++)
{
int index = localInliers[i];
Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
imagePoints.col(index).copyTo(colInlierImagePoints);
Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
objectPoints.col(index).copyTo(colInlierObjectPoints);
}
solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
}
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
if (_inliers.needed())
Mat(localInliers).copyTo(_inliers);
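A companion sketch, not part of the commit: the RANSAC wrapper above takes the same inputs as solvePnP plus the iteration and threshold values mirrored in the Parameters struct. The function name and the numeric values are illustrative assumptions only.

#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

static void estimatePoseRansacSketch(const std::vector<cv::Point3f>& objPts,
                                     const std::vector<cv::Point2f>& imgPts,
                                     const cv::Mat& K)
{
    cv::Mat rvec, tvec;
    std::vector<int> inliers;
    cv::solvePnPRansac(objPts, imgPts, K, cv::Mat(), rvec, tvec,
                       false,    // useExtrinsicGuess
                       100,      // iterationsCount
                       8.0f,     // reprojectionError, in pixels
                       100,      // minInliersCount
                       inliers,  // indices of consistent correspondences
                       CV_ITERATIVE);
}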

File diff suppressed because it is too large Load Diff

View File

@ -81,6 +81,7 @@ private:
{
public:
virtual ImageIterator* iterator() const = 0;
virtual ~ImageRange() {}
};
// Sliding window

View File

@ -59,8 +59,8 @@ static Mat sortMatrixRowsByIndices(InputArray src, InputArray indices)
return dst;
}
Mat argsort(InputArray _src, bool ascending=true)
static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1)
@ -70,14 +70,14 @@ Mat argsort(InputArray _src, bool ascending=true)
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
}
template <typename _Tp> static
Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
{
int n = XI.rows;
// sort input table
vector<int> sort_indices = argsort(X_);
Mat X = sortMatrixRowsByIndices(X_,sort_indices);
Mat Y = sortMatrixRowsByIndices(Y_,sort_indices);
// interpolated values
@ -131,7 +131,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
}
return Mat();
}
namespace colormap
{
@ -531,7 +531,7 @@ namespace colormap
n); // number of sample points
}
};
void ColorMap::operator()(InputArray _src, OutputArray _dst) const
{
if(_lut.total() != 256)
@ -550,7 +550,7 @@ namespace colormap
// Apply the ColorMap.
LUT(src, _lut, _dst);
}
Mat ColorMap::linear_colormap(InputArray X,
InputArray r, InputArray g, InputArray b,
InputArray xi) {
@ -581,12 +581,12 @@ namespace colormap
colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) :
colormap == COLORMAP_MKPJ1 ? (colormap::ColorMap*)(new colormap::MKPJ1) :
colormap == COLORMAP_MKPJ2 ? (colormap::ColorMap*)(new colormap::MKPJ2) : 0;
if( !cm )
CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
(*cm)(src, dst);
delete cm;
}
}
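A minimal sketch, not in the patch, of the public entry point defined above; the input file name is an assumption and the header location reflects where the colormap code lived at this time.

#include <opencv2/contrib/contrib.hpp>    // assumed location of applyColorMap in this era
#include <opencv2/highgui/highgui.hpp>

static void colorizeSketch()
{
    cv::Mat gray = cv::imread("depth.png", 0), colored;   // hypothetical grayscale input
    cv::applyColorMap(gray, colored, cv::COLORMAP_JET);
}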

View File

@ -3,7 +3,7 @@
#define DEBUGLOGS 1
#if ANDROID
#ifdef ANDROID
#include <android/log.h>
#define LOG_TAG "OBJECT_DETECTOR"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
@ -25,7 +25,7 @@
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
@ -193,7 +193,7 @@ do {
} catch(...) { \
LOGE0("\n ERROR: UNKNOWN Exception caught\n\n"); \
} \
} while(0)
#endif
void* workcycleObjectDetectorFunction(void* p)
@ -214,7 +214,7 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
vector<Rect> objects;
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_mutex_lock(&mutex);
{
pthread_cond_signal(&objectDetectorThreadStartStop);
@ -268,7 +268,7 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- imageSeparateDetecting is empty, continue");
continue;
}
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p",
imageSeparateDetecting.size().width, imageSeparateDetecting.size().height, (void*)imageSeparateDetecting.data);
@ -368,7 +368,7 @@ void DetectionBasedTracker::SeparateDetectionWork::resetTracking()
pthread_mutex_unlock(&mutex);
}
bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions)
@ -398,7 +398,7 @@ bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThrea
if (timeWhenDetectingThreadStartedWork > 0) {
double time_from_previous_launch_in_ms=1000.0 * (((double)(getTickCount() - timeWhenDetectingThreadStartedWork )) / freq); //the same formula as for lastBigDetectionDuration
shouldSendNewDataToWorkThread = (time_from_previous_launch_in_ms >= detectionBasedTracker.parameters.minDetectionPeriod);
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d",
(shouldSendNewDataToWorkThread?1:0), time_from_previous_launch_in_ms, detectionBasedTracker.parameters.minDetectionPeriod);
}
@ -454,7 +454,7 @@ DetectionBasedTracker::DetectionBasedTracker(const std::string& cascadeFilename,
&& (params.scaleFactor > 1.0)
&& (params.maxTrackLifetime >= 0) );
if (!cascadeForTracking.load(cascadeFilename)) {
CV_Error(CV_StsBadArg, "DetectionBasedTracker::DetectionBasedTracker: Cannot load a cascade from the file '"+cascadeFilename+"'");
}
@ -495,7 +495,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
Mat imageDetect=imageGray;
int D=parameters.minObjectSize;
if (D < 1)
D=1;
vector<Rect> rectsWhereRegions;
@ -633,7 +633,7 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d is rejected, because it is intersected with another rectangle", j);
continue;
}
LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}",
j, detectedObjects[j].x, detectedObjects[j].y, detectedObjects[j].width, detectedObjects[j].height);
Rect r=prevRect & detectedObjects[j];
@ -691,9 +691,9 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
std::vector<TrackedObject>::iterator it=trackedObjects.begin();
while( it != trackedObjects.end() ) {
if ( (it->numFramesNotDetected > parameters.maxTrackLifetime)
||
(
(it->numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow)
&&
(it->numFramesNotDetected > innerParameters.numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown)
@ -718,7 +718,7 @@ Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
return Rect();
}
if (trackedObjects[i].numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow){
LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()",
i, trackedObjects[i].numDetectedFrames, innerParameters.numStepsToWaitBeforeFirstShow);
return Rect();
}
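A hedged usage sketch, not part of the commit, of the tracker whose internals are patched above. The cascade path, camera index and header location are assumptions; the method names follow the 2.4-era public interface, and only the fields visible in this diff (such as minObjectSize) are taken as given.

#include <opencv2/contrib/detection_based_tracker.hpp>   // header location may differ per version
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

static void trackFacesSketch()
{
    cv::DetectionBasedTracker::Parameters params;
    params.minObjectSize = 64;                      // field referenced in the code above
    cv::DetectionBasedTracker tracker("lbpcascade_frontalface.xml", params);
    tracker.run();                                  // starts the background detection thread

    cv::VideoCapture cap(0);
    cv::Mat frame, gray;
    std::vector<cv::Rect> objects;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        tracker.process(gray);                      // feed the new grayscale frame
        tracker.getObjects(objects);                // current tracked rectangles
    }
    tracker.stop();
}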

View File

@ -46,7 +46,7 @@
using namespace cv;
void downsamplePoints( const Mat& src, Mat& dst, size_t count )
static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
{
CV_Assert( count >= 2 );
CV_Assert( src.cols == 1 || src.rows == 1 );

View File

@ -28,7 +28,7 @@ using std::map;
using std::set;
using std::cout;
using std::endl;
// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
@ -42,7 +42,7 @@ inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
elems.push_back(*it);
return elems;
}
static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
@ -72,8 +72,8 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
}
return data;
}
void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
if(_indices.getMat().type() != CV_32SC1)
CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
Mat src = _src.getMat();
@ -87,13 +87,13 @@ void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArra
}
}
Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
Mat dst;
sortMatrixColumnsByIndices(src, indices, dst);
return dst;
}
template<typename _Tp> static bool
isSymmetric_(InputArray src) {
Mat _src = src.getMat();
@ -151,7 +151,7 @@ static bool isSymmetric(InputArray src, double eps=1e-16)
return false;
}
//------------------------------------------------------------------------------
// subspace::project
//------------------------------------------------------------------------------
@ -198,32 +198,32 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
return X;
}
class EigenvalueDecomposition {
private:
// Holds the data dimension.
int n;
// Stores real/imag part of a complex division.
double cdivr, cdivi;
// Pointer to internal memory.
double *d, *e, *ort;
double **V, **H;
// Holds the computed eigenvalues.
Mat _eigenvalues;
// Holds the computed eigenvectors.
Mat _eigenvectors;
// Allocates memory.
template<typename _Tp>
_Tp *alloc_1d(int m) {
return new _Tp[m];
}
// Allocates memory.
template<typename _Tp>
_Tp *alloc_1d(int m, _Tp val) {
@ -232,7 +232,7 @@ private:
arr[i] = val;
return arr;
}
// Allocates memory.
template<typename _Tp>
_Tp **alloc_2d(int m, int n) {
@ -241,7 +241,7 @@ private:
arr[i] = new _Tp[n];
return arr;
}
// Allocates memory.
template<typename _Tp>
_Tp **alloc_2d(int m, int n, _Tp val) {
@ -253,7 +253,7 @@ private:
}
return arr;
}
void cdiv(double xr, double xi, double yr, double yi) {
double r, d;
if (std::abs(yr) > std::abs(yi)) {
@ -268,16 +268,16 @@ private:
cdivi = (r * xi - xr) / d;
}
}
// Nonsymmetric reduction from Hessenberg to real Schur form.
void hqr2() {
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
// Initialize
int nn = this->n;
int n = nn - 1;
@ -286,9 +286,9 @@ private:
double eps = pow(2.0, -52.0);
double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
// Store roots isolated by balanc and compute matrix norm
double norm = 0.0;
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
@ -299,11 +299,11 @@ private:
norm = norm + std::abs(H[i][j]);
}
}
// Outer loop over eigenvalue index
int iter = 0;
while (n >= low) {
// Look for single small sub-diagonal element
int l = n;
while (l > low) {
@ -316,19 +316,19 @@ private:
}
l--;
}
// Check for convergence
// One root found
if (l == n) {
H[n][n] = H[n][n] + exshift;
d[n] = H[n][n];
e[n] = 0.0;
n--;
iter = 0;
// Two roots found
} else if (l == n - 1) {
w = H[n][n - 1] * H[n - 1][n];
p = (H[n - 1][n - 1] - H[n][n]) / 2.0;
@ -337,9 +337,9 @@ private:
H[n][n] = H[n][n] + exshift;
H[n - 1][n - 1] = H[n - 1][n - 1] + exshift;
x = H[n][n];
// Real pair
if (q >= 0) {
if (p >= 0) {
z = p + z;
@ -360,33 +360,33 @@ private:
r = sqrt(p * p + q * q);
p = p / r;
q = q / r;
// Row modification
for (int j = n - 1; j < nn; j++) {
z = H[n - 1][j];
H[n - 1][j] = q * z + p * H[n][j];
H[n][j] = q * H[n][j] - p * z;
}
// Column modification
for (int i = 0; i <= n; i++) {
z = H[i][n - 1];
H[i][n - 1] = q * z + p * H[i][n];
H[i][n] = q * H[i][n] - p * z;
}
// Accumulate transformations
for (int i = low; i <= high; i++) {
z = V[i][n - 1];
V[i][n - 1] = q * z + p * V[i][n];
V[i][n] = q * V[i][n] - p * z;
}
// Complex pair
} else {
d[n - 1] = x + p;
d[n] = x + p;
@ -395,13 +395,13 @@ private:
}
n = n - 2;
iter = 0;
// No convergence yet
} else {
// Form shift
x = H[n][n];
y = 0.0;
w = 0.0;
@ -409,9 +409,9 @@ private:
y = H[n - 1][n - 1];
w = H[n][n - 1] * H[n - 1][n];
}
// Wilkinson's original ad hoc shift
if (iter == 10) {
exshift += x;
for (int i = low; i <= n; i++) {
@ -421,9 +421,9 @@ private:
x = y = 0.75 * s;
w = -0.4375 * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30) {
s = (y - x) / 2.0;
s = s * s + w;
@ -440,9 +440,9 @@ private:
x = y = w = 0.964;
}
}
iter = iter + 1; // (Could check iteration count here.)
// Look for two consecutive small sub-diagonal elements
int m = n - 2;
while (m >= l) {
@ -466,16 +466,16 @@ private:
}
m--;
}
for (int i = m + 2; i <= n; i++) {
H[i][i - 2] = 0.0;
if (i > m + 2) {
H[i][i - 3] = 0.0;
}
}
// Double QR step involving rows l:n and columns m:n
for (int k = m; k <= n - 1; k++) {
bool notlast = (k != n - 1);
if (k != m) {
@ -508,9 +508,9 @@ private:
z = r / s;
q = q / p;
r = r / p;
// Row modification
for (int j = k; j < nn; j++) {
p = H[k][j] + q * H[k + 1][j];
if (notlast) {
@ -520,9 +520,9 @@ private:
H[k][j] = H[k][j] - p * x;
H[k + 1][j] = H[k + 1][j] - p * y;
}
// Column modification
for (int i = 0; i <= min(n, k + 3); i++) {
p = x * H[i][k] + y * H[i][k + 1];
if (notlast) {
@ -532,9 +532,9 @@ private:
H[i][k] = H[i][k] - p;
H[i][k + 1] = H[i][k + 1] - p * q;
}
// Accumulate transformations
for (int i = low; i <= high; i++) {
p = x * V[i][k] + y * V[i][k + 1];
if (notlast) {
@ -548,19 +548,19 @@ private:
} // k loop
} // check convergence
} // while (n >= low)
// Backsubstitute to find vectors of upper triangular form
if (norm == 0.0) {
return;
}
for (n = nn - 1; n >= 0; n--) {
p = d[n];
q = e[n];
// Real vector
if (q == 0) {
int l = n;
H[n][n] = 1.0;
@ -581,9 +581,9 @@ private:
} else {
H[i][n] = -r / (eps * norm);
}
// Solve real equations
} else {
x = H[i][i + 1];
y = H[i + 1][i];
@ -596,9 +596,9 @@ private:
H[i + 1][n] = (-s - y * t) / z;
}
}
// Overflow control
t = std::abs(H[i][n]);
if ((eps * t) * t > 1) {
for (int j = i; j <= n; j++) {
@ -607,14 +607,14 @@ private:
}
}
}
// Complex vector
} else if (q < 0) {
int l = n - 1;
// Last vector component imaginary so matrix is triangular
if (std::abs(H[n][n - 1]) > std::abs(H[n - 1][n])) {
H[n - 1][n - 1] = q / H[n][n - 1];
H[n - 1][n] = -(H[n][n] - p) / H[n][n - 1];
@ -634,7 +634,7 @@ private:
sa = sa + H[i][j] * H[j][n];
}
w = H[i][i] - p;
if (e[i] < 0.0) {
z = w;
r = ra;
@ -646,9 +646,9 @@ private:
H[i][n - 1] = cdivr;
H[i][n] = cdivi;
} else {
// Solve complex equations
x = H[i][i + 1];
y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
@ -673,9 +673,9 @@ private:
H[i + 1][n] = cdivi;
}
}
// Overflow control
t = max(std::abs(H[i][n - 1]), std::abs(H[i][n]));
if ((eps * t) * t > 1) {
for (int j = i; j <= n; j++) {
@ -687,9 +687,9 @@ private:
}
}
}
// Vectors of isolated roots
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
for (int j = i; j < nn; j++) {
@ -697,9 +697,9 @@ private:
}
}
}
// Back transformation to get eigenvectors of original matrix
for (int j = nn - 1; j >= low; j--) {
for (int i = low; i <= high; i++) {
z = 0.0;
@ -710,7 +710,7 @@ private:
}
}
}
// Nonsymmetric reduction to Hessenberg form.
void orthes() {
// This is derived from the Algol procedures orthes and ortran,
@ -719,19 +719,19 @@ private:
// Fortran subroutines in EISPACK.
int low = 0;
int high = n - 1;
for (int m = low + 1; m <= high - 1; m++) {
// Scale column.
double scale = 0.0;
for (int i = m; i <= high; i++) {
scale = scale + std::abs(H[i][m - 1]);
}
if (scale != 0.0) {
// Compute Householder transformation.
double h = 0.0;
for (int i = high; i >= m; i--) {
ort[i] = H[i][m - 1] / scale;
@ -743,10 +743,10 @@ private:
}
h = h - ort[m] * g;
ort[m] = ort[m] - g;
// Apply Householder similarity transformation
// H = (I-u*u'/h)*H*(I-u*u')/h)
for (int j = m; j < n; j++) {
double f = 0.0;
for (int i = high; i >= m; i--) {
@ -757,7 +757,7 @@ private:
H[i][j] -= f * ort[i];
}
}
for (int i = 0; i <= high; i++) {
double f = 0.0;
for (int j = high; j >= m; j--) {
@ -772,15 +772,15 @@ private:
H[m][m - 1] = scale * g;
}
}
// Accumulate transformations (Algol's ortran).
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
V[i][j] = (i == j ? 1.0 : 0.0);
}
}
for (int m = high - 1; m >= low + 1; m--) {
if (H[m][m - 1] != 0.0) {
for (int i = m + 1; i <= high; i++) {
@ -800,7 +800,7 @@ private:
}
}
}
// Releases all internal working memory.
void release() {
// releases the working data
@ -814,7 +814,7 @@ private:
delete[] H;
delete[] V;
}
// Computes the Eigenvalue Decomposition for a matrix given in H.
void compute() {
// Allocate memory for the working data.
@ -839,11 +839,11 @@ private:
// Deallocate the memory by releasing all internal working data.
release();
}
public:
EigenvalueDecomposition()
: n(0) { }
// Initializes & computes the Eigenvalue Decomposition for a general matrix
// given in src. This function is a port of the EigenvalueSolver in JAMA,
// which has been released to public domain by The MathWorks and the
@ -851,7 +851,7 @@ public:
EigenvalueDecomposition(InputArray src) {
compute(src);
}
// This function computes the Eigenvalue Decomposition for a general matrix
// given in src. This function is a port of the EigenvalueSolver in JAMA,
// which has been released to public domain by The MathWorks and the
@ -883,9 +883,9 @@ public:
compute();
}
}
~EigenvalueDecomposition() {}
// Returns the eigenvalues of the Eigenvalue Decomposition.
Mat eigenvalues() { return _eigenvalues; }
// Returns the eigenvectors of the Eigenvalue Decomposition.
@ -1045,6 +1045,6 @@ Mat LDA::project(InputArray src) {
Mat LDA::reconstruct(InputArray src) {
return subspaceReconstruct(_eigenvectors, Mat(), _dataAsRow ? src : src.getMat().t());
}
}

View File

@ -43,98 +43,99 @@
#include "precomp.hpp"
#include <limits>
namespace cv
namespace
{
using namespace cv;
const size_t MAX_STACK_SIZE = 255;
const size_t MAX_LEAFS = 8;
bool checkIfNodeOutsideSphere(const Octree::Node& node, const Point3f& c, float r)
{
if (node.x_max < (c.x - r) || node.y_max < (c.y - r) || node.z_max < (c.z - r))
return true;
if ((c.x + r) < node.x_min || (c.y + r) < node.y_min || (c.z + r) < node.z_min)
return true;
return false;
}
bool checkIfNodeInsideSphere(const Octree::Node& node, const Point3f& c, float r)
{
r *= r;
float d2_xmin = (node.x_min - c.x) * (node.x_min - c.x);
float d2_ymin = (node.y_min - c.y) * (node.y_min - c.y);
float d2_zmin = (node.z_min - c.z) * (node.z_min - c.z);
if (d2_xmin + d2_ymin + d2_zmin > r)
return false;
float d2_zmax = (node.z_max - c.z) * (node.z_max - c.z);
if (d2_xmin + d2_ymin + d2_zmax > r)
return false;
float d2_ymax = (node.y_max - c.y) * (node.y_max - c.y);
if (d2_xmin + d2_ymax + d2_zmin > r)
return false;
if (d2_xmin + d2_ymax + d2_zmax > r)
return false;
float d2_xmax = (node.x_max - c.x) * (node.x_max - c.x);
if (d2_xmax + d2_ymin + d2_zmin > r)
return false;
if (d2_xmax + d2_ymin + d2_zmax > r)
return false;
if (d2_xmax + d2_ymax + d2_zmin > r)
return false;
if (d2_xmax + d2_ymax + d2_zmax > r)
return false;
return true;
}
void fillMinMax(const vector<Point3f>& points, Octree::Node& node)
{
node.x_max = node.y_max = node.z_max = std::numeric_limits<float>::min();
node.x_min = node.y_min = node.z_min = std::numeric_limits<float>::max();
for (size_t i = 0; i < points.size(); ++i)
{
const Point3f& point = points[i];
if (node.x_max < point.x)
node.x_max = point.x;
if (node.y_max < point.y)
node.y_max = point.y;
if (node.z_max < point.z)
node.z_max = point.z;
if (node.x_min > point.x)
node.x_min = point.x;
if (node.y_min > point.y)
node.y_min = point.y;
if (node.z_min > point.z)
node.z_min = point.z;
}
}
size_t findSubboxForPoint(const Point3f& point, const Octree::Node& node)
{
size_t ind_x = point.x < (node.x_max + node.x_min) / 2 ? 0 : 1;
size_t ind_y = point.y < (node.y_max + node.y_min) / 2 ? 0 : 1;
size_t ind_z = point.z < (node.z_max + node.z_min) / 2 ? 0 : 1;
return (ind_x << 2) + (ind_y << 1) + (ind_z << 0);
}
void initChildBox(const Octree::Node& parent, size_t boxIndex, Octree::Node& child)
@ -142,58 +143,61 @@ namespace cv
child.x_min = child.x_max = (parent.x_max + parent.x_min) / 2;
child.y_min = child.y_max = (parent.y_max + parent.y_min) / 2;
child.z_min = child.z_max = (parent.z_max + parent.z_min) / 2;
if ((boxIndex >> 0) & 1)
child.z_max = parent.z_max;
else
child.z_min = parent.z_min;
if ((boxIndex >> 1) & 1)
child.y_max = parent.y_max;
else
child.y_min = parent.y_min;
if ((boxIndex >> 2) & 1)
child.x_max = parent.x_max;
else
child.x_min = parent.x_min;
}
}//namespace
////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// Octree //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
namespace cv
{
Octree::Octree()
{
}
Octree::Octree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
{
buildTree(points3d, maxLevels, minPoints);
}
Octree::~Octree()
{
}
void Octree::getPointsWithinSphere(const Point3f& center, float radius, vector<Point3f>& out) const
{
out.clear();
if (nodes.empty())
return;
int stack[MAX_STACK_SIZE];
int pos = 0;
stack[pos] = 0;
while (pos >= 0)
{
const Node& cur = nodes[stack[pos--]];
if (checkIfNodeOutsideSphere(cur, center, radius))
continue;
if (checkIfNodeInsideSphere(cur, center, radius))
{
size_t sz = out.size();
@ -202,133 +206,133 @@ namespace cv
out[sz++] = points[i];
continue;
}
if (cur.isLeaf)
{
double r2 = radius * radius;
size_t sz = out.size();
out.resize(sz + (cur.end - cur.begin));
for (int i = cur.begin; i < cur.end; ++i)
{
const Point3f& point = points[i];
double dx = (point.x - center.x);
double dy = (point.y - center.y);
double dz = (point.z - center.z);
double dist2 = dx * dx + dy * dy + dz * dz;
if (dist2 < r2)
out[sz++] = point;
};
out.resize(sz);
continue;
}
if (cur.children[0])
stack[++pos] = cur.children[0];
if (cur.children[1])
stack[++pos] = cur.children[1];
if (cur.children[2])
stack[++pos] = cur.children[2];
if (cur.children[3])
stack[++pos] = cur.children[3];
if (cur.children[4])
stack[++pos] = cur.children[4];
if (cur.children[5])
stack[++pos] = cur.children[5];
if (cur.children[6])
stack[++pos] = cur.children[6];
if (cur.children[7])
stack[++pos] = cur.children[7];
}
}
void Octree::buildTree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
{
assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
points.resize(points3d.size());
std::copy(points3d.begin(), points3d.end(), points.begin());
this->minPoints = minPoints;
nodes.clear();
nodes.push_back(Node());
Node& root = nodes[0];
fillMinMax(points, root);
root.isLeaf = true;
root.maxLevels = maxLevels;
root.begin = 0;
root.end = (int)points.size();
for (size_t i = 0; i < MAX_LEAFS; i++)
root.children[i] = 0;
if (maxLevels != 1 && (root.end - root.begin) > minPoints)
{
root.isLeaf = false;
buildNext(0);
}
}
void Octree::buildNext(size_t nodeInd)
{
size_t size = nodes[nodeInd].end - nodes[nodeInd].begin;
vector<size_t> boxBorders(MAX_LEAFS+1, 0);
vector<size_t> boxIndices(size);
vector<Point3f> tempPoints(size);
for (int i = nodes[nodeInd].begin, j = 0; i < nodes[nodeInd].end; ++i, ++j)
{
const Point3f& p = points[i];
size_t subboxInd = findSubboxForPoint(p, nodes[nodeInd]);
boxBorders[subboxInd+1]++;
boxIndices[j] = subboxInd;
tempPoints[j] = p;
}
for (size_t i = 1; i < boxBorders.size(); ++i)
boxBorders[i] += boxBorders[i-1];
vector<size_t> writeInds(boxBorders.begin(), boxBorders.end());
for (size_t i = 0; i < size; ++i)
{
size_t boxIndex = boxIndices[i];
Point3f& curPoint = tempPoints[i];
size_t copyTo = nodes[nodeInd].begin + writeInds[boxIndex]++;
points[copyTo] = curPoint;
}
for (size_t i = 0; i < MAX_LEAFS; ++i)
{
if (boxBorders[i] == boxBorders[i+1])
continue;
nodes.push_back(Node());
Node& child = nodes.back();
initChildBox(nodes[nodeInd], i, child);
child.isLeaf = true;
child.maxLevels = nodes[nodeInd].maxLevels - 1;
child.begin = nodes[nodeInd].begin + (int)boxBorders[i+0];
child.end = nodes[nodeInd].begin + (int)boxBorders[i+1];
for (size_t k = 0; k < MAX_LEAFS; k++)
child.children[k] = 0;
nodes[nodeInd].children[i] = (int)(nodes.size() - 1);
if (child.maxLevels != 1 && (child.end - child.begin) > minPoints)
{
child.isLeaf = false;
@ -336,5 +340,5 @@ namespace cv
}
}
}
}
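A brief sketch, not in the patch, of how the Octree above is typically used: build it over a point cloud, then query a spherical neighbourhood. The cloud contents, radius and header location are illustrative assumptions.

#include <opencv2/contrib/contrib.hpp>   // assumed location of the Octree declaration
#include <cstdlib>
#include <vector>

static void octreeQuerySketch()
{
    std::vector<cv::Point3f> cloud;
    for (int i = 0; i < 1000; ++i)
        cloud.push_back(cv::Point3f((float)(rand() % 100),
                                    (float)(rand() % 100),
                                    (float)(rand() % 100)));

    cv::Octree tree(cloud, 10 /*maxLevels*/, 20 /*minPoints*/);

    std::vector<cv::Point3f> neighbours;
    tree.getPointsWithinSphere(cv::Point3f(50.f, 50.f, 50.f), 5.f, neighbours);
}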

View File

@ -43,11 +43,11 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif

View File

@ -1299,6 +1299,7 @@ public:
GPU_MAT = 9 << KIND_SHIFT
};
_InputArray();
_InputArray(const Mat& m);
_InputArray(const MatExpr& expr);
template<typename _Tp> _InputArray(const _Tp* vec, int n);
@ -1328,6 +1329,8 @@ public:
virtual int channels(int i=-1) const;
virtual bool empty() const;
virtual ~_InputArray();
int flags;
void* obj;
Size sz;
@ -1384,6 +1387,8 @@ public:
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void release() const;
virtual void clear() const;
virtual ~_OutputArray();
};
typedef const _InputArray& InputArray;
@ -3977,7 +3982,7 @@ public:
CV_WRAP virtual bool isOpened() const;
//! closes the file and releases all the memory buffers
CV_WRAP virtual void release();
//! closes the file, releases all the memory buffers and returns the text string
CV_WRAP virtual string releaseAndGetString();
//! returns the first element of the top-level mapping

View File

@ -60,34 +60,34 @@
#endif
#if defined WIN32 || defined WINCE
#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
#endif
#include <windows.h>
#undef small
#undef min
#undef max
# ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
# define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
# endif
# include <windows.h>
# undef small
# undef min
# undef max
#else
#include <pthread.h>
# include <pthread.h>
#endif
#ifdef __BORLANDC__
#ifndef WIN32
#define WIN32
#endif
#ifndef _WIN32
#define _WIN32
#endif
#define CV_DLL
#undef _CV_ALWAYS_PROFILE_
#define _CV_ALWAYS_NO_PROFILE_
# ifndef WIN32
# define WIN32
# endif
# ifndef _WIN32
# define _WIN32
# endif
# define CV_DLL
# undef _CV_ALWAYS_PROFILE_
# define _CV_ALWAYS_NO_PROFILE_
#endif
#ifndef FALSE
#define FALSE 0
# define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
# define TRUE 1
#endif
#define __BEGIN__ __CV_BEGIN__
@ -95,7 +95,7 @@
#define EXIT __CV_EXIT__
#ifdef HAVE_IPP
#include "ipp.h"
# include "ipp.h"
CV_INLINE IppiSize ippiSize(int width, int height)
{
@ -104,137 +104,132 @@ CV_INLINE IppiSize ippiSize(int width, int height)
}
#endif
#if defined __SSE2__ || _MSC_VER >= 1300
#include "emmintrin.h"
#define CV_SSE 1
#define CV_SSE2 1
#if defined __SSE3__ || _MSC_VER >= 1500
#include "pmmintrin.h"
#define CV_SSE3 1
#endif
#if defined __SSSE3__
#include "tmmintrin.h"
#define CV_SSSE3 1
#endif
#if defined __SSE2__ || (defined _MSC_VER && _MSC_VER >= 1300)
# include "emmintrin.h"
# define CV_SSE 1
# define CV_SSE2 1
# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
# include "pmmintrin.h"
# define CV_SSE3 1
# else
# define CV_SSE3 0
# endif
# if defined __SSSE3__
# include "tmmintrin.h"
# define CV_SSSE3 1
# else
# define CV_SSSE3 0
# endif
#else
#define CV_SSE 0
#define CV_SSE2 0
#define CV_SSE3 0
#define CV_SSSE3 0
# define CV_SSE 0
# define CV_SSE2 0
# define CV_SSE3 0
# define CV_SSSE3 0
#endif
#if defined ANDROID && defined __ARM_NEON__ && defined __GNUC__
#include "arm_neon.h"
#define CV_NEON 1
#if defined ANDROID && defined __ARM_NEON__
# include "arm_neon.h"
# define CV_NEON 1
#define CPU_HAS_NEON_FEATURE (true)
# define CPU_HAS_NEON_FEATURE (true)
//TODO: make real check using stuff from "cpu-features.h"
//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON)
#else
#define CV_NEON 0
#define CPU_HAS_NEON_FEATURE (false)
#endif
#ifdef CV_ICC
#define CV_ENABLE_UNROLLED 0
#else
#define CV_ENABLE_UNROLLED 1
# define CV_NEON 0
# define CPU_HAS_NEON_FEATURE (false)
#endif
#ifndef IPPI_CALL
#define IPPI_CALL(func) CV_Assert((func) >= 0)
# define IPPI_CALL(func) CV_Assert((func) >= 0)
#endif
#ifdef HAVE_TBB
#include "tbb/tbb_stddef.h"
#if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
#include "tbb/tbb.h"
#include "tbb/task.h"
#undef min
#undef max
#else
#undef HAVE_TBB
#endif
# include "tbb/tbb_stddef.h"
# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
# include "tbb/tbb.h"
# include "tbb/task.h"
# undef min
# undef max
# else
# undef HAVE_TBB
# endif
#endif
#ifdef HAVE_EIGEN
#include <Eigen/Core>
#include "opencv2/core/eigen.hpp"
# include <Eigen/Core>
# include "opencv2/core/eigen.hpp"
#endif
#ifdef __cplusplus
namespace cv
{
#ifdef HAVE_TBB
namespace cv
{
typedef tbb::blocked_range<int> BlockedRange;
template<typename Body> static inline
void parallel_for( const BlockedRange& range, const Body& body )
{
tbb::parallel_for(range, body);
}
template<typename Iterator, typename Body> static inline
void parallel_do( Iterator first, Iterator last, const Body& body )
{
tbb::parallel_do(first, last, body);
}
typedef tbb::split Split;
template<typename Body> static inline
void parallel_reduce( const BlockedRange& range, Body& body )
{
tbb::parallel_reduce(range, body);
}
typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
}
#else
namespace cv
{
class BlockedRange
{
public:
BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
int begin() const { return _begin; }
int end() const { return _end; }
int grainsize() const { return _grainsize; }
protected:
int _begin, _end, _grainsize;
};
template<typename Body> static inline
void parallel_for( const BlockedRange& range, const Body& body )
{
body(range);
}
typedef std::vector<Rect> ConcurrentRectVector;
typedef std::vector<double> ConcurrentDoubleVector;
template<typename Iterator, typename Body> static inline
void parallel_do( Iterator first, Iterator last, const Body& body )
{
for( ; first != last; ++first )
body(*first);
}
class Split {};
template<typename Body> static inline
void parallel_reduce( const BlockedRange& range, Body& body )
{
body(range);
}
typedef tbb::blocked_range<int> BlockedRange;
template<typename Body> static inline
void parallel_for( const BlockedRange& range, const Body& body )
{
tbb::parallel_for(range, body);
}
template<typename Iterator, typename Body> static inline
void parallel_do( Iterator first, Iterator last, const Body& body )
{
tbb::parallel_do(first, last, body);
}
typedef tbb::split Split;
template<typename Body> static inline
void parallel_reduce( const BlockedRange& range, Body& body )
{
tbb::parallel_reduce(range, body);
}
typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
#else
class BlockedRange
{
public:
BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
int begin() const { return _begin; }
int end() const { return _end; }
int grainsize() const { return _grainsize; }
protected:
int _begin, _end, _grainsize;
};
template<typename Body> static inline
void parallel_for( const BlockedRange& range, const Body& body )
{
body(range);
}
typedef std::vector<Rect> ConcurrentRectVector;
typedef std::vector<double> ConcurrentDoubleVector;
template<typename Iterator, typename Body> static inline
void parallel_do( Iterator first, Iterator last, const Body& body )
{
for( ; first != last; ++first )
body(*first);
}
class Split {};
template<typename Body> static inline
void parallel_reduce( const BlockedRange& range, Body& body )
{
body(range);
}
#endif
} //namespace cv
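After the reshuffle above, a single cv namespace block provides BlockedRange, parallel_for, parallel_do and parallel_reduce either as thin TBB wrappers or as serial fallbacks. A usage sketch that compiles against both branches; the functor and function names are illustrative and assume opencv2/core/internal.hpp is included:

// The same body functor works whether or not HAVE_TBB is defined.
struct SquareBody
{
    float* data;
    void operator()(const cv::BlockedRange& r) const
    {
        for (int i = r.begin(); i < r.end(); ++i)
            data[i] *= data[i];
    }
};

static void squareAll(float* data, int n)
{
    SquareBody body;
    body.data = data;
    cv::parallel_for(cv::BlockedRange(0, n), body);  // TBB splits the range; the fallback runs it whole
}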
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
static Algorithm* create##classname() \
{ \
return new classname; \
@ -261,7 +256,7 @@ CV_INLINE IppiSize ippiSize(int width, int height)
return &classname##_info(); \
}
#endif
#endif //__cplusplus
/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */
#define CV_MAX_INLINE_MAT_OP_SIZE 10
@ -305,9 +300,9 @@ CV_INLINE IppiSize ippiSize(int width, int height)
#define CV_MAX_STRLEN 1024
#if 0 /*def CV_CHECK_FOR_NANS*/
#define CV_CHECK_NANS( arr ) cvCheckArray((arr))
# define CV_CHECK_NANS( arr ) cvCheckArray((arr))
#else
#define CV_CHECK_NANS( arr )
# define CV_CHECK_NANS( arr )
#endif
/****************************************************************************************\
@ -316,38 +311,38 @@ CV_INLINE IppiSize ippiSize(int width, int height)
/* get alloca declaration */
#ifdef __GNUC__
#undef alloca
#define alloca __builtin_alloca
#define CV_HAVE_ALLOCA 1
# undef alloca
# define alloca __builtin_alloca
# define CV_HAVE_ALLOCA 1
#elif defined WIN32 || defined _WIN32 || \
defined WINCE || defined _MSC_VER || defined __BORLANDC__
#include <malloc.h>
#define CV_HAVE_ALLOCA 1
# include <malloc.h>
# define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA_H
#include <alloca.h>
#define CV_HAVE_ALLOCA 1
# include <alloca.h>
# define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA
#include <stdlib.h>
#define CV_HAVE_ALLOCA 1
# include <stdlib.h>
# define CV_HAVE_ALLOCA 1
#else
#undef CV_HAVE_ALLOCA
# undef CV_HAVE_ALLOCA
#endif
#ifdef __GNUC__
#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
#elif defined _MSC_VER
#define CV_DECL_ALIGNED(x) __declspec(align(x))
# define CV_DECL_ALIGNED(x) __declspec(align(x))
#else
#define CV_DECL_ALIGNED(x)
# define CV_DECL_ALIGNED(x)
#endif
#if CV_HAVE_ALLOCA
/* ! DO NOT make it an inline function */
#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
# define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
#endif
#ifndef CV_IMPL
#define CV_IMPL CV_EXTERN_C
# define CV_IMPL CV_EXTERN_C
#endif
#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; }
@ -687,25 +682,25 @@ typedef enum CvStatus
CV_UNSUPPORTED_DEPTH_ERR = -101,
CV_UNSUPPORTED_FORMAT_ERR = -100,
CV_BADARG_ERR = -49, //ipp comp
CV_NOTDEFINED_ERR = -48, //ipp comp
CV_BADARG_ERR = -49, //ipp comp
CV_NOTDEFINED_ERR = -48, //ipp comp
CV_BADCHANNELS_ERR = -47, //ipp comp
CV_BADRANGE_ERR = -44, //ipp comp
CV_BADSTEP_ERR = -29, //ipp comp
CV_BADCHANNELS_ERR = -47, //ipp comp
CV_BADRANGE_ERR = -44, //ipp comp
CV_BADSTEP_ERR = -29, //ipp comp
CV_BADFLAG_ERR = -12,
CV_DIV_BY_ZERO_ERR = -11, //ipp comp
CV_BADCOEF_ERR = -10,
CV_BADFLAG_ERR = -12,
CV_DIV_BY_ZERO_ERR = -11, //ipp comp
CV_BADCOEF_ERR = -10,
CV_BADFACTOR_ERR = -7,
CV_BADPOINT_ERR = -6,
CV_BADSCALE_ERR = -4,
CV_OUTOFMEM_ERR = -3,
CV_NULLPTR_ERR = -2,
CV_BADSIZE_ERR = -1,
CV_NO_ERR = 0,
CV_OK = CV_NO_ERR
CV_BADFACTOR_ERR = -7,
CV_BADPOINT_ERR = -6,
CV_BADSCALE_ERR = -4,
CV_OUTOFMEM_ERR = -3,
CV_NULLPTR_ERR = -2,
CV_BADSIZE_ERR = -1,
CV_NO_ERR = 0,
CV_OK = CV_NO_ERR
}
CvStatus;
@ -720,8 +715,7 @@ CvFuncTable;
typedef struct CvBigFuncTable
{
void* fn_2d[CV_DEPTH_MAX*4];
}
CvBigFuncTable;
} CvBigFuncTable;
#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \
(tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \
@ -732,13 +726,14 @@ CvBigFuncTable;
(tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
(tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
#ifdef __cplusplus
//! OpenGL extension table
class CV_EXPORTS CvOpenGlFuncTab
{
public:
virtual ~CvOpenGlFuncTab();
virtual void genBuffers(int n, unsigned int* buffers) const = 0;
virtual void genBuffers(int n, unsigned int* buffers) const = 0;
virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0;
virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0;
@ -764,4 +759,6 @@ CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* fu
#define CV_CheckGlError() CV_DbgAssert( (::icvCheckGlError(__FILE__, __LINE__)) )
#endif
#endif
#endif //__cplusplus
#endif // __OPENCV_CORE_INTERNAL_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -43,122 +43,132 @@
#ifndef __OPENCV_CORE_TYPES_H__
#define __OPENCV_CORE_TYPES_H__
#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300
#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER
# if _MSC_VER > 1300
# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
# endif
#endif
#ifndef SKIP_INCLUDES
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#if !defined _MSC_VER && !defined __BORLANDC__
#include <stdint.h>
# include <stdint.h>
#endif
#if defined __ICL
#define CV_ICC __ICL
#elif defined __ICC
#define CV_ICC __ICC
#elif defined __ECL
#define CV_ICC __ECL
#elif defined __ECC
#define CV_ICC __ECC
#elif defined __INTEL_COMPILER
#define CV_ICC __INTEL_COMPILER
#endif
#if defined __ICL
# define CV_ICC __ICL
#elif defined __ICC
# define CV_ICC __ICC
#elif defined __ECL
# define CV_ICC __ECL
#elif defined __ECC
# define CV_ICC __ECC
#elif defined __INTEL_COMPILER
# define CV_ICC __INTEL_COMPILER
#endif
#if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__)
#if defined WIN32
#include <intrin.h>
#endif
#if __SSE2__ || !defined __GNUC__
#include <emmintrin.h>
#endif
#endif
#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
# define CV_ENABLE_UNROLLED 0
#else
# define CV_ENABLE_UNROLLED 1
#endif
#if defined __BORLANDC__
#include <fastmath.h>
#else
#include <math.h>
#endif
#if (defined _M_X64 && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__)
# if defined WIN32
# include <intrin.h>
# endif
# if __SSE2__ || !defined __GNUC__
# include <emmintrin.h>
# endif
#endif
#if defined __BORLANDC__
# include <fastmath.h>
#else
# include <math.h>
#endif
#ifdef HAVE_IPL
# ifndef __IPL_H__
# if defined WIN32 || defined _WIN32
# include <ipl.h>
# else
# include <ipl/ipl.h>
# endif
# endif
#elif defined __IPL_H__
# define HAVE_IPL
#endif
#ifdef HAVE_IPL
#ifndef __IPL_H__
#if defined WIN32 || defined _WIN32
#include <ipl.h>
#else
#include <ipl/ipl.h>
#endif
#endif
#elif defined __IPL_H__
#define HAVE_IPL
#endif
#endif // SKIP_INCLUDES
#if defined WIN32 || defined _WIN32
#define CV_CDECL __cdecl
#define CV_STDCALL __stdcall
# define CV_CDECL __cdecl
# define CV_STDCALL __stdcall
#else
#define CV_CDECL
#define CV_STDCALL
# define CV_CDECL
# define CV_STDCALL
#endif
#ifndef CV_EXTERN_C
#ifdef __cplusplus
#define CV_EXTERN_C extern "C"
#define CV_DEFAULT(val) = val
#else
#define CV_EXTERN_C
#define CV_DEFAULT(val)
#endif
# ifdef __cplusplus
# define CV_EXTERN_C extern "C"
# define CV_DEFAULT(val) = val
# else
# define CV_EXTERN_C
# define CV_DEFAULT(val)
# endif
#endif
#ifndef CV_EXTERN_C_FUNCPTR
#ifdef __cplusplus
#define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
#else
#define CV_EXTERN_C_FUNCPTR(x) typedef x
#endif
# ifdef __cplusplus
# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
# else
# define CV_EXTERN_C_FUNCPTR(x) typedef x
# endif
#endif
#ifndef CV_INLINE
#if defined __cplusplus
#define CV_INLINE inline
#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
#define CV_INLINE __inline
#else
#define CV_INLINE static
#endif
# if defined __cplusplus
# define CV_INLINE inline
# elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
# define CV_INLINE __inline
# else
# define CV_INLINE static
# endif
#endif /* CV_INLINE */
#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
#define CV_EXPORTS __declspec(dllexport)
# define CV_EXPORTS __declspec(dllexport)
#else
#define CV_EXPORTS
# define CV_EXPORTS
#endif
#ifndef CVAPI
#define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
#endif
#if defined _MSC_VER || defined __BORLANDC__
typedef __int64 int64;
typedef unsigned __int64 uint64;
#define CV_BIG_INT(n) n##I64
#define CV_BIG_UINT(n) n##UI64
typedef __int64 int64;
typedef unsigned __int64 uint64;
# define CV_BIG_INT(n) n##I64
# define CV_BIG_UINT(n) n##UI64
#else
typedef int64_t int64;
typedef uint64_t uint64;
#define CV_BIG_INT(n) n##LL
#define CV_BIG_UINT(n) n##ULL
typedef int64_t int64;
typedef uint64_t uint64;
# define CV_BIG_INT(n) n##LL
# define CV_BIG_UINT(n) n##ULL
#endif
#ifndef HAVE_IPL
typedef unsigned char uchar;
typedef unsigned short ushort;
typedef unsigned char uchar;
typedef unsigned short ushort;
#endif
typedef signed char schar;
@ -203,7 +213,7 @@ Cv64suf;
typedef int CVStatus;
enum {
enum {
CV_StsOk= 0, /* everithing is ok */
CV_StsBackTrace= -1, /* pseudo error for back trace */
CV_StsError= -2, /* unknown /unspecified error */
@ -241,8 +251,8 @@ enum {
CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */
CV_StsObjectNotFound= -204, /* request can't be completed */
CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */
CV_StsBadFlag= -206, /* flag is wrong or not supported */
CV_StsBadPoint= -207, /* bad CvPoint */
CV_StsBadFlag= -206, /* flag is wrong or not supported */
CV_StsBadPoint= -207, /* bad CvPoint */
CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/
CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */
CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/
@ -250,8 +260,8 @@ enum {
CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */
CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */
CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */
CV_StsAssert= -215, /* assertion failed */
CV_GpuNotSupported= -216,
CV_StsAssert= -215, /* assertion failed */
CV_GpuNotSupported= -216,
CV_GpuApiCallError= -217,
CV_OpenGlNotSupported= -218,
CV_OpenGlApiCallError= -219
@ -262,7 +272,7 @@ enum {
\****************************************************************************************/
#ifdef HAVE_TEGRA_OPTIMIZATION
# include "tegra_round.hpp"
# include "tegra_round.hpp"
#endif
#define CV_PI 3.1415926535897932384626433832795
@ -271,11 +281,11 @@ enum {
#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))
#ifndef MIN
#define MIN(a,b) ((a) > (b) ? (b) : (a))
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
#define MAX(a,b) ((a) < (b) ? (b) : (a))
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
/* min & max without jumps */
@ -285,9 +295,9 @@ enum {
/* absolute value without jumps */
#ifndef __cplusplus
#define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
#else
#define CV_IABS(a) abs(a)
# define CV_IABS(a) abs(a)
#endif
#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b)))
#define CV_SIGN(a) CV_CMP((a),0)
@ -306,11 +316,11 @@ CV_INLINE int cvRound( double value )
}
return t;
#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
# ifdef HAVE_TEGRA_OPTIMIZATION
# ifdef HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
# else
# else
return (int)lrint(value);
# endif
# endif
#else
// while this is not IEEE754-compliant rounding, it's usually a good enough approximation
return (int)(value + (value >= 0 ? 0.5 : -0.5));
@ -318,7 +328,7 @@ CV_INLINE int cvRound( double value )
}
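The comment above about non-IEEE754-compliant rounding concerns ties: lrint() honours the current rounding mode (round half to even by default), while the portable fallback rounds halves away from zero. A small comparison, not part of the patch:

#include <math.h>
#include <stdio.h>

static int round_lrint(double v)    { return (int)lrint(v); }
static int round_fallback(double v) { return (int)(v + (v >= 0 ? 0.5 : -0.5)); }

int main(void)
{
    printf("%d %d\n", round_lrint(2.5),  round_fallback(2.5));   /* typically 2 and 3  */
    printf("%d %d\n", round_lrint(-2.5), round_fallback(-2.5));  /* typically -2 and -3 */
    return 0;
}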
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
#include "emmintrin.h"
# include "emmintrin.h"
#endif
CV_INLINE int cvFloor( double value )
@ -1886,6 +1896,6 @@ typedef struct CvModuleInfo
}
CvModuleInfo;
#endif /*_CXCORE_TYPES_H_*/
#endif /*__OPENCV_CORE_TYPES_H__*/
/* End of file. */

View File

@ -3,7 +3,7 @@
#include "opencv2/ts/ts.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -28,11 +28,11 @@ PERF_TEST_P(Size_MatType, mean, TYPICAL_MATS)
Mat src(sz, type);
Scalar s;
declare.in(src, WARMUP_RNG).out(s);
TEST_CYCLE() s = mean(src);
SANITY_CHECK(s, 1e-6);
}
@ -44,11 +44,11 @@ PERF_TEST_P(Size_MatType, mean_mask, TYPICAL_MATS)
Mat src(sz, type);
Mat mask = Mat::ones(src.size(), CV_8U);
Scalar s;
declare.in(src, WARMUP_RNG).in(mask).out(s);
TEST_CYCLE() s = mean(src, mask);
SANITY_CHECK(s, 1e-6);
}
@ -64,7 +64,7 @@ PERF_TEST_P(Size_MatType, meanStdDev, TYPICAL_MATS)
declare.in(src, WARMUP_RNG).out(mean, dev);
TEST_CYCLE() meanStdDev(src, mean, dev);
SANITY_CHECK(mean, 1e-6);
SANITY_CHECK(dev, 1e-6);
}
@ -80,9 +80,9 @@ PERF_TEST_P(Size_MatType, meanStdDev_mask, TYPICAL_MATS)
Scalar dev;
declare.in(src, WARMUP_RNG).in(mask).out(mean, dev);
TEST_CYCLE() meanStdDev(src, mean, dev, mask);
SANITY_CHECK(mean, 1e-6);
SANITY_CHECK(dev, 1e-6);
}
@ -96,8 +96,8 @@ PERF_TEST_P(Size_MatType, countNonZero, testing::Combine( testing::Values( TYPIC
int cnt = 0;
declare.in(src, WARMUP_RNG);
TEST_CYCLE() cnt = countNonZero(src);
SANITY_CHECK(cnt);
}

View File

@ -55,7 +55,9 @@ static void* OutOfMemoryError(size_t size)
#if CV_USE_SYSTEM_MALLOC
#if defined WIN32 || defined _WIN32
void deleteThreadAllocData() {}
#endif
void* fastMalloc( size_t size )
{
@ -66,14 +68,14 @@ void* fastMalloc( size_t size )
adata[-1] = udata;
return adata;
}
void fastFree(void* ptr)
{
if(ptr)
{
uchar* udata = ((uchar**)ptr)[-1];
CV_DbgAssert(udata < (uchar*)ptr &&
((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
free(udata);
}
}
@ -388,7 +390,7 @@ struct ThreadData
#ifdef WIN32
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif //WINCE
static DWORD tlsKey;
@ -535,7 +537,7 @@ void* fastMalloc( size_t size )
freePtr = block;
if( !data )
{
block = gcPtr;
block = gcPtr;
for( int k = 0; k < 2; k++ )
{
SANITY_CHECK(block);
@ -620,7 +622,7 @@ void fastFree( void* ptr )
Block*& startPtr = tls->bins[idx][START];
Block*& freePtr = tls->bins[idx][FREE];
Block*& gcPtr = tls->bins[idx][GC];
if( block == block->next )
{
CV_DbgAssert( startPtr == block && freePtr == block && gcPtr == block );

View File

@ -974,7 +974,7 @@ void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t bl
scbuf[i] = scbuf[i - esz];
}
void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
static void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
InputArray _mask, const BinaryFunc* tab, bool bitwise)
{
int kind1 = _src1.kind(), kind2 = _src2.kind();
@ -1216,7 +1216,7 @@ void cv::min(const Mat& src1, double src2, Mat& dst)
namespace cv
{
void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false, void* usrdata=0)
{
int kind1 = _src1.kind(), kind2 = _src2.kind();

View File

@ -6,6 +6,7 @@
using namespace std;
using namespace cv;
namespace {
void helpParser()
{
printf("\nThe CommandLineParser class is designed for command line arguments parsing\n"
@ -89,6 +90,8 @@ string del_space(string name)
return name;
}
}//namespace
CommandLineParser::CommandLineParser(int argc, const char* const argv[], const char* keys)
{
std::string keys_buffer;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -116,13 +116,13 @@ static void writeMat(std::ostream& out, const Mat& m, char rowsep, char elembrac
{
CV_Assert(m.dims <= 2);
int type = m.type();
char crowbrace = getCloseBrace(rowsep);
char orowbrace = crowbrace ? rowsep : '\0';
if( orowbrace || isspace(rowsep) )
rowsep = '\0';
for( int i = 0; i < m.rows; i++ )
{
if(orowbrace)
@ -151,7 +151,7 @@ public:
writeMat(out, m, ';', ' ', m.cols == 1);
out << "]";
}
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
@ -168,7 +168,7 @@ public:
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
out << "]";
}
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, '[');
@ -190,7 +190,7 @@ public:
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
out << "], type='" << numpyTypes[m.depth()] << "')";
}
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, '[');
@ -208,7 +208,7 @@ public:
if(m.rows > 1)
out << "\n";
}
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
@ -226,7 +226,7 @@ public:
writeMat(out, m, ',', ' ', m.cols==1);
out << "}";
}
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
@ -243,7 +243,7 @@ static CFormatter cFormatter;
static const Formatter* g_defaultFormatter0 = &matlabFormatter;
static const Formatter* g_defaultFormatter = &matlabFormatter;
bool my_streq(const char* a, const char* b)
static bool my_streq(const char* a, const char* b)
{
size_t i, alen = strlen(a), blen = strlen(b);
if( alen != blen )
@ -280,7 +280,7 @@ const Formatter* Formatter::setDefault(const Formatter* fmt)
g_defaultFormatter = fmt;
return prevFmt;
}
Formatted::Formatted(const Mat& _m, const Formatter* _fmt,
const vector<int>& _params)
{
@ -288,12 +288,12 @@ Formatted::Formatted(const Mat& _m, const Formatter* _fmt,
fmt = _fmt ? _fmt : Formatter::get();
std::copy(_params.begin(), _params.end(), back_inserter(params));
}
Formatted::Formatted(const Mat& _m, const Formatter* _fmt, const int* _params)
{
mtx = _m;
fmt = _fmt ? _fmt : Formatter::get();
if( _params )
{
int i, maxParams = 100;

View File

@ -54,7 +54,7 @@ template<typename T> static inline Scalar rawToScalar(const T& v)
for( i = 0; i < n; i++ )
s.val[i] = ((T1*)&v)[i];
return s;
}
}
/****************************************************************************************\
* sum *
@ -72,7 +72,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
{
ST s0 = dst[0];
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for(; i <= len - 4; i += 4, src += cn*4 )
s0 += src[0] + src[cn] + src[cn*2] + src[cn*3];
#endif
@ -104,7 +104,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
dst[1] = s1;
dst[2] = s2;
}
for( ; k < cn; k += 4 )
{
src = src0 + k;
@ -121,7 +121,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
}
return len;
}
int i, nzm = 0;
if( cn == 1 )
{
@ -155,7 +155,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
if( mask[i] )
{
int k = 0;
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for( ; k <= cn - 4; k += 4 )
{
ST s0, s1;
@ -212,7 +212,7 @@ template<typename T>
static int countNonZero_(const T* src, int len )
{
int i=0, nz = 0;
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for(; i <= len - 4; i += 4 )
nz += (src[i] != 0) + (src[i+1] != 0) + (src[i+2] != 0) + (src[i+3] != 0);
#endif
@ -251,12 +251,12 @@ template<typename T, typename ST, typename SQT>
static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
const T* src = src0;
if( !mask )
{
int i;
int k = cn % 4;
if( k == 1 )
{
ST s0 = sum[0];
@ -296,7 +296,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
sum[0] = s0; sum[1] = s1; sum[2] = s2;
sqsum[0] = sq0; sqsum[1] = sq1; sqsum[2] = sq2;
}
for( ; k < cn; k += 4 )
{
src = src0 + k;
@ -319,7 +319,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
}
return len;
}
int i, nzm = 0;
if( cn == 1 )
@ -368,7 +368,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
}
}
return nzm;
}
}
static int sqsum8u( const uchar* src, const uchar* mask, int* sum, int* sqsum, int len, int cn )
@ -407,9 +407,9 @@ cv::Scalar cv::sum( InputArray _src )
Mat src = _src.getMat();
int k, cn = src.channels(), depth = src.depth();
SumFunc func = sumTab[depth];
CV_Assert( cn <= 4 && func != 0 );
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
@ -420,7 +420,7 @@ cv::Scalar cv::sum( InputArray _src )
int* buf = (int*)&s[0];
size_t esz = 0;
bool blockSum = depth < CV_32S;
if( blockSum )
{
intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
@ -459,30 +459,30 @@ int cv::countNonZero( InputArray _src )
{
Mat src = _src.getMat();
CountNonZeroFunc func = countNonZeroTab[src.depth()];
CV_Assert( src.channels() == 1 && func != 0 );
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size, nz = 0;
for( size_t i = 0; i < it.nplanes; i++, ++it )
nz += func( ptrs[0], total );
return nz;
}
}
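sum, countNonZero and mean above all share the same traversal: NAryMatIterator walks possibly non-continuous, possibly n-dimensional matrices plane by plane so the low-level kernels only see continuous data. A minimal sketch of that pattern; the function name is illustrative:

#include "opencv2/core/core.hpp"

// Accumulate a single-channel matrix plane by plane, as the functions above do.
static double sumAll(const cv::Mat& src)
{
    CV_Assert( src.channels() == 1 );
    const cv::Mat* arrays[] = { &src, 0 };   // zero-terminated list of matrices to iterate
    cv::Mat planes[1];
    cv::NAryMatIterator it(arrays, planes);
    double s = 0;
    for( size_t p = 0; p < it.nplanes; p++, ++it )
        s += cv::sum(planes[0])[0];          // each plane is continuous
    return s;
}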
cv::Scalar cv::mean( InputArray _src, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
CV_Assert( mask.empty() || mask.type() == CV_8U );
int k, cn = src.channels(), depth = src.depth();
SumFunc func = sumTab[depth];
CV_Assert( cn <= 4 && func != 0 );
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
@ -493,19 +493,19 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
int* buf = (int*)&s[0];
bool blockSum = depth <= CV_16S;
size_t esz = 0, nz0 = 0;
if( blockSum )
{
intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
blockSize = std::min(blockSize, intSumBlockSize);
_buf.allocate(cn);
buf = _buf;
for( k = 0; k < cn; k++ )
buf[k] = 0;
esz = src.elemSize();
}
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
@ -529,19 +529,19 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
}
}
return s*(nz0 ? 1./nz0 : 0);
}
}
void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
CV_Assert( mask.empty() || mask.type() == CV_8U );
int k, cn = src.channels(), depth = src.depth();
SumSqrFunc func = sumSqrTab[depth];
CV_Assert( func != 0 );
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
@ -552,10 +552,10 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
int *sbuf = (int*)s, *sqbuf = (int*)sq;
bool blockSum = depth <= CV_16S, blockSqSum = depth <= CV_8S;
size_t esz = 0;
for( k = 0; k < cn; k++ )
s[k] = sq[k] = 0;
if( blockSum )
{
intSumBlockSize = 1 << 15;
@ -567,7 +567,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
sbuf[k] = sqbuf[k] = 0;
esz = src.elemSize();
}
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
@ -598,14 +598,14 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
ptrs[1] += bsz;
}
}
double scale = nz0 ? 1./nz0 : 0.;
for( k = 0; k < cn; k++ )
{
s[k] *= scale;
sq[k] = std::sqrt(std::max(sq[k]*scale - s[k]*s[k], 0.));
}
for( j = 0; j < 2; j++ )
{
const double* sptr = j == 0 ? s : sq;
@ -640,7 +640,7 @@ minMaxIdx_( const T* src, const uchar* mask, WT* _minVal, WT* _maxVal,
{
WT minVal = *_minVal, maxVal = *_maxVal;
size_t minIdx = *_minIdx, maxIdx = *_maxIdx;
if( !mask )
{
for( int i = 0; i < len; i++ )
@ -708,7 +708,7 @@ static void minMaxIdx_32f(const float* src, const uchar* mask, float* minval, fl
static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, double* maxval,
size_t* minidx, size_t* maxidx, int len, size_t startidx )
{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }
{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }
typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t);
@ -749,16 +749,16 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
{
Mat src = _src.getMat(), mask = _mask.getMat();
int depth = src.depth(), cn = src.channels();
CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) ||
(cn >= 1 && mask.empty() && !minIdx && !maxIdx) );
MinMaxIdxFunc func = minmaxTab[depth];
CV_Assert( func != 0 );
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
size_t minidx = 0, maxidx = 0;
int iminval = INT_MAX, imaxval = INT_MIN;
float fminval = FLT_MAX, fmaxval = -FLT_MAX;
@ -766,39 +766,39 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
size_t startidx = 1;
int *minval = &iminval, *maxval = &imaxval;
int planeSize = (int)it.size*cn;
if( depth == CV_32F )
minval = (int*)&fminval, maxval = (int*)&fmaxval;
else if( depth == CV_64F )
minval = (int*)&dminval, maxval = (int*)&dmaxval;
for( size_t i = 0; i < it.nplanes; i++, ++it, startidx += planeSize )
func( ptrs[0], ptrs[1], minval, maxval, &minidx, &maxidx, planeSize, startidx );
if( minidx == 0 )
dminval = dmaxval = 0;
else if( depth == CV_32F )
dminval = fminval, dmaxval = fmaxval;
else if( depth <= CV_32S )
dminval = iminval, dmaxval = imaxval;
if( minVal )
*minVal = dminval;
if( maxVal )
*maxVal = dmaxval;
if( minIdx )
ofs2idx(src, minidx, minIdx);
if( maxIdx )
ofs2idx(src, maxidx, maxIdx);
}
}
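minMaxLoc below is a thin 2D wrapper over minMaxIdx: it forwards the Point pointers as int pairs and then swaps x and y, because minMaxIdx reports indices in (row, col) order. A short usage sketch of both entry points:

#include "opencv2/core/core.hpp"

static void locateExtremes()
{
    cv::Mat img = cv::Mat::eye(4, 4, CV_32F);
    double mn = 0, mx = 0;
    cv::Point pmin, pmax;
    cv::minMaxLoc(img, &mn, &mx, &pmin, &pmax);     // mn == 0, mx == 1; locations in (x, y)
    int idxMin[2], idxMax[2];
    cv::minMaxIdx(img, &mn, &mx, idxMin, idxMax);   // index arrays filled in (row, col) order
}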
void cv::minMaxLoc( InputArray _img, double* minVal, double* maxVal,
Point* minLoc, Point* maxLoc, InputArray mask )
{
Mat img = _img.getMat();
CV_Assert(img.dims <= 2);
minMaxIdx(_img, minVal, maxVal, (int*)minLoc, (int*)maxLoc, mask);
if( minLoc )
std::swap(minLoc->x, minLoc->y);
@ -821,7 +821,7 @@ float normL2Sqr_(const float* a, const float* b, int n)
{
float CV_DECL_ALIGNED(16) buf[4];
__m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
for( ; j <= n - 8; j += 8 )
{
__m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
@ -834,14 +834,14 @@ float normL2Sqr_(const float* a, const float* b, int n)
}
else
#endif
{
{
for( ; j <= n - 4; j += 4 )
{
float t0 = a[j] - b[j], t1 = a[j+1] - b[j+1], t2 = a[j+2] - b[j+2], t3 = a[j+3] - b[j+3];
d += t0*t0 + t1*t1 + t2*t2 + t3*t3;
}
}
for( ; j < n; j++ )
{
float t = a[j] - b[j];
@ -861,7 +861,7 @@ float normL1_(const float* a, const float* b, int n)
static const int CV_DECL_ALIGNED(16) absbuf[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
__m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
__m128 absmask = _mm_load_ps((const float*)absbuf);
for( ; j <= n - 8; j += 8 )
{
__m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
@ -894,12 +894,12 @@ int normL1_(const uchar* a, const uchar* b, int n)
if( USE_SSE2 )
{
__m128i d0 = _mm_setzero_si128();
for( ; j <= n - 16; j += 16 )
{
__m128i t0 = _mm_loadu_si128((const __m128i*)(a + j));
__m128i t1 = _mm_loadu_si128((const __m128i*)(b + j));
d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
}
@ -907,7 +907,7 @@ int normL1_(const uchar* a, const uchar* b, int n)
{
__m128i t0 = _mm_cvtsi32_si128(*(const int*)(a + j));
__m128i t1 = _mm_cvtsi32_si128(*(const int*)(b + j));
d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
}
d = _mm_cvtsi128_si32(_mm_add_epi32(d0, _mm_unpackhi_epi64(d0, d0)));
@ -926,7 +926,7 @@ int normL1_(const uchar* a, const uchar* b, int n)
return d;
}
static const uchar popCountTable[] =
static const uchar popCountTable[] =
{
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
@ -962,7 +962,7 @@ static const uchar popCountTable4[] =
1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
int normHamming(const uchar* a, int n)
static int normHamming(const uchar* a, int n)
{
int i = 0, result = 0;
#if CV_NEON
@ -989,7 +989,7 @@ int normHamming(const uchar* a, int n)
result += popCountTable[a[i]];
return result;
}
int normHamming(const uchar* a, const uchar* b, int n)
{
int i = 0, result = 0;
@ -1020,7 +1020,7 @@ int normHamming(const uchar* a, const uchar* b, int n)
return result;
}
int normHamming(const uchar* a, int n, int cellSize)
static int normHamming(const uchar* a, int n, int cellSize)
{
if( cellSize == 1 )
return normHamming(a, n);
@ -1039,8 +1039,8 @@ int normHamming(const uchar* a, int n, int cellSize)
for( ; i < n; i++ )
result += tab[a[i]];
return result;
}
}
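The normHamming family relies on 256-entry per-byte popcount tables: descriptor bytes are XOR-ed (or, for the cellSize variants, looked up through coarser tables) and the per-byte bit counts are summed. A self-contained sketch of the basic idea, not the actual OpenCV tables:

// Table-driven per-byte popcount Hamming distance, mirroring the loops above.
static int hammingDistance(const unsigned char* a, const unsigned char* b, int n)
{
    static unsigned char tab[256];
    static bool ready = false;
    if( !ready )
    {
        for( int v = 0; v < 256; v++ )
        {
            int bits = 0;
            for( int k = 0; k < 8; k++ )
                bits += (v >> k) & 1;
            tab[v] = (unsigned char)bits;    // number of set bits in byte v
        }
        ready = true;
    }
    int dist = 0;
    for( int i = 0; i < n; i++ )
        dist += tab[a[i] ^ b[i]];            // differing bits in each byte pair
    return dist;
}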
int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
{
if( cellSize == 1 )
@ -1053,7 +1053,7 @@ int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
else
CV_Error( CV_StsBadSize, "bad cell size (not 1, 2 or 4) in normHamming" );
int i = 0, result = 0;
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for( ; i <= n - 4; i += 4 )
result += tab[a[i] ^ b[i]] + tab[a[i+1] ^ b[i+1]] +
tab[a[i+2] ^ b[i+2]] + tab[a[i+3] ^ b[i+3]];
@ -1128,7 +1128,7 @@ normL2_(const T* src, const uchar* mask, ST* _result, int len, int cn)
}
*_result = result;
return 0;
}
}
template<typename T, typename ST> int
normDiffInf_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn)
@ -1194,7 +1194,7 @@ normDiffL2_(const T* src1, const T* src2, const uchar* mask, ST* _result, int le
}
*_result = result;
return 0;
}
}
#define CV_DEF_NORM_FUNC(L, suffix, type, ntype) \
@ -1219,7 +1219,7 @@ CV_DEF_NORM_ALL(64f, double, double, double, double)
typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int);
typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);
typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);
static NormFunc normTab[3][8] =
{
@ -1265,11 +1265,11 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
int depth = src.depth(), cn = src.channels();
normType &= 7;
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src.type() == CV_8U) );
if( src.isContinuous() && mask.empty() )
{
size_t len = src.total()*cn;
@ -1278,7 +1278,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
if( depth == CV_32F )
{
const float* data = src.ptr<float>();
if( normType == NORM_L2 )
{
double result = 0;
@ -1307,18 +1307,18 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
if( depth == CV_8U )
{
const uchar* data = src.ptr<uchar>();
if( normType == NORM_HAMMING )
return normHamming(data, (int)len);
if( normType == NORM_HAMMING2 )
return normHamming(data, (int)len, 2);
}
}
}
CV_Assert( mask.empty() || mask.type() == CV_8U );
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
{
if( !mask.empty() )
@ -1328,22 +1328,22 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
return norm(temp, normType);
}
int cellSize = normType == NORM_HAMMING ? 1 : 2;
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size;
int result = 0;
for( size_t i = 0; i < it.nplanes; i++, ++it )
result += normHamming(ptrs[0], total, cellSize);
return result;
}
NormFunc func = normTab[normType >> 1][depth];
CV_Assert( func != 0 );
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
union
@ -1361,7 +1361,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
int isum = 0;
int *ibuf = &result.i;
size_t esz = 0;
if( blockSum )
{
intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
@ -1369,7 +1369,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
ibuf = &isum;
esz = src.elemSize();
}
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
@ -1388,7 +1388,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
ptrs[1] += bsz;
}
}
if( normType == NORM_INF )
{
if( depth == CV_64F )
@ -1400,7 +1400,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
}
else if( normType == NORM_L2 )
result.d = std::sqrt(result.d);
return result.d;
}
@ -1409,16 +1409,16 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
{
if( normType & CV_RELATIVE )
return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON);
Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
int depth = src1.depth(), cn = src1.channels();
CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
normType &= 7;
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) );
if( src1.isContinuous() && src2.isContinuous() && mask.empty() )
{
size_t len = src1.total()*src1.channels();
@ -1428,7 +1428,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
{
const float* data1 = src1.ptr<float>();
const float* data2 = src2.ptr<float>();
if( normType == NORM_L2 )
{
double result = 0;
@ -1456,9 +1456,9 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
}
}
}
CV_Assert( mask.empty() || mask.type() == CV_8U );
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
{
if( !mask.empty() )
@ -1469,22 +1469,22 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
return norm(temp, normType);
}
int cellSize = normType == NORM_HAMMING ? 1 : 2;
const Mat* arrays[] = {&src1, &src2, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size;
int result = 0;
for( size_t i = 0; i < it.nplanes; i++, ++it )
result += normHamming(ptrs[0], ptrs[1], total, cellSize);
return result;
}
NormDiffFunc func = normDiffTab[normType >> 1][depth];
CV_Assert( func != 0 );
const Mat* arrays[] = {&src1, &src2, &mask, 0};
uchar* ptrs[3];
union
@ -1503,7 +1503,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
unsigned isum = 0;
unsigned *ibuf = &result.u;
size_t esz = 0;
if( blockSum )
{
intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
@ -1511,7 +1511,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
ibuf = &isum;
esz = src1.elemSize();
}
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
@ -1531,7 +1531,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
ptrs[2] += bsz;
}
}
if( normType == NORM_INF )
{
if( depth == CV_64F )
@ -1543,7 +1543,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
}
else if( normType == NORM_L2 )
result.d = std::sqrt(result.d);
return result.d;
}
@ -1692,7 +1692,7 @@ static void batchDistL2_32f(const float* src1, const float* src2, size_t step2,
typedef void (*BatchDistFunc)(const uchar* src1, const uchar* src2, size_t step2,
int nvecs, int len, uchar* dist, const uchar* mask);
struct BatchDistInvoker
{
BatchDistInvoker( const Mat& _src1, const Mat& _src2,
@ -1709,26 +1709,26 @@ struct BatchDistInvoker
update = _update;
func = _func;
}
void operator()(const BlockedRange& range) const
{
AutoBuffer<int> buf(src2->rows);
int* bufptr = buf;
for( int i = range.begin(); i < range.end(); i++ )
{
func(src1->ptr(i), src2->ptr(), src2->step, src2->rows, src2->cols,
K > 0 ? (uchar*)bufptr : dist->ptr(i), mask->data ? mask->ptr(i) : 0);
if( K > 0 )
{
int* nidxptr = nidx->ptr<int>(i);
// since positive float's can be compared just like int's,
// we handle both CV_32S and CV_32F cases with a single branch
int* distptr = (int*)dist->ptr(i);
int j, k;
for( j = 0; j < src2->rows; j++ )
{
int d = bufptr[j];
@ -1746,7 +1746,7 @@ struct BatchDistInvoker
}
}
}
const Mat *src1;
const Mat *src2;
Mat *dist;
@ -1756,9 +1756,9 @@ struct BatchDistInvoker
int update;
BatchDistFunc func;
};
}
void cv::batchDistance( InputArray _src1, InputArray _src2,
OutputArray _dist, int dtype, OutputArray _nidx,
int normType, int K, InputArray _mask,
@ -1769,7 +1769,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
CV_Assert( type == src2.type() && src1.cols == src2.cols &&
(type == CV_32F || type == CV_8U));
CV_Assert( _nidx.needed() == (K > 0) );
if( dtype == -1 )
{
dtype = normType == NORM_HAMMING || normType == NORM_HAMMING2 ? CV_32S : CV_32F;
@ -1777,7 +1777,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
CV_Assert( (type == CV_8U && dtype == CV_32S) || dtype == CV_32F);
K = std::min(K, src2.rows);
_dist.create(src1.rows, (K > 0 ? K : src2.rows), dtype);
Mat dist = _dist.getMat(), nidx;
if( _nidx.needed() )
@ -1785,19 +1785,19 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
_nidx.create(dist.size(), CV_32S);
nidx = _nidx.getMat();
}
if( update == 0 && K > 0 )
{
dist = Scalar::all(dtype == CV_32S ? (double)INT_MAX : (double)FLT_MAX);
nidx = Scalar::all(-1);
}
if( crosscheck )
{
CV_Assert( K == 1 && update == 0 && mask.empty() );
Mat tdist, tidx;
batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false);
// if an idx-th element from src1 appeared to be the nearest to i-th element of src2,
// we update the minimum mutual distance between idx-th element of src1 and the whole src2 set.
// As a result, if nidx[idx] = i*, it means that idx-th element of src1 is the nearest
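The crosscheck branch described in the comment above keeps a match only when the nearest-neighbour relation holds in both directions. A simplified stand-alone version of that symmetric filter, assuming the 1-NN index arrays were already computed; the names are illustrative:

#include <vector>

// fwd[i]: best train index for query i; bwd[j]: best query index for train j.
// Keep query i only when the two agree (mutual nearest neighbours).
static std::vector<int> crossCheck(const std::vector<int>& fwd, const std::vector<int>& bwd)
{
    std::vector<int> kept(fwd.size(), -1);
    for (size_t i = 0; i < fwd.size(); ++i)
    {
        int j = fwd[i];
        if (j >= 0 && j < (int)bwd.size() && bwd[j] == (int)i)
            kept[i] = j;
    }
    return kept;
}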
@ -1832,7 +1832,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
}
return;
}
BatchDistFunc func = 0;
if( type == CV_8U )
{
@ -1860,12 +1860,12 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
else if( normType == NORM_L2 )
func = (BatchDistFunc)batchDistL2_32f;
}
if( func == 0 )
CV_Error_(CV_StsUnsupportedFormat,
("The combination of type=%d, dtype=%d and normType=%d is not supported",
type, dtype, normType));
parallel_for(BlockedRange(0, src1.rows),
BatchDistInvoker(src1, src2, dist, nidx, K, mask, update, func));
}

View File

@ -88,7 +88,7 @@
#if defined __linux__ || defined __APPLE__
#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/types.h>
#if defined ANDROID
#include <sys/sysconf.h>
#else
@ -111,7 +111,7 @@ Exception::~Exception() throw() {}
/*!
\return the error description and the context as a text string.
*/
*/
const char* Exception::what() const throw() { return msg.c_str(); }
void Exception::formatMessage()
@ -121,7 +121,7 @@ void Exception::formatMessage()
else
msg = format("%s:%d: error: (%d) %s\n", file.c_str(), line, code, err.c_str());
}
struct HWFeatures
{
enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
@ -374,7 +374,7 @@ int getThreadNum(void)
#endif
}
#if ANDROID
#ifdef ANDROID
static inline int getNumberOfCPUsImpl()
{
FILE* cpuPossible = fopen("/sys/devices/system/cpu/possible", "r");
@ -408,7 +408,7 @@ static inline int getNumberOfCPUsImpl()
sscanf(pos, "%d-%d", &rstart, &rend);
cpusAvailable += rend - rstart + 1;
}
}
return cpusAvailable ? cpusAvailable : 1;
}
@ -419,9 +419,9 @@ int getNumberOfCPUs(void)
#if defined WIN32 || defined _WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo( &sysinfo );
return (int)sysinfo.dwNumberOfProcessors;
#elif ANDROID
#elif defined ANDROID
static int ncpus = getNumberOfCPUsImpl();
printf("CPUS= %d\n", ncpus);
return ncpus;
@ -430,24 +430,24 @@ int getNumberOfCPUs(void)
#elif defined __APPLE__
int numCPU=0;
int mib[4];
size_t len = sizeof(numCPU);
size_t len = sizeof(numCPU);
/* set the mib for hw.ncpu */
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU; // alternatively, try HW_NCPU;
/* get the number of CPUs from the system */
sysctl(mib, 2, &numCPU, &len, NULL, 0);
if( numCPU < 1 )
if( numCPU < 1 )
{
mib[1] = HW_NCPU;
sysctl( mib, 2, &numCPU, &len, NULL, 0 );
if( numCPU < 1 )
numCPU = 1;
}
return (int)numCPU;
#else
return 1;
@ -475,7 +475,7 @@ string tempfile( const char* suffix )
{
char buf[L_tmpnam];
char* name = 0;
#if ANDROID
#ifdef ANDROID
strcpy(buf, "/sdcard/__opencv_temp_XXXXXX");
name = mktemp(buf);
#else

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -61,7 +61,7 @@ inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{
@ -71,7 +71,7 @@ void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& d
}
}
void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{
@ -82,7 +82,7 @@ void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& d
}
}
void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{

View File

@ -56,7 +56,7 @@ DescriptorExtractor::~DescriptorExtractor()
{}
void DescriptorExtractor::compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const
{
{
if( image.empty() || keypoints.empty() )
{
descriptors.release();
@ -102,7 +102,7 @@ Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExt
string type = descriptorExtractorType.substr(pos);
return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
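DescriptorExtractor::create above handles an "Opponent" prefix by wrapping the extractor named by the rest of the string, and hands any other name to Algorithm::create with a "Feature2D." prefix. A short sketch of both paths, using names this factory accepts:

#include "opencv2/features2d/features2d.hpp"

static void makeExtractors()
{
    cv::Ptr<cv::DescriptorExtractor> brief    = cv::DescriptorExtractor::create("BRIEF");
    cv::Ptr<cv::DescriptorExtractor> opponent = cv::DescriptorExtractor::create("OpponentBRIEF"); // OpponentColorDescriptorExtractor around BRIEF
}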
@ -117,7 +117,7 @@ OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<De
CV_Assert( !descriptorExtractor.empty() );
}
void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
{
if( bgrImage.type() != CV_8UC3 )
CV_Error( CV_StsBadArg, "input image must be an BGR image of type CV_8UC3" );
@ -227,7 +227,7 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
Mat mergedDescriptors( maxKeypointsCount, 3*descriptorSize, descriptorExtractor->descriptorType() );
int mergedCount = 0;
// cp - current channel position
size_t cp[] = {0, 0, 0};
size_t cp[] = {0, 0, 0};
while( cp[0] < channelKeypoints[0].size() &&
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )

View File

@ -45,7 +45,7 @@ using namespace std;
namespace cv
{
/*
* FeatureDetector
*/
@ -95,19 +95,19 @@ Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
return new GridAdaptedFeatureDetector(FeatureDetector::create(
detectorType.substr(strlen("Grid"))));
}
if( detectorType.find("Pyramid") == 0 )
{
return new PyramidAdaptedFeatureDetector(FeatureDetector::create(
detectorType.substr(strlen("Pyramid"))));
}
if( detectorType.find("Dynamic") == 0 )
{
return new DynamicAdaptedFeatureDetector(AdjusterAdapter::create(
detectorType.substr(strlen("Dynamic"))));
}
if( detectorType.compare( "HARRIS" ) == 0 )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
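FeatureDetector::create above dispatches on name prefixes: "Grid", "Pyramid" and "Dynamic" wrap a detector built from the remainder of the string, and "HARRIS" selects the Harris variant of GFTT. A short usage sketch with composite names as parsed by this factory:

#include "opencv2/features2d/features2d.hpp"

static void makeDetectors()
{
    cv::Ptr<cv::FeatureDetector> fast = cv::FeatureDetector::create("FAST");
    cv::Ptr<cv::FeatureDetector> grid = cv::FeatureDetector::create("GridFAST");      // GridAdaptedFeatureDetector around FAST
    cv::Ptr<cv::FeatureDetector> pyr  = cv::FeatureDetector::create("PyramidHARRIS"); // PyramidAdapted around the Harris variant of GFTT
}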
@ -149,13 +149,13 @@ void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, co
/*
* DenseFeatureDetector
*/
DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
float _featureScaleMul, int _initXyStep,
int _initImgBound, bool _varyXyStepWithScale,
bool _varyImgBoundWithScale ) :
initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
float _featureScaleMul, int _initXyStep,
int _initImgBound, bool _varyXyStepWithScale,
bool _varyImgBoundWithScale ) :
initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
{}
@ -203,7 +203,7 @@ struct ResponseComparator
}
};
void keepStrongest( int N, vector<KeyPoint>& keypoints )
static void keepStrongest( int N, vector<KeyPoint>& keypoints )
{
if( (int)keypoints.size() > N )
{

View File

@ -42,8 +42,7 @@
#include "precomp.hpp"
namespace cv
{
using namespace cv;
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
@ -54,7 +53,7 @@ namespace cv
CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
obj.info()->addParam(obj, "bytes", obj.bytes_));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
@ -69,7 +68,7 @@ CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
@ -81,8 +80,8 @@ CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution);
obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold);
obj.info()->addParam(obj, "minMargin", obj.minMargin);
obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
@ -96,7 +95,7 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
obj.info()->addParam(obj, "scoreType", obj.scoreType));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
@ -105,7 +104,7 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "k", obj.k));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS HarrisDetector : public GFTTDetector
{
public:
@ -113,7 +112,7 @@ public:
int blockSize=3, bool useHarrisDetector=true, double k=0.04 )
: GFTTDetector( maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k ) {}
AlgorithmInfo* info() const;
};
};
CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
@ -122,7 +121,7 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
obj.info()->addParam(obj, "k", obj.k));
////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
@ -134,22 +133,23 @@ CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale));
CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
obj.info()->addParam(obj, "detector", (Ptr<Algorithm>&)obj.detector);
//obj.info()->addParam(obj, "detector", (Ptr<Algorithm>&)obj.detector);
obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
obj.info()->addParam(obj, "gridRows", obj.gridRows);
obj.info()->addParam(obj, "gridCols", obj.gridCols));
bool initModule_features2d(void)
bool cv::initModule_features2d(void)
{
Ptr<Algorithm> brief = createBriefDescriptorExtractor(), orb = createORB(),
star = createStarDetector(), fastd = createFastFeatureDetector(), mser = createMSER(),
dense = createDenseFeatureDetector(), gftt = createGFTTDetector(),
harris = createHarrisDetector(), grid = createGridAdaptedFeatureDetector();
return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
gftt->info() != 0 && harris->info() != 0 && grid->info() != 0;
}
bool all = true;
all &= !BriefDescriptorExtractor_info_auto.name().empty();
all &= !FastFeatureDetector_info_auto.name().empty();
all &= !StarDetector_info_auto.name().empty();
all &= !MSER_info_auto.name().empty();
all &= !ORB_info_auto.name().empty();
all &= !GFTTDetector_info_auto.name().empty();
all &= !HarrisDetector_info_auto.name().empty();
all &= !DenseFeatureDetector_info_auto.name().empty();
all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
return all;
}
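The rewritten initModule_features2d verifies that every CV_INIT_ALGORITHM registration above produced a non-empty AlgorithmInfo name. A typical call site, sketched with a name taken from the registrations above:

#include "opencv2/features2d/features2d.hpp"

static cv::Ptr<cv::Algorithm> makeOrbByName()
{
    cv::initModule_features2d();                                   // make sure the registrations ran
    return cv::Algorithm::create<cv::Algorithm>("Feature2D.ORB");  // name registered via CV_INIT_ALGORITHM above
}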

View File

@ -174,7 +174,7 @@ int DescriptorMatcher::DescriptorCollection::size() const
/*
* DescriptorMatcher
*/
void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
static void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
{
matches.clear();
matches.reserve( knnMatches.size() );

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
#endif

View File

@ -493,23 +493,6 @@ private:
CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
};
/*template<typename T, typename Distance>
class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
{
public:
CV_CalonderDescriptorExtractorTest( const char* testName, float _normDif, float _prevTime ) :
CV_DescriptorExtractorTest<Distance>( testName, _normDif, Ptr<DescriptorExtractor>(), _prevTime )
{}
protected:
virtual void createDescriptorExtractor()
{
CV_DescriptorExtractorTest<Distance>::dextractor =
new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
FEATURES2D_DIR + "/calonder_classifier.rtc");
}
};*/
/****************************************************************************************\
* Algorithmic tests for descriptor matchers *
\****************************************************************************************/
@ -1059,24 +1042,6 @@ TEST( Features2d_DescriptorExtractor_BRIEF, regression )
test.safe_run();
}
#if CV_SSE2
TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
{
CV_CalonderDescriptorExtractorTest<uchar, L2<uchar> > test( "descriptor-calonder-uchar",
std::numeric_limits<float>::epsilon() + 1,
0.0132175f );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_Calonder_float, regression )
{
CV_CalonderDescriptorExtractorTest<float, L2<float> > test( "descriptor-calonder-float",
std::numeric_limits<float>::epsilon(),
0.0221308f );
test.safe_run();
}
#endif // CV_SSE2
/*
* Matchers
*/

View File

@ -46,6 +46,7 @@ struct base_any_policy
virtual ::size_t get_size() = 0;
virtual const std::type_info& type() = 0;
virtual void print(std::ostream& out, void* const* src) = 0;
virtual ~base_any_policy() {}
};
template<typename T>
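The empty virtual destructor added to base_any_policy is the usual fix for a polymorphic base that is presumably destroyed through base pointers elsewhere in the any implementation; without it such a delete is undefined behaviour, and newer GCC warning sets flag the omission (-Wdelete-non-virtual-dtor). A hedged, self-contained illustration with made-up class names:

    #include <cstdio>

    struct Policy                        // polymorphic base
    {
        virtual void print() = 0;
        virtual ~Policy() {}             // the one-line fix: a virtual destructor
    };

    struct IntPolicy : public Policy
    {
        virtual void print() { std::printf("int policy\n"); }
        virtual ~IntPolicy() { std::printf("derived cleanup runs\n"); }
    };

    int main()
    {
        Policy* p = new IntPolicy();
        p->print();
        delete p;                        // reaches ~IntPolicy() because the base dtor is virtual
        return 0;
    }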

View File

@ -35,6 +35,9 @@
#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_
#define OPENCV_FLANN_DYNAMIC_BITSET_H_
#ifndef FLANN_USE_BOOST
# define FLANN_USE_BOOST 0
#endif
//#define FLANN_USE_BOOST 1
#if FLANN_USE_BOOST
#include <boost/dynamic_bitset.hpp>
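Giving FLANN_USE_BOOST a default of 0 before it is ever tested means every later "#if FLANN_USE_BOOST" sees a defined macro, so -Wundef has nothing to report, and a build can still opt in with -DFLANN_USE_BOOST=1. The same idea in a standalone sketch (the macro and include mirror the hunk above; main() is only there to make the snippet compile):

    #ifndef FLANN_USE_BOOST
    #  define FLANN_USE_BOOST 0          // default: no Boost dependency
    #endif

    #if FLANN_USE_BOOST                  // always defined, so -Wundef stays quiet
    #  include <boost/dynamic_bitset.hpp>
    #endif

    int main() { return 0; }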

View File

@ -40,6 +40,11 @@
#include <iomanip>
#include <limits.h>
// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP
#ifdef __GXX_EXPERIMENTAL_CXX0X__
# define USE_UNORDERED_MAP 1
#else
# define USE_UNORDERED_MAP 0
#endif
#if USE_UNORDERED_MAP
#include <unordered_map>
#else
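The new block derives USE_UNORDERED_MAP from __GXX_EXPERIMENTAL_CXX0X__, the macro 2012-era g++ defines under -std=c++0x, so the header can prefer std::unordered_map when it exists and fall back otherwise without ever evaluating an undefined macro. A sketch of the pattern; the std::map fallback and the IndexMap alias are illustrative, not necessarily what the flann header uses:

    #ifdef __GXX_EXPERIMENTAL_CXX0X__
    #  define USE_UNORDERED_MAP 1
    #else
    #  define USE_UNORDERED_MAP 0
    #endif

    #if USE_UNORDERED_MAP
    #  include <unordered_map>
       typedef std::unordered_map<int, int> IndexMap;   // hypothetical alias
    #else
    #  include <map>
       typedef std::map<int, int> IndexMap;             // hypothetical fallback
    #endif

    int main()
    {
        IndexMap m;
        m[1] = 2;
        return 0;
    }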

View File

@ -16,7 +16,7 @@
#include "perf_utility.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -20,7 +20,7 @@
#include "perf_utility.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif

View File

@ -81,6 +81,10 @@ if(HAVE_QT)
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES} )
if(CMAKE_COMPILER_IS_GNUCXX)
set_source_files_properties(${_RCC_OUTFILES} PROPERTIES COMPILE_FLAGS "-Wno-missing-declarations")
endif()
elseif(WIN32)
list(APPEND highgui_srcs src/window_w32.cpp)
elseif(HAVE_GTK)
@ -131,6 +135,10 @@ if(HAVE_OPENNI)
list(APPEND highgui_srcs src/cap_openni.cpp)
ocv_include_directories(${OPENNI_INCLUDE_DIR})
list(APPEND HIGHGUI_LIBRARIES ${OPENNI_LIBRARY})
if(CMAKE_COMPILER_IS_GNUCXX)
set_source_files_properties(src/cap_openni.cpp PROPERTIES COMPILE_FLAGS "-Wno-unknown-pragmas -Wno-uninitialized -Wno-reorder -Wno-strict-aliasing")
endif()
endif(HAVE_OPENNI)
if(HAVE_opencv_androidcamera)

View File

@ -79,7 +79,7 @@ CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms C
CVAPI(void) cvSaveWindowParameters(const char* name);
CVAPI(void) cvLoadWindowParameters(const char* name);
CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
CVAPI(void) cvStopLoop();
CVAPI(void) cvStopLoop( void );
typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);
enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};
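The "()" to "( void )" edits to cvStopLoop and the other prototypes in this header matter when the file is consumed by a C compiler: in C an empty parameter list leaves the arguments unspecified, which the stricter flags report (-Wstrict-prototypes), while "(void)" is an explicit zero-argument prototype and means the same thing C++ already assumed. A tiny sketch with a hypothetical function name, valid as both C and C++:

    #include <stdio.h>

    /* '()' in C would mean "unspecified arguments"; '(void)' is a strict
     * zero-argument prototype, which is what -Wstrict-prototypes asks for. */
    static int my_start_thread(void)
    {
        return 1;
    }

    int main(void)
    {
        printf("thread helper returned %d\n", my_start_thread());
        return 0;
    }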
@ -90,7 +90,7 @@ CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCall
/* this function is used to set some external parameters in case of X Window */
CVAPI(int) cvInitSystem( int argc, char** argv );
CVAPI(int) cvStartWindowThread();
CVAPI(int) cvStartWindowThread( void );
// --------- YV ---------
enum
@ -100,16 +100,16 @@ enum
CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property
CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support
//These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrainted by the image displayed
CV_WINDOW_OPENGL = 0x00001000, //window with opengl support
//Those flags are only for Qt
CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar
CV_GUI_NORMAL = 0x00000010, //old fashioned way
//These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen
CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint)
@ -303,10 +303,10 @@ enum
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android
CV_CAP_XIAPI =1100, // XIMEA Camera API
CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)
};
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
@ -367,15 +367,15 @@ enum
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,
CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
@ -409,24 +409,24 @@ enum
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // IP address to enable multicast master mode; 0 to disable multicast
// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
@ -436,7 +436,7 @@ enum
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
@ -532,7 +532,7 @@ CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;

View File

@ -4,7 +4,7 @@
#include "opencv2/ts/ts.hpp"
#include "opencv2/highgui/highgui.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -41,7 +41,7 @@
#include "precomp.hpp"
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4711 )
#endif
@ -282,7 +282,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif
#ifdef HAVE_PVAPI
case CV_CAP_PVAPI:
capture = cvCreateCameraCapture_PvAPI (index);
@ -306,7 +306,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif
#ifdef HAVE_XIMEA
case CV_CAP_XIAPI:
capture = cvCreateCameraCapture_XIMEA (index);
@ -354,7 +354,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
if (! result)
result = cvCreateFileCapture_QT (filename);
#endif
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateFileCapture_AVFoundation (filename);
@ -364,7 +364,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
if (! result)
result = cvCreateFileCapture_OpenNI (filename);
#endif
if (! result)
result = cvCreateFileCapture_Images (filename);
@ -378,29 +378,29 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color )
{
//CV_FUNCNAME( "cvCreateVideoWriter" );
CvVideoWriter *result = 0;
if(!fourcc || !fps)
result = cvCreateVideoWriter_Images(filename);
if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
/* #ifdef HAVE_XINE
if(!result)
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
#endif
*/
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_QUICKTIME
if(!result)
result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_GSTREAMER
@ -408,10 +408,10 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
#endif
if(!result)
result = cvCreateVideoWriter_Images(filename);
return result;
}
CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
@ -434,12 +434,12 @@ namespace cv
VideoCapture::VideoCapture()
{}
VideoCapture::VideoCapture(const string& filename)
{
open(filename);
}
VideoCapture::VideoCapture(int device)
{
open(device);
@ -449,21 +449,21 @@ VideoCapture::~VideoCapture()
{
cap.release();
}
bool VideoCapture::open(const string& filename)
{
cap = cvCreateFileCapture(filename.c_str());
return isOpened();
}
bool VideoCapture::open(int device)
{
cap = cvCreateCameraCapture(device);
return isOpened();
}
bool VideoCapture::isOpened() const { return !cap.empty(); }
void VideoCapture::release()
{
cap.release();
@ -473,7 +473,7 @@ bool VideoCapture::grab()
{
return cvGrabFrame(cap) != 0;
}
bool VideoCapture::retrieve(Mat& image, int channel)
{
IplImage* _img = cvRetrieveFrame(cap, channel);
@ -500,18 +500,18 @@ bool VideoCapture::read(Mat& image)
image.release();
return !image.empty();
}
VideoCapture& VideoCapture::operator >> (Mat& image)
{
read(image);
return *this;
}
bool VideoCapture::set(int propId, double value)
{
return cvSetCaptureProperty(cap, propId, value) != 0;
}
double VideoCapture::get(int propId)
{
return cvGetCaptureProperty(cap, propId);
@ -519,7 +519,7 @@ double VideoCapture::get(int propId)
VideoWriter::VideoWriter()
{}
VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
open(filename, fourcc, fps, frameSize, isColor);
@ -528,13 +528,13 @@ VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size fr
void VideoWriter::release()
{
writer.release();
}
VideoWriter::~VideoWriter()
{
release();
}
bool VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
@ -544,18 +544,18 @@ bool VideoWriter::open(const string& filename, int fourcc, double fps, Size fram
bool VideoWriter::isOpened() const
{
return !writer.empty();
}
void VideoWriter::write(const Mat& image)
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
}
VideoWriter& VideoWriter::operator << (const Mat& image)
{
write(image);
return *this;
}
}

View File

@ -123,7 +123,7 @@ icvInitFFMPEG(void)
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
#endif
ffmpegInitialized = 1;
}
}
@ -151,7 +151,7 @@ public:
{
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0;
if(!ffmpegCapture ||
!icvRetrieveFrame_FFMPEG_p(ffmpegCapture,&data,&step,&width,&height,&cn))
return 0;
@ -193,7 +193,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
return cvCreateFileCapture_VFW(filename);
#else
return 0;
#endif
}
@ -247,5 +247,5 @@ CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourc
return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor);
#else
return 0;
#endif
}

View File

@ -66,7 +66,7 @@ extern "C" {
#ifndef HAVE_FFMPEG_SWSCALE
#error "libswscale is necessary to build the newer OpenCV ffmpeg wrapper"
#endif
// if the header path is not specified explicitly, let's deduce it
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
@ -140,7 +140,7 @@ extern "C" {
#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
#endif
int get_number_of_cpus(void)
static int get_number_of_cpus(void)
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(52, 111, 0)
return 1;
@ -210,7 +210,7 @@ struct CvCapture_FFMPEG
void seek(int64_t frame_number);
void seek(double sec);
bool slowSeek( int framenumber );
int64_t get_total_frames();
double get_duration_sec();
@ -225,8 +225,8 @@ struct CvCapture_FFMPEG
AVCodec * avcodec;
int video_stream;
AVStream * video_st;
AVFrame * picture;
AVFrame rgb_picture;
int64_t picture_pts;
AVPacket packet;
@ -274,7 +274,7 @@ void CvCapture_FFMPEG::close()
sws_freeContext(img_convert_ctx);
img_convert_ctx = 0;
}
if( picture )
av_free(picture);
@ -293,9 +293,9 @@ void CvCapture_FFMPEG::close()
if( ic )
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
av_close_input_file(ic);
#else
avformat_close_input(&ic);
#endif
ic = NULL;
@ -337,7 +337,7 @@ static void icvInitFFMPEG_internal()
av_register_all();
av_log_set_level(AV_LOG_ERROR);
initialized = true;
}
}
@ -345,18 +345,18 @@ static void icvInitFFMPEG_internal()
bool CvCapture_FFMPEG::open( const char* _filename )
{
icvInitFFMPEG_internal();
unsigned i;
bool valid = false;
close();
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
int err = avformat_open_input(&ic, _filename, NULL, NULL);
#else
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
#endif
if (err < 0) {
CV_WARN("Error opening file");
goto exit_func;
@ -438,13 +438,13 @@ bool CvCapture_FFMPEG::grabFrame()
const int max_number_of_attempts = 1 << 16;
if( !ic || !video_st ) return false;
if( ic->streams[video_stream]->nb_frames > 0 &&
frame_number > ic->streams[video_stream]->nb_frames )
return false;
av_free_packet (&packet);
picture_pts = AV_NOPTS_VALUE_;
// get the next frame
@ -463,7 +463,7 @@ bool CvCapture_FFMPEG::grabFrame()
break;
continue;
}
// Decode video frame
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
@ -498,7 +498,7 @@ bool CvCapture_FFMPEG::grabFrame()
if( valid && first_frame_number < 0 )
first_frame_number = dts_to_frame_number(picture_pts);
// return if we have a new picture or not
return valid;
}
@ -518,7 +518,7 @@ bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int*
{
if( img_convert_ctx )
sws_freeContext(img_convert_ctx);
frame.width = video_st->codec->width;
frame.height = video_st->codec->height;
@ -629,7 +629,7 @@ double CvCapture_FFMPEG::get_fps()
{
fps = r2d(ic->streams[video_stream]->avg_frame_rate);
}
#endif
if (fps < eps_zero)
{
@ -666,12 +666,12 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
{
_frame_number = std::min(_frame_number, get_total_frames());
int delta = 16;
// if we have not grabbed a single frame before first seek, let's read the first frame
// and get some valuable information during the process
if( first_frame_number < 0 && get_total_frames() > 1 )
grabFrame();
for(;;)
{
int64_t _frame_number_temp = std::max(_frame_number-delta, (int64_t)0);
@ -684,13 +684,13 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
if( _frame_number > 0 )
{
grabFrame();
if( _frame_number > 1 )
{
frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
//printf("_frame_number = %d, frame_number = %d, delta = %d\n",
// (int)_frame_number, (int)frame_number, delta);
if( frame_number < 0 || frame_number > _frame_number-1 )
{
if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
@ -771,7 +771,7 @@ struct CvVideoWriter_FFMPEG
void init();
AVOutputFormat * fmt;
AVFormatContext * oc;
uint8_t * outbuf;
uint32_t outbuf_size;
@ -1010,7 +1010,7 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
static int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
{
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext * c = video_st->codec;
@ -1046,7 +1046,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
#if LIBAVFORMAT_BUILD > 4752
if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
pkt.pts = c->coded_frame->pts;
#endif
@ -1069,7 +1069,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
{
bool ret = false;
if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
return false;
width = frame_width;
@ -1180,7 +1180,7 @@ void CvVideoWriter_FFMPEG::close()
// nothing to do if already released
if ( !picture )
return;
/* no more frame to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
@ -1200,7 +1200,7 @@ void CvVideoWriter_FFMPEG::close()
}
av_write_trailer(oc);
}
if( img_convert_ctx )
{
sws_freeContext(img_convert_ctx);
@ -1272,7 +1272,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
double fps, int width, int height, bool is_color )
{
icvInitFFMPEG_internal();
CodecID codec_id = CODEC_ID_NONE;
int err, codec_pix_fmt;
double bitrate_scale = 1;
@ -1284,7 +1284,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
return false;
if(fps <= 0)
return false;
// we allow frames of odd width or height, but in this case we truncate
// the rightmost column/the bottom row. Probably, this should be handled more elegantly,
// but some internal functions inside FFMPEG swscale require even width/height.
@ -1363,7 +1363,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
codec_pix_fmt = PIX_FMT_YUV420P;
break;
}
double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
// TODO -- safe to ignore output audio stream?
@ -1480,8 +1480,8 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
err=avformat_write_header(oc, NULL);
#else
err=av_write_header( oc );
#endif
if(err < 0)
{
close();
@ -1579,7 +1579,7 @@ struct OutputMediaStream_FFMPEG
{
bool open(const char* fileName, int width, int height, double fps);
void close();
void write(unsigned char* data, int size, int keyFrame);
// add a video output stream to the container
@ -1692,7 +1692,7 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID
{
AVRational error = av_sub_q(req, *p);
if (error.num < 0)
error.num *= -1;
if (av_cmp_q(error, best_error) < 0)
@ -1825,7 +1825,7 @@ bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height,
void OutputMediaStream_FFMPEG::write(unsigned char* data, int size, int keyFrame)
{
// if zero size, it means the image was buffered
if (size > 0)
{
AVPacket pkt;
av_init_packet(&pkt);
@ -1851,7 +1851,7 @@ struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fil
stream->close();
free(stream);
return 0;
}

View File

@ -50,6 +50,20 @@
#include <iostream>
#include <queue>
#ifndef i386
# define i386 0
#endif
#ifndef __arm__
# define __arm__ 0
#endif
#ifndef _ARC
# define _ARC 0
#endif
#ifndef __APPLE__
# define __APPLE__ 0
#endif
#include "XnCppWrapper.h"
const std::string XMLConfig =
@ -169,6 +183,8 @@ private:
approxSyncGrabber(approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
{}
virtual ~ApproximateSynchronizerBase() {}
virtual bool isSpinContinue() const = 0;
virtual void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) = 0;
virtual void pushImageMetaData( xn::ImageMetaData& imageMetaData ) = 0;
@ -410,7 +426,7 @@ class CvCapture_OpenNI : public CvCapture
{
public:
enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
static const int INVALID_PIXEL_VAL = 0;
static const int INVALID_COORDINATE_VAL = 0;
@ -508,26 +524,26 @@ bool CvCapture_OpenNI::isOpened() const
return isContextOpened;
}
XnMapOutputMode defaultMapOutputMode()
{
XnMapOutputMode mode;
mode.nXRes = XN_VGA_X_RES;
mode.nYRes = XN_VGA_Y_RES;
mode.nFPS = 30;
return mode;
}
// static XnMapOutputMode defaultMapOutputMode()
// {
// XnMapOutputMode mode;
// mode.nXRes = XN_VGA_X_RES;
// mode.nYRes = XN_VGA_Y_RES;
// mode.nFPS = 30;
// return mode;
// }
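defaultMapOutputMode was unused, so rather than leave it to trigger the new unused-function/missing-declaration warnings it is kept only as a comment. An equivalent way to park such code, shown here with a made-up helper, is an '#if 0' block that the compiler never compiles:

    #include <cstdio>

    #if 0                                // parked: not compiled, so no warnings,
    static int unusedHelper(void)        // but trivially easy to re-enable later
    {
        return 42;
    }
    #endif

    int main()
    {
        std::printf("helper is currently disabled\n");
        return 0;
    }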
CvCapture_OpenNI::CvCapture_OpenNI( int index )
{
int deviceType = DEVICE_DEFAULT;
XnStatus status;
isContextOpened = false;
maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER;
maxTimeDuration = DEFAULT_MAX_TIME_DURATION;
if( index >= 10 )
{
deviceType = index / 10;
@ -1201,7 +1217,7 @@ IplImage* CvCapture_OpenNI::retrievePointCloudMap()
return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
}
void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
static void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
XnUInt64 noSampleValue, XnUInt64 shadowValue )
{
cv::Mat depth;

View File

@ -126,8 +126,7 @@ skip_input_data(j_decompress_ptr cinfo, long num_bytes)
}
GLOBAL(void)
jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
static void jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
{
cinfo->src = &source->pub;
@ -498,8 +497,7 @@ empty_output_buffer (j_compress_ptr cinfo)
return TRUE;
}
GLOBAL(void)
jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
static void jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
{
cinfo->dest = &destination->pub;

View File

@ -57,7 +57,7 @@ namespace cv
static vector<ImageDecoder> decoders;
static vector<ImageEncoder> encoders;
ImageDecoder findDecoder( const string& filename )
static ImageDecoder findDecoder( const string& filename )
{
size_t i, maxlen = 0;
for( i = 0; i < decoders.size(); i++ )
@ -83,7 +83,7 @@ ImageDecoder findDecoder( const string& filename )
return ImageDecoder();
}
ImageDecoder findDecoder( const Mat& buf )
static ImageDecoder findDecoder( const Mat& buf )
{
size_t i, maxlen = 0;
@ -110,7 +110,7 @@ ImageDecoder findDecoder( const Mat& buf )
return ImageDecoder();
}
ImageEncoder findEncoder( const string& _ext )
static ImageEncoder findEncoder( const string& _ext )
{
if( _ext.size() <= 1 )
return ImageEncoder();
@ -395,7 +395,7 @@ Mat imdecode( InputArray _buf, int flags )
imdecode_( buf, flags, LOAD_MAT, &img );
return img;
}
bool imencode( const string& ext, InputArray _image,
vector<uchar>& buf, const vector<int>& params )
{

View File

@ -42,7 +42,7 @@
#ifndef __HIGHGUI_H_
#define __HIGHGUI_H_
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 )
#endif

File diff suppressed because it is too large

View File

@ -1372,17 +1372,17 @@ cvDestroyAllWindows( void )
CV_UNLOCK_MUTEX();
}
CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
CvSize window_size;
GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
&window_size.width, &window_size.height );
// CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
// CvSize window_size;
// GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
// gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
// &window_size.width, &window_size.height );
window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
// window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
// window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
return window_size;
}
// return window_size;
// }
CV_IMPL void
cvShowImage( const char* name, const CvArr* arr )

View File

@ -5,7 +5,7 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -98,6 +98,7 @@ typedef struct _list _CVLIST;
_LIST_INLINE CVPOS prefix##get_tail_pos_##type(_CVLIST*);\
_LIST_INLINE type* prefix##get_next_##type(CVPOS*);\
_LIST_INLINE type* prefix##get_prev_##type(CVPOS*);\
_LIST_INLINE int prefix##is_pos_##type(CVPOS pos);\
/* Modification functions*/\
_LIST_INLINE void prefix##clear_list_##type(_CVLIST*);\
_LIST_INLINE CVPOS prefix##add_head_##type(_CVLIST*, type*);\
@ -151,8 +152,8 @@ typedef struct _list _CVLIST;
}\
element->m_next = ((element_type*)l->m_head_free.m_pos);\
l->m_head_free.m_pos = element;
/*#define GET_FIRST_FREE(l) ((ELEMENT_##type*)(l->m_head_free.m_pos))*/
#define IMPLEMENT_LIST(type, prefix)\

View File

@ -233,7 +233,7 @@ typedef DiffC3<cv::Vec3i> Diff32sC3;
typedef DiffC1<float> Diff32fC1;
typedef DiffC3<cv::Vec3f> Diff32fC3;
cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
static cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
{
a[0] += b[0];
a[1] += b[1];
@ -440,7 +440,7 @@ cvFloodFill( CvArr* arr, CvPoint seed_point,
{
cv::Ptr<CvMat> tempMask;
cv::AutoBuffer<CvFFillSegment> buffer;
if( comp )
memset( comp, 0, sizeof(*comp) );
@ -491,16 +491,16 @@ cvFloodFill( CvArr* arr, CvPoint seed_point,
{
/*int elem_size = CV_ELEM_SIZE(type);
const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;
// check if the new value is different from the current value at the seed point.
// if they are exactly the same, use the generic version with mask to avoid infinite loops.
for( i = 0; i < elem_size; i++ )
if( seed_ptr[i] != ((uchar*)nv_buf)[i] )
break;
if( i == elem_size )
return;*/
if( type == CV_8UC1 )
icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.b[0],
comp, flags, buffer, buffer_size);
@ -632,7 +632,7 @@ int cv::floodFill( InputOutputArray _image, Point seedPoint,
}
int cv::floodFill( InputOutputArray _image, InputOutputArray _mask,
Point seedPoint, Scalar newVal, Rect* rect,
Point seedPoint, Scalar newVal, Rect* rect,
Scalar loDiff, Scalar upDiff, int flags )
{
CvConnectedComp ccomp;

View File

@ -230,7 +230,7 @@ void GMM::calcInverseCovAndDeterm( int ci )
Calculate beta - parameter of GrabCut algorithm.
beta = 1/(2*avg(sqr(||color[i] - color[j]||)))
*/
double calcBeta( const Mat& img )
static double calcBeta( const Mat& img )
{
double beta = 0;
for( int y = 0; y < img.rows; y++ )
@ -272,7 +272,7 @@ double calcBeta( const Mat& img )
Calculate weights of non-terminal vertices of the graph.
beta and gamma - parameters of GrabCut algorithm.
*/
void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
static void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
{
const double gammaDivSqrt2 = gamma / std::sqrt(2.0f);
leftW.create( img.rows, img.cols, CV_64FC1 );
@ -319,7 +319,7 @@ void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& upri
/*
Check size, type and element values of mask matrix.
*/
void checkMask( const Mat& img, const Mat& mask )
static void checkMask( const Mat& img, const Mat& mask )
{
if( mask.empty() )
CV_Error( CV_StsBadArg, "mask is empty" );
@ -342,7 +342,7 @@ void checkMask( const Mat& img, const Mat& mask )
/*
Initialize mask using rectangular.
*/
void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
static void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
{
mask.create( imgSize, CV_8UC1 );
mask.setTo( GC_BGD );
@ -358,7 +358,7 @@ void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
/*
Initialize GMM background and foreground models using kmeans algorithm.
*/
void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
static void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
{
const int kMeansItCount = 10;
const int kMeansType = KMEANS_PP_CENTERS;
@ -398,7 +398,7 @@ void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
/*
Assign GMMs components for each pixel.
*/
void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
static void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
{
Point p;
for( p.y = 0; p.y < img.rows; p.y++ )
@ -415,7 +415,7 @@ void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, c
/*
Learn GMMs parameters.
*/
void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
static void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
{
bgdGMM.initLearning();
fgdGMM.initLearning();
@ -443,7 +443,7 @@ void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGM
/*
Construct GCGraph
*/
void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
static void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
const Mat& leftW, const Mat& upleftW, const Mat& upW, const Mat& uprightW,
GCGraph<double>& graph )
{
@ -506,7 +506,7 @@ void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const
/*
Estimate segmentation using MaxFlow algorithm
*/
void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
static void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
{
graph.maxFlow();
Point p;
@ -533,7 +533,7 @@ void cv::grabCut( InputArray _img, InputOutputArray _mask, Rect rect,
Mat& mask = _mask.getMatRef();
Mat& bgdModel = _bgdModel.getMatRef();
Mat& fgdModel = _fgdModel.getMatRef();
if( img.empty() )
CV_Error( CV_StsBadArg, "image is empty" );
if( img.type() != CV_8UC3 )

View File

@ -114,7 +114,7 @@ icvHoughLinesStandard( const CvMat* img, float rho, float theta,
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
@ -249,7 +249,7 @@ icvHoughLinesSDiv( const CvMat* img,
/* Precalculating sin */
_sinTable.resize( 5 * tn * stn );
sinTable = &_sinTable[0];
for( index = 0; index < 5 * tn * stn; index++ )
sinTable[index] = (float)cos( stheta * index * 0.2f );
@ -449,7 +449,7 @@ icvHoughLinesSDiv( const CvMat* img,
h_get_next__index( &pos );
}
}
h_destroy_list__index(list);
}
@ -756,7 +756,7 @@ cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
}
else
CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
iparam1 = cvRound(param1);
iparam2 = cvRound(param2);
@ -842,7 +842,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
acols = accum->cols - 2;
adata = accum->data.i;
astep = accum->step/sizeof(adata[0]);
// Accumulate circle evidence for each edge pixel
for( y = 0; y < rows; y++ )
{
const uchar* edges_row = edges->data.ptr + y*edges->step;
@ -868,7 +868,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
x0 = cvRound((x*idp)*ONE);
y0 = cvRound((y*idp)*ONE);
// Step from min_radius to max_radius in both directions of the gradient
for( k = 0; k < 2; k++ )
{
x1 = x0 + min_radius * sx;
@ -894,7 +894,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
nz_count = nz->total;
if( !nz_count )
return;
//Find possible circle centers
for( y = 1; y < arows - 1; y++ )
{
for( x = 1; x < acols - 1; x++ )
@ -924,19 +924,19 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
dr = dp;
min_dist = MAX( min_dist, dp );
min_dist *= min_dist;
// For each found possible center
// Estimate radius and check support
for( i = 0; i < centers->total; i++ )
{
int ofs = *(int*)cvGetSeqElem( centers, i );
y = ofs/(acols+2);
x = ofs - (y)*(acols+2);
//Calculate circle's center in pixels
float cx = (float)((x + 0.5f)*dp), cy = (float)(( y + 0.5f )*dp);
float start_dist, dist_sum;
float r_best = 0, c[3];
int max_count = 0;
// Check distance with previously detected circles
for( j = 0; j < circles->total; j++ )
{
float* c = (float*)cvGetSeqElem( circles, j );
@ -946,7 +946,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
if( j < circles->total )
continue;
// Estimate best radius
cvStartReadSeq( nz, &reader );
for( j = k = 0; j < nz_count; j++ )
{
@ -982,7 +982,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
{
float r_cur = ddata[sort_buf[(j + start_idx)/2]];
if( (start_idx - j)*r_best >= max_count*r_cur ||
(r_best < FLT_EPSILON && start_idx - j >= max_count) )
{
r_best = r_cur;
max_count = start_idx - j;
@ -993,7 +993,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
}
dist_sum += d;
}
// Check if the circle has enough support
if( max_count > acc_threshold )
{
c[0] = cx;
@ -1103,9 +1103,9 @@ static void seqToMat(const CvSeq* seq, OutputArray _arr)
else
_arr.release();
}
}
void cv::HoughLines( InputArray _image, OutputArray _lines,
double rho, double theta, int threshold,
double srn, double stn )

View File

@ -406,42 +406,42 @@ static void fftShift(InputOutputArray _out)
merge(planes, out);
}
Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
static Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
{
Mat src = _src.getMat();
int type = src.type();
CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
int minr = peakLocation.y - (weightBoxSize.height >> 1);
int maxr = peakLocation.y + (weightBoxSize.height >> 1);
int minc = peakLocation.x - (weightBoxSize.width >> 1);
int maxc = peakLocation.x + (weightBoxSize.width >> 1);
Point2d centroid;
double sumIntensity = 0.0;
// clamp the values to min and max if needed.
if(minr < 0)
{
minr = 0;
}
if(minc < 0)
{
minc = 0;
}
if(maxr > src.rows - 1)
{
maxr = src.rows - 1;
}
if(maxc > src.cols - 1)
{
maxc = src.cols - 1;
}
if(type == CV_32FC1)
{
const float* dataIn = (const float*)src.data;
@ -454,7 +454,7 @@ Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weigh
centroid.y += (double)y*dataIn[x];
sumIntensity += (double)dataIn[x];
}
dataIn += src.cols;
}
}
@ -470,19 +470,19 @@ Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weigh
centroid.y += (double)y*dataIn[x];
sumIntensity += dataIn[x];
}
dataIn += src.cols;
}
}
sumIntensity += DBL_EPSILON; // prevent div0 problems...
centroid.x /= sumIntensity;
centroid.y /= sumIntensity;
return centroid;
}
}
cv::Point2d cv::phaseCorrelate(InputArray _src1, InputArray _src2, InputArray _window)

View File

@ -73,13 +73,13 @@ template<typename T, typename ST> struct RowSum : public BaseRowFilter
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
width = (width - 1)*cn;
for( k = 0; k < cn; k++, S++, D++ )
{
@ -108,7 +108,7 @@ template<typename ST, typename T> struct ColumnSum : public BaseColumnFilter
}
void reset() { sumCount = 0; }
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
{
int i;
@ -198,7 +198,7 @@ template<typename ST, typename T> struct ColumnSum : public BaseColumnFilter
}
cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
@ -325,7 +325,7 @@ void cv::blur( InputArray src, OutputArray dst,
Size ksize, Point anchor, int borderType )
{
boxFilter( src, dst, -1, ksize, anchor, true, borderType );
}
/****************************************************************************************\
Gaussian Blur
@ -422,7 +422,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
if( borderType != BORDER_CONSTANT )
{
if( src.rows == 1 )
@ -454,7 +454,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
namespace cv
{
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4244 )
#endif
@ -479,7 +479,7 @@ typedef struct
#if CV_SSE2
#define MEDIAN_HAVE_SIMD 1
static inline void histogram_add_simd( const HT x[16], HT y[16] )
{
const __m128i* rx = (const __m128i*)x;
@ -499,12 +499,12 @@ static inline void histogram_sub_simd( const HT x[16], HT y[16] )
_mm_store_si128(ry+0, r0);
_mm_store_si128(ry+1, r1);
}
#else
#define MEDIAN_HAVE_SIMD 0
#endif
static inline void histogram_add( const HT x[16], HT y[16] )
{
int i;
@ -667,14 +667,14 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
{
for( j = 0; j < 2*r; ++j )
histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
for( j = r; j < n-r; j++ )
{
int t = 2*r*r + 2*r, b, sum = 0;
HT* segment;
histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
// Find median at coarse level
for ( k = 0; k < 16 ; ++k )
{
@ -686,14 +686,14 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
}
}
assert( k < 16 );
/* Update corresponding histogram segment */
if ( luc[c][k] <= j-r )
{
memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
if ( luc[c][k] < j+r+1 )
{
histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
@ -708,9 +708,9 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
}
}
histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
/* Find median in segment */
segment = H[c].fine[k];
for ( b = 0; b < 16 ; b++ )
@ -734,7 +734,7 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
}
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( default: 4244 )
#endif
@ -910,7 +910,7 @@ struct MinMax16u
b = std::max(b, t);
}
};
struct MinMax16s
{
typedef short value_type;
@ -974,7 +974,7 @@ struct MinMaxVec16u
}
};
struct MinMaxVec16s
{
typedef short value_type;
@ -988,9 +988,9 @@ struct MinMaxVec16s
a = _mm_min_epi16(a, b);
b = _mm_max_epi16(b, t);
}
};
struct MinMaxVec32f
{
typedef float value_type;
@ -1033,7 +1033,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
Op op;
VecOp vop;
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
if( m == 3 )
{
if( size.width == 1 || size.height == 1 )
@ -1055,7 +1055,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
}
return;
}
size.width *= cn;
for( i = 0; i < size.height; i++, dst += dstep )
{
@ -1155,7 +1155,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
p[k*5+4] = rowk[j4];
}
op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
@ -1195,7 +1195,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
p[k*5+4] = vop.load(rowk+j+cn*2);
}
vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
@ -1229,13 +1229,13 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
}
}
void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
{
Mat src0 = _src0.getMat();
_dst.create( src0.size(), src0.type() );
Mat dst = _dst.getMat();
if( ksize <= 1 )
{
src0.copyTo(dst);
@ -1248,13 +1248,13 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
if (tegra::medianBlur(src0, dst, ksize))
return;
#endif
bool useSortNet = ksize == 3 || (ksize == 5
#if !CV_SSE2
&& src0.depth() > CV_8U
#endif
);
Mat src;
if( useSortNet )
{
@ -1315,7 +1315,7 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
@ -1422,7 +1422,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
@ -1433,9 +1433,9 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
radius = MAX(radius, 1);
d = radius*2 + 1;
// compute the min/max range for the input image (even if multichannel)
minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
@ -1454,7 +1454,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
float* expLUT = &_expLUT[0];
scale_index = kExpNumBins/len;
// initialize the exp LUT
for( i = 0; i < kExpNumBins+2; i++ )
{
@ -1467,7 +1467,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
else
expLUT[i] = 0.f;
}
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
@ -1481,7 +1481,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
for( i = 0; i < size.height; i++ )
{
const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
float* dptr = (float*)(dst.data + i*dst.step);
if( cn == 1 )
@ -1493,11 +1493,11 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
for( k = 0; k < maxk; k++ )
{
float val = sptr[j + space_ofs[k]];
float alpha = (float)(std::abs(val - val0)*scale_index);
int idx = cvFloor(alpha);
alpha -= idx;
float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
sum += val*w;
wsum += w;
}
dptr[j] = (float)(sum/wsum);
@ -1514,7 +1514,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
{
const float* sptr_k = sptr + j + space_ofs[k];
float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
float alpha = (float)((std::abs(b - b0) +
std::abs(g - g0) + std::abs(r - r0))*scale_index);
int idx = cvFloor(alpha);
alpha -= idx;
@ -1541,7 +1541,7 @@ void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
if( src.depth() == CV_8U )
bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
else if( src.depth() == CV_32F )

View File

@ -134,7 +134,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
if( size.width == cn )
buf[cn] = 0;
if( sqsum )
{
sqsum[-cn] = 0;
@ -148,7 +148,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
sum += sumstep - cn;
tilted += tiltedstep - cn;
buf += -cn;
if( sqsum )
sqsum += sqsumstep - cn;
@ -197,7 +197,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn];
buf[x] = t0;
}
if( sqsum )
sqsum++;
}
@ -205,10 +205,10 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
}
}
#define DEF_INTEGRAL_FUNC(suffix, T, ST, QT) \
void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
ST* tilted, size_t tiltedstep, Size size, int cn ) \
static void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
ST* tilted, size_t tiltedstep, Size size, int cn ) \
{ integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); }
DEF_INTEGRAL_FUNC(8u32s, uchar, int, double)
@ -217,7 +217,7 @@ DEF_INTEGRAL_FUNC(8u64f, uchar, double, double)
DEF_INTEGRAL_FUNC(32f, float, float, double)
DEF_INTEGRAL_FUNC(32f64f, float, double, double)
DEF_INTEGRAL_FUNC(64f, double, double, double)
typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep,
uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep,
Size size, int cn );
@ -236,19 +236,19 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
sdepth = CV_MAT_DEPTH(sdepth);
_sum.create( isize, CV_MAKETYPE(sdepth, cn) );
sum = _sum.getMat();
if( _tilted.needed() )
{
_tilted.create( isize, CV_MAKETYPE(sdepth, cn) );
tilted = _tilted.getMat();
}
if( _sqsum.needed() )
{
_sqsum.create( isize, CV_MAKETYPE(CV_64F, cn) );
sqsum = _sqsum.getMat();
}
IntegralFunc func = 0;
if( depth == CV_8U && sdepth == CV_32S )
@ -269,7 +269,7 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
func( src.data, src.step, sum.data, sum.step, sqsum.data, sqsum.step,
tilted.data, tilted.step, src.size(), cn );
}
void cv::integral( InputArray src, OutputArray sum, int sdepth )
{
integral( src, sum, noArray(), noArray(), sdepth );

View File

@ -49,7 +49,7 @@
#include "opencv2/core/core_c.h"
#include <stdio.h>
#if _MSC_VER >= 1200 || defined __BORLANDC__
#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__
#define cv_stricmp stricmp
#define cv_strnicmp strnicmp
#if defined WINCE

View File

@ -3478,7 +3478,7 @@ typedef struct CvBGCodeBookModel
CvBGCodeBookElem* freeList;
} CvBGCodeBookModel;
CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel();
CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,

View File

@ -41,7 +41,7 @@
#include "precomp.hpp"
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning(disable:4786) // Disable MSVC warnings in the standard library.
#pragma warning(disable:4100)
#pragma warning(disable:4512)
@ -49,7 +49,7 @@
#include <stdio.h>
#include <map>
#include <algorithm>
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning(default:4100)
#pragma warning(default:4512)
#endif
@ -148,7 +148,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
cvReleaseImage(&gray_img);
CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
}
CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));
img = gray_img;
@ -172,7 +172,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
etalon_size, points, &count) != 0;
if (count == 0)
continue;
// If found is true, it means all the points were found (count = num_points).
// If found is false but count is non-zero, it means that not all points were found.
@ -258,7 +258,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
{ 0.f, 1.f, 0.f, 0.f },
{ 0.f, 0.f, 1.f, 0.f },
{ transVect[0], transVect[1], transVect[2], 1.f } };
float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
{ rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
{ rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
@ -267,7 +267,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
MultMatrix(camera_info[c].mat, tmat, rmat);
// change the transformation of the cameras to put them in the world coordinate
// change the transformation of the cameras to put them in the world coordinate
// system we want to work with.
// Start with an identity matrix; then fill in the values to accomplish

View File

@ -53,13 +53,13 @@
#include "assert.h"
#include "math.h"
#if _MSC_VER >= 1400
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning(disable: 4512) // suppress "assignment operator could not be generated"
#endif
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
#undef __deref
#undef __valuetype
@ -72,23 +72,23 @@ public:
private:
struct node {
int dim; // split dimension; >=0 for nodes, -1 for leaves
__valuetype value; // if leaf, value of leaf
int left, right; // node indices of left and right branches
scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
};
typedef std::vector < node > node_array;
__deref deref; // requires operator() (__valuetype lhs,int dim)
node_array nodes; // node storage
int point_dim; // dimension of points (the k in kd-tree)
int root_node; // index of root node, -1 if empty tree
// for given set of point indices, compute dimension of highest variance
template < class __instype, class __valuector >
int dimension_of_highest_variance(__instype * first, __instype * last,
__valuector ctor) {
assert(last - first > 0);
accum_type maxvar = -std::numeric_limits < accum_type >::max();
@ -96,32 +96,32 @@ private:
for (int j = 0; j < point_dim; ++j) {
accum_type mean = 0;
for (__instype * k = first; k < last; ++k)
mean += deref(ctor(*k), j);
mean /= last - first;
accum_type var = 0;
for (__instype * k = first; k < last; ++k) {
accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
var += diff * diff;
}
var /= last - first;
assert(maxj != -1 || var >= maxvar);
if (var >= maxvar) {
maxvar = var;
maxj = j;
}
}
return maxj;
}
// given point indices and dimension, find index of median; (almost) modifies [first,last)
// such that points_in[first,median]<=point[median], points_in(median,last)>point[median].
// implemented as partial quicksort; expected linear perf.
template < class __instype, class __valuector >
__instype * median_partition(__instype * first, __instype * last,
int dim, __valuector ctor) {
assert(last - first > 0);
__instype *k = first + (last - first) / 2;
median_partition(first, last, k, dim, ctor);
@ -143,14 +143,14 @@ private:
};
template < class __instype, class __valuector >
void median_partition(__instype * first, __instype * last,
__instype * k, int dim, __valuector ctor) {
int pivot = (int)((last - first) / 2);
std::swap(first[pivot], last[-1]);
__instype *middle = std::partition(first, last - 1,
median_pr < __instype, __valuector >
(last[-1], dim, deref, ctor));
std::swap(*middle, last[-1]);
if (middle < k)
@ -170,36 +170,36 @@ private:
__instype *median = median_partition(first, last, dim, ctor);
__instype *split = median;
for (; split != last && deref(ctor(*split), dim) ==
deref(ctor(*median), dim); ++split);
if (split == last) { // leaf
int nexti = -1;
for (--split; split >= first; --split) {
int i = (int)nodes.size();
node & n = *nodes.insert(nodes.end(), node());
n.dim = -1;
n.value = ctor(*split);
n.left = -1;
n.right = nexti;
nexti = i;
}
return nexti;
} else { // node
int i = (int)nodes.size();
// note that recursive insert may invalidate this ref
node & n = *nodes.insert(nodes.end(), node());
n.dim = dim;
n.boundary = deref(ctor(*median), dim);
int left = insert(first, split, ctor);
nodes[i].left = left;
int right = insert(split, last, ctor);
nodes[i].right = right;
return i;
}
}
}
@ -214,21 +214,21 @@ private:
if (n.dim >= 0) { // node
if (deref(p, n.dim) <= n.boundary) // left
r = remove(&n.left, p);
else // right
r = remove(&n.right, p);
// if terminal, remove this node
if (n.left == -1 && n.right == -1)
*i = -1;
return r;
} else { // leaf
if (n.value == p) {
*i = n.right;
return true;
} else
return remove(&n.right, p);
}
}
@ -245,14 +245,14 @@ public:
}
// given points, initialize a balanced tree
CvKDTree(__valuetype * first, __valuetype * last, int _point_dim,
__deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, identity_ctor());
}
// given points, initialize a balanced tree
template < class __instype, class __valuector >
CvKDTree(__instype * first, __instype * last, int _point_dim,
__valuector ctor, __deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, ctor);
}
@ -266,7 +266,7 @@ public:
}
template < class __instype, class __valuector >
void set_data(__instype * first, __instype * last, int _point_dim,
__valuector ctor) {
__valuector ctor) {
point_dim = _point_dim;
nodes.clear();
nodes.reserve(last - first);
@ -292,9 +292,9 @@ public:
std::cout << " ";
const node & n = nodes[i];
if (n.dim >= 0) {
std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
nodes[i].boundary << std::endl;
std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
nodes[i].boundary << std::endl;
print(n.left, indent + 3);
print(n.right, indent + 3);
} else
@ -304,9 +304,9 @@ public:
////////////////////////////////////////////////////////////////////////////////////////
// bbf search
public:
struct bbf_nn { // info on found neighbors (approx k nearest)
const __valuetype *p; // nearest neighbor
accum_type dist; // distance from this neighbor to the query point d
struct bbf_nn { // info on found neighbors (approx k nearest)
const __valuetype *p; // nearest neighbor
accum_type dist; // distance from this neighbor to the query point d
bbf_nn(const __valuetype & _p, accum_type _dist)
: p(&_p), dist(_dist) {
}
@ -316,9 +316,9 @@ public:
};
typedef std::vector < bbf_nn > bbf_nn_pqueue;
private:
struct bbf_node { // info on branches not taken
int node; // corresponding node
accum_type dist; // minimum distance from bounds to query point
struct bbf_node { // info on branches not taken
int node; // corresponding node
accum_type dist; // minimum distance from bounds to query point
bbf_node(int _node, accum_type _dist)
: node(_node), dist(_dist) {
}
@ -346,10 +346,10 @@ private:
int bbf_branch(int i, const __desctype * d, bbf_pqueue & pq) const {
const node & n = nodes[i];
// push bbf_node with bounds of alternate branch, then branch
if (d[n.dim] <= n.boundary) { // left
if (d[n.dim] <= n.boundary) { // left
pq_alternate(n.right, pq, n.boundary - d[n.dim]);
return n.left;
} else { // right
} else { // right
pq_alternate(n.left, pq, d[n.dim] - n.boundary);
return n.right;
}
@ -366,11 +366,11 @@ private:
}
// called per candidate nearest neighbor; constructs new bbf_nn for
// candidate and adds it to priority queue of all candidates; if
// candidate and adds it to priority queue of all candidates; if
// queue len exceeds k, drops the point furthest from query point d.
template < class __desctype >
void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
const __desctype * d, const __valuetype & p) const {
void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
const __desctype * d, const __valuetype & p) const {
bbf_nn nn(p, distance(d, p));
if ((int) nn_pq.size() < k) {
nn_pq.push_back(nn);
@ -384,14 +384,14 @@ private:
}
public:
// finds (with high probability) the k nearest neighbors of d,
// finds (with high probability) the k nearest neighbors of d,
// searching at most emax leaves/bins.
// ret_nn_pq is an array containing the (at most) k nearest neighbors
// ret_nn_pq is an array containing the (at most) k nearest neighbors
// (see bbf_nn structure def above).
template < class __desctype >
int find_nn_bbf(const __desctype * d,
int k, int emax,
bbf_nn_pqueue & ret_nn_pq) const {
int find_nn_bbf(const __desctype * d,
int k, int emax,
bbf_nn_pqueue & ret_nn_pq) const {
assert(k > 0);
ret_nn_pq.clear();
@ -411,17 +411,17 @@ public:
int i;
for (i = bbf.node;
i != -1 && nodes[i].dim >= 0;
i = bbf_branch(i, d, tmp_pq));
i != -1 && nodes[i].dim >= 0;
i = bbf_branch(i, d, tmp_pq));
if (i != -1) {
// add points in leaf/bin to ret_nn_pq
do {
bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
} while (-1 != (i = nodes[i].right));
// add points in leaf/bin to ret_nn_pq
do {
bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
} while (-1 != (i = nodes[i].right));
--emax;
--emax;
}
}
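As a side note (not part of the patch): the eviction policy described in the bbf_new_nn comment above -- keep at most k candidates and drop the one farthest from the query when the queue is full -- is a bounded max-heap. A minimal standalone sketch with hypothetical names:

// Illustrative sketch, not part of this diff. Keeps the k smallest distances
// seen so far; when full, the current maximum is evicted if the new candidate
// is closer.
#include <algorithm>
#include <utility>
#include <vector>

typedef std::pair<double, int> Cand;   // (distance to query, point index)

static void add_candidate(std::vector<Cand>& heap, int k, const Cand& c) {
    if ((int)heap.size() < k) {
        heap.push_back(c);
        std::push_heap(heap.begin(), heap.end());   // max-heap keyed on distance
    } else if (c.first < heap.front().first) {
        std::pop_heap(heap.begin(), heap.end());    // move current maximum to back
        heap.back() = c;                            // replace it with the new candidate
        std::push_heap(heap.begin(), heap.end());   // restore the heap property
    }
}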
@ -433,27 +433,27 @@ public:
// orthogonal range search
private:
void find_ortho_range(int i, scalar_type * bounds_min,
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
if (i == -1)
return;
const node & n = nodes[i];
if (n.dim >= 0) { // node
if (bounds_min[n.dim] <= n.boundary)
find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
if (bounds_max[n.dim] > n.boundary)
find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
} else { // leaf
do {
inbounds.push_back(nodes[i].value);
inbounds.push_back(nodes[i].value);
} while (-1 != (i = nodes[i].right));
}
}
public:
// return all points that lie within the given bounds; inbounds is cleared
int find_ortho_range(scalar_type * bounds_min,
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
inbounds.clear();
find_ortho_range(root_node, bounds_min, bounds_max, inbounds);
return (int)inbounds.size();

View File

@ -237,9 +237,9 @@ public:
virtual float* GetFVVar(){return m_FVVar;}; /* returns a pointer to the array of maximal FV values; if it returns 0, the FV range does not exist */
};/* CvBlobTrackFVGenN */
CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
inline CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
inline CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
inline CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
#undef MAX_FV_SIZE
#define MAX_FV_SIZE 4
@ -408,7 +408,7 @@ public:
virtual float* GetFVVar(){return m_FVVar;}; /* returns a pointer to the array of maximal FV values; if it returns 0, the FV range does not exist */
};/* CvBlobTrackFVGenSS */
CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
inline CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
/*======================= TRAJECTORY ANALYZER MODULES =====================*/
/* Trajectory Analyser module */
@ -1510,7 +1510,7 @@ public:
}; /* CvBlobTrackAnalysisSVM. */
#if 0
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenP);}
@ -1522,3 +1522,4 @@ CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS()
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenSS);}
#endif

View File

@ -162,12 +162,15 @@ public:
}
}; /* class CvBlobTrackerOneKalman */
#if 0
static CvBlobTrackerOne* cvCreateModuleBlobTrackerOneKalman()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneKalman;
}
CvBlobTracker* cvCreateBlobTrackerKalman()
{
return cvCreateBlobTrackerList(cvCreateModuleBlobTrackerOneKalman);
}
#endif

View File

@ -716,7 +716,7 @@ void CvBlobTrackerOneMSFG::CollectHist(IplImage* pImg, IplImage* pMask, CvBlob*
}; /* CollectHist */
#endif
CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFG;
}
@ -739,7 +739,7 @@ public:
};
};
CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMS;
}
@ -1169,6 +1169,7 @@ public:
}; /* CvBlobTrackerOneMSPF */
CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF();
CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSPF;

View File

@ -47,7 +47,7 @@ typedef float DefHistType;
#define DefHistTypeMat CV_32F
#define HIST_INDEX(_pData) (((_pData)[0]>>m_ByteShift) + (((_pData)[1]>>(m_ByteShift))<<m_BinBit)+((pImgData[2]>>m_ByteShift)<<(m_BinBit*2)))
void calcKernelEpanechnikov(CvMat* pK)
static void calcKernelEpanechnikov(CvMat* pK)
{ /* Allocate kernel for histogram creation: */
int x,y;
int w = pK->width;
@ -445,7 +445,7 @@ public:
virtual void Release(){delete this;};
}; /*CvBlobTrackerOneMSFGS*/
CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFGS;
}

View File

@ -188,7 +188,7 @@ void CvBlobTrackPostProcKalman::Release()
delete this;
}
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcKalman;
}

View File

@ -106,12 +106,12 @@ public:
}
}; /* class CvBlobTrackPostProcTimeAver */
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(0);
}
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(1);
}

View File

@ -44,7 +44,7 @@
#undef quad
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4701 )
#endif
@ -99,18 +99,18 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
Stop();
if (latestPoints != NULL)
{
for( i = 0; i < MAX_CAMERAS; i++ )
cvFree( latestPoints + i );
}
if (latestPoints != NULL)
{
for( i = 0; i < MAX_CAMERAS; i++ )
cvFree( latestPoints + i );
}
if( type == CV_CALIB_ETALON_USER || type != etalonType )
{
if (etalonParams != NULL)
{
cvFree( &etalonParams );
}
if (etalonParams != NULL)
{
cvFree( &etalonParams );
}
}
etalonType = type;
@ -154,10 +154,10 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
if( etalonPointCount != pointCount )
{
if (etalonPoints != NULL)
{
cvFree( &etalonPoints );
}
if (etalonPoints != NULL)
{
cvFree( &etalonPoints );
}
etalonPointCount = pointCount;
etalonPoints = (CvPoint2D32f*)cvAlloc( arrSize );
}
@ -184,15 +184,15 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
break;
case CV_CALIB_ETALON_USER:
if (params != NULL)
{
memcpy( etalonParams, params, arrSize );
}
if (points != NULL)
{
memcpy( etalonPoints, points, arrSize );
}
break;
if (params != NULL)
{
memcpy( etalonParams, params, arrSize );
}
if (points != NULL)
{
memcpy( etalonPoints, points, arrSize );
}
break;
default:
assert(0);
@ -226,7 +226,7 @@ CvCalibFilter::GetEtalon( int* paramCount, const double** params,
void CvCalibFilter::SetCameraCount( int count )
{
Stop();
if( count != cameraCount )
{
for( int i = 0; i < cameraCount; i++ )
@ -245,7 +245,7 @@ void CvCalibFilter::SetCameraCount( int count )
}
}
bool CvCalibFilter::SetFrames( int frames )
{
if( frames < 5 )
@ -253,7 +253,7 @@ bool CvCalibFilter::SetFrames( int frames )
assert(0);
return false;
}
framesTotal = frames;
return true;
}
@ -304,7 +304,7 @@ void CvCalibFilter::Stop( bool calibrate )
cameraParams[i].imgSize[0] = (float)imgSize.width;
cameraParams[i].imgSize[1] = (float)imgSize.height;
// cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
// cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];
@ -315,7 +315,7 @@ void CvCalibFilter::Stop( bool calibrate )
memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));
mat.data.ptr = (uchar*)(cameraParams + i);
/* check resultant camera parameters: if there are some INF's or NAN's,
stop and reset results */
if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 ))
@ -342,7 +342,7 @@ void CvCalibFilter::Stop( bool calibrate )
{
stereo.fundMatr[i] = stereo.fundMatr[i];
}
}
}
@ -499,16 +499,16 @@ bool CvCalibFilter::GetLatestPoints( int idx, CvPoint2D32f** pts,
int* count, bool* found )
{
int n;
if( (unsigned)idx >= (unsigned)cameraCount ||
!pts || !count || !found )
{
assert(0);
return false;
}
n = latestCounts[idx];
*found = n > 0;
*count = abs(n);
*pts = latestPoints[idx];
@ -616,7 +616,7 @@ const CvCamera* CvCalibFilter::GetCameraParams( int idx ) const
assert(0);
return 0;
}
return isCalibrated ? cameraParams + idx : 0;
}
@ -630,7 +630,7 @@ const CvStereoCamera* CvCalibFilter::GetStereoParams() const
assert(0);
return 0;
}
return &stereo;
}
@ -640,9 +640,9 @@ bool CvCalibFilter::SetCameraParams( CvCamera* params )
{
CvMat mat;
int arrSize;
Stop();
if( !params )
{
assert(0);
@ -667,7 +667,7 @@ bool CvCalibFilter::SaveCameraParams( const char* filename )
if( isCalibrated )
{
int i, j;
FILE* f = fopen( filename, "w" );
if( !f ) return false;
@ -729,7 +729,7 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
return false;
SetCameraCount( d );
for( i = 0; i < cameraCount; i++ )
{
for( j = 0; j < (int)(sizeof(cameraParams[i])/sizeof(float)); j++ )
@ -763,16 +763,16 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
CV_Assert(values_read == 1);
}
}
fclose(f);
stereo.warpSize = cvSize( cvRound(cameraParams[0].imgSize[0]), cvRound(cameraParams[0].imgSize[1]));
isCalibrated = true;
return true;
}
@ -924,4 +924,4 @@ bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr )
return true;
}

View File

@ -45,7 +45,7 @@
//#include <limits.h>
//#include "cv.h"
//#include "highgui.h"
#if 0
#include <stdio.h>
/* Valery Mosyagin */
@ -53,7 +53,7 @@
/* ===== Functions for finding correspondences between images ===== */
/* Create feature points on the image and return their number. The points array is filled with the found points */
int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
static int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
{
int foundFeaturePoints = 0;
IplImage *grayImage = 0;
@ -175,9 +175,9 @@ int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
/* For given points1 (with pntStatus1) on image1, finds corresponding points2 on image2 and sets pntStatus2 for them */
/* Returns number of corresponding points */
int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
static int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
IplImage *image2,/* Image 2 */
CvMat *points1,
CvMat *points1,
CvMat *pntStatus1,
CvMat *points2,
CvMat *pntStatus2,
@ -203,7 +203,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
/* Test input data for errors */
/* Test for null pointers */
if( image1 == 0 || image2 == 0 ||
if( image1 == 0 || image2 == 0 ||
points1 == 0 || points2 == 0 ||
pntStatus1 == 0 || pntStatus2 == 0)
{
@ -226,7 +226,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
}
/* Test for matrices */
if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
!CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
@ -333,11 +333,11 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
pyrImage1, pyrImage2,
cornerPoints1, cornerPoints2,
numVisPoints, cvSize(10,10), 3,
status, errors,
status, errors,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
0/*CV_LKFLOW_PYR_A_READY*/ );
memset(stat2,0,sizeof(uchar)*numPoints);
int currVis = 0;
@ -393,7 +393,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
CvMat fundMatr;
double fundMatr_dat[9];
fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);
CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
@ -435,8 +435,9 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
return resNumCorrPoints;
}
/*-------------------------------------------------------------------------------------*/
int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
static int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
{
/* Add to existing points and status arrays new points or just grow */
CvMat *newOldPoint = 0;
@ -445,7 +446,7 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
CV_FUNCNAME( "icvGrowPointsAndStatus" );
__BEGIN__;
/* Test for errors */
if( oldPoints == 0 || oldStatus == 0 )
{
@ -546,8 +547,9 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
return newTotalNumber;
}
/*-------------------------------------------------------------------------------------*/
int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
static int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
CvMat *newPoints,/* New points */
CvMat *oldStatus,/* Status for old points */
CvMat *newStatus,
@ -560,7 +562,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
CvSeq* seq = 0;
int originalPoints = 0;
CV_FUNCNAME( "icvRemoveDoublePoins" );
__BEGIN__;
@ -624,7 +626,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
{
CV_ERROR( CV_StsOutOfRange, "Statuses must have 1 row" );
}
/* we have points on the image and want to add new points */
/* use subdivision for find nearest points */
@ -731,7 +733,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
/* Point is double. Turn it off */
/* Set status */
//newStatus->data.ptr[i] = 0;
/* No this is a double point */
//originalPoints--;
flag = 0;
@ -745,7 +747,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
__END__;
cvReleaseMemStorage( &storage );
return originalPoints;
@ -755,11 +757,11 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
void icvComputeProjectMatrix(CvMat* objPoints,CvMat* projPoints,CvMat* projMatr);
/*-------------------------------------------------------------------------------------*/
void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
static void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
{
/* Compute number of good points */
int num = cvCountNonZero(status);
/* Create arrays */
CvMat *objPoints = 0;
objPoints = cvCreateMat(4,num,CV_64F);
@ -802,7 +804,7 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
currVis++;
}
fprintf(file,"\n");
}
@ -820,17 +822,16 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
/*-------------------------------------------------------------------------------------*/
/* For given N images
/* For given N images
we have corresponding points on N images
computed projection matrices
reconstructed 4D points
we must compute
we must compute
*/
void icvAddNewImageToPrevious____(
static void icvAddNewImageToPrevious____(
IplImage *newImage,//Image to add
IplImage *oldImage,//Previous image
CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible)
@ -868,7 +869,7 @@ void icvAddNewImageToPrevious____(
int corrNum;
corrNum = icvFindCorrForGivenPoints( oldImage,/* Image 1 */
newImage,/* Image 2 */
oldPoints,
oldPoints,
oldPntStatus,
points2,
status,
@ -887,10 +888,10 @@ void icvAddNewImageToPrevious____(
// icvComputeProjectMatrix(objPoints4D,points2,&projMatr);
icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr);
cvCopy(&projMatr,newProjMatr);
/* Create new points and find correspondence */
icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus);
/* Good if we test new points before find corr points */
/* Find correspondence for new found points */
@ -947,7 +948,7 @@ void icvAddNewImageToPrevious____(
//CreateGood
/*-------------------------------------------------------------------------------------*/
int icvDeleteSparsInPoints( int numImages,
static int icvDeleteSparsInPoints( int numImages,
CvMat **points,
CvMat **status,
CvMat *wasStatus)/* status of previous configuration */
@ -979,7 +980,7 @@ int icvDeleteSparsInPoints( int numImages,
int numCoord;
numCoord = points[0]->rows;// !!! may be number of coordinates is not correct !!!
int i;
int currExistPoint;
currExistPoint = 0;
@ -1041,7 +1042,7 @@ int icvDeleteSparsInPoints( int numImages,
return comNumber;
}
#if 0
/*-------------------------------------------------------------------------------------*/
void icvGrowPointsArray(CvMat **points)
{
@ -1089,7 +1090,7 @@ int AddImageToStruct( IplImage *newImage,//Image to add
cvConvert(pntStatus,status);
int corrNum = FindCorrForGivenPoints(oldImage,newImage,oldPoints,newPoints,status);
/* Status has new status of points */
CvMat projMatr;

View File

@ -48,7 +48,7 @@
Stan Birchfield and Carlo Tomasi
International Journal of Computer Vision,
35(3): 269-293, December 1999.
This implementation uses a different cost function that results in
O(pixPerRow*maxDisparity) complexity of dynamic programming stage versus
O(pixPerRow*log(pixPerRow)*maxDisparity) in the above paper.
@ -68,7 +68,7 @@
typedef struct _CvDPCell
{
uchar step; //local-optimal step
int sum; //current sum
int sum; //current sum
}_CvDPCell;
typedef struct _CvRightImData
@ -79,17 +79,17 @@ typedef struct _CvRightImData
#define CV_IMAX3(a,b,c) ((temp3 = (a) >= (b) ? (a) : (b)),(temp3 >= (c) ? temp3 : (c)))
#define CV_IMIN3(a,b,c) ((temp3 = (a) <= (b) ? (a) : (b)),(temp3 <= (c) ? temp3 : (c)))
void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
static void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
uchar* disparities,
CvSize size, int widthStep,
int maxDisparity,
float _param1, float _param2,
int maxDisparity,
float _param1, float _param2,
float _param3, float _param4,
float _param5 )
{
int x, y, i, j, temp3;
int d, s;
int dispH = maxDisparity + 3;
int dispH = maxDisparity + 3;
uchar *dispdata;
int imgW = size.width;
int imgH = size.height;
@ -103,22 +103,22 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
int param5 = cvRound(_param5);
#define CELL(d,x) cells[(d)+(x)*dispH]
uchar* dsi = (uchar*)cvAlloc(sizeof(uchar)*imgW*dispH);
uchar* edges = (uchar*)cvAlloc(sizeof(uchar)*imgW*imgH);
_CvDPCell* cells = (_CvDPCell*)cvAlloc(sizeof(_CvDPCell)*imgW*MAX(dispH,(imgH+1)/2));
_CvRightImData* rData = (_CvRightImData*)cvAlloc(sizeof(_CvRightImData)*imgW);
int* reliabilities = (int*)cells;
for( y = 0; y < imgH; y++ )
{
for( y = 0; y < imgH; y++ )
{
uchar* srcdata1 = src1 + widthStep * y;
uchar* srcdata2 = src2 + widthStep * y;
uchar* srcdata2 = src2 + widthStep * y;
//init rData
prevval = prev = srcdata2[0];
for( j = 1; j < imgW; j++ )
{
{
curr = srcdata2[j];
val = (uchar)((curr + prev)>>1);
rData[j-1].max_val = (uchar)CV_IMAX3( val, prevval, prev );
@ -130,12 +130,12 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// fill dissimilarity space image
for( i = 1; i <= maxDisparity + 1; i++ )
{
{
dsi += imgW;
rData--;
for( j = i - 1; j < imgW - 1; j++ )
{
int t;
{
int t;
if( (t = srcdata1[j] - rData[j+1].max_val) >= 0 )
{
dsi[j] = (uchar)t;
@ -160,36 +160,36 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
for( j = 3; j < imgW-4; j++ )
{
edges[y*imgW+j] = 0;
if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
CV_IMIN3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 1;
}
if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
CV_IMIN3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 2;
}
}
}
}
//find correspondence using dynamical programming
//init DP table
for( x = 0; x < imgW; x++ )
for( x = 0; x < imgW; x++ )
{
CELL(0,x).sum = CELL(dispH-1,x).sum = ICV_MAX_DP_SUM_VAL;
CELL(0,x).step = CELL(dispH-1,x).step = ICV_DP_STEP_LEFT;
}
for( d = 2; d < dispH; d++ )
for( d = 2; d < dispH; d++ )
{
CELL(d,d-2).sum = ICV_MAX_DP_SUM_VAL;
CELL(d,d-2).step = ICV_DP_STEP_UP;
}
}
CELL(1,0).sum = 0;
CELL(1,0).step = ICV_DP_STEP_LEFT;
for( x = 1; x < imgW; x++ )
{
{
int d = MIN( x + 1, maxDisparity + 1);
uchar* _edges = edges + y*imgW + x;
int e0 = _edges[0] & 1;
@ -201,17 +201,17 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
int sum[3];
//check left step
sum[0] = _cell[d-dispH].sum - param2;
sum[0] = _cell[d-dispH].sum - param2;
//check up step
if( _cell[d+1].step != ICV_DP_STEP_DIAG && e0 )
{
sum[1] = _cell[d+1].sum + param1;
if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
{
int t;
sum[2] = _cell[d-1-dispH].sum + param1;
t = sum[1] < sum[0];
@ -223,7 +223,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
_cell[d].sum = sum[t] + s;
}
else
{
{
_cell[d].step = ICV_DP_STEP_DIAG;
_cell[d].sum = sum[2] + s;
}
@ -242,7 +242,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
}
}
else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
{
sum[2] = _cell[d-1-dispH].sum + param1;
if( sum[0] <= sum[2] )
@ -278,25 +278,25 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
min_val = CELL(i,imgW-1).sum;
}
}
//track optimal path
for( x = imgW - 1; x > 0; x-- )
{
{
dispdata[x] = (uchar)(d - 1);
while( CELL(d,x).step == ICV_DP_STEP_UP ) d++;
if ( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
s = x;
while( CELL(d,x).step == ICV_DP_STEP_DIAG )
while( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
d--;
x--;
d--;
x--;
}
for( i = x; i < s; i++ )
{
dispdata[i] = (uchar)(d-1);
}
}
}
}
}//for x
}// for y
@ -319,9 +319,9 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
{
for( y = 1; y < imgH - 1; y++ )
{
if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) -
CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) -
CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+x] |= 4;
@ -332,14 +332,14 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
}
//remove along any particular row, every gradient
//remove along any particular row, every gradient
//for which two adjacent columns do not agree.
for( y = 0; y < imgH; y++ )
{
prev = edges[y*imgW];
for( x = 1; x < imgW - 1; x++ )
{
curr = edges[y*imgW+x];
curr = edges[y*imgW+x];
if( (curr & 4) &&
( !( prev & 4 ) ||
!( edges[y*imgW+x+1] & 4 ) ) )
@ -360,41 +360,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
;
s = y - i;
for( ; i < y; i++ )
{
{
reliabilities[i*imgW+x] = s;
}
}
}
}
//Y - propagate reliable regions
}
//Y - propagate reliable regions
for( x = 0; x < imgW; x++ )
{
{
for( y = 0; y < imgH; y++ )
{
{
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 4) &&
d > 0 )//highly || moderately
{
{
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = y - 1; i >= 0; i-- )
{
if( ( edges[i*imgW+x] & 4 ) ||
( dest[i*widthStep+x] < d &&
( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
disparities[i*widthStep+x] = (uchar)d;
}
disparities[i*widthStep+x] = (uchar)d;
}
//down propagation
for( i = y + 1; i < imgH; i++ )
{
if( ( edges[i*imgW+x] & 4 ) ||
( dest[i*widthStep+x] < d &&
( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
disparities[i*widthStep+x] = (uchar)d;
@ -417,41 +417,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
for( ; x < imgW && dest[y*widthStep+x] == dest[y*widthStep+x-1]; x++ );
s = x - i;
for( ; i < x; i++ )
{
{
reliabilities[y*imgW+i] = s;
}
}
}
}
//X - propagate reliable regions
for( y = 0; y < imgH; y++ )
{
}
//X - propagate reliable regions
for( y = 0; y < imgH; y++ )
{
for( x = 0; x < imgW; x++ )
{
{
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 1) &&
d > 0 )//highly || moderately
{
{
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = x - 1; i >= 0; i-- )
{
if( (edges[y*imgW+i] & 1) ||
( dest[y*widthStep+i] < d &&
( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
}
}
//down propagation
for( i = x + 1; i < imgW; i++ )
{
if( (edges[y*imgW+i] & 1) ||
( dest[y*widthStep+i] < d &&
( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
@ -466,10 +466,10 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
//release resources
cvFree( &dsi );
cvFree( &edges );
cvFree( &cells );
cvFree( &rData );
cvFree( &dsi );
cvFree( &edges );
cvFree( &cells );
cvFree( &rData );
}
@ -483,7 +483,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// rightImage - right image of stereo-pair (format 8uC1).
// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
// dispImage - destination disparity image
// maxDisparity - maximal disparity
// maxDisparity - maximal disparity
// param1, param2, param3, param4, param5 - parameters of algorithm
// Returns:
// Notes:
@ -491,43 +491,43 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// All images must have format 8uC1.
//F*/
CV_IMPL void
cvFindStereoCorrespondence(
cvFindStereoCorrespondence(
const CvArr* leftImage, const CvArr* rightImage,
int mode,
CvArr* depthImage,
int maxDisparity,
double param1, double param2, double param3,
int maxDisparity,
double param1, double param2, double param3,
double param4, double param5 )
{
{
CV_FUNCNAME( "cvFindStereoCorrespondence" );
__BEGIN__;
CvMat *src1, *src2;
CvMat *src1, *src2;
CvMat *dst;
CvMat src1_stub, src2_stub, dst_stub;
int coi;
int coi;
CV_CALL( src1 = cvGetMat( leftImage, &src1_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( src2 = cvGetMat( rightImage, &src2_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( dst = cvGetMat( depthImage, &dst_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
// check args
if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
// check args
if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
CV_MAT_TYPE( dst->type ) != CV_8UC1) CV_ERROR(CV_StsUnsupportedFormat,
"All images must be single-channel and have 8u" );
"All images must be single-channel and have 8u" );
if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ) )
CV_ERROR( CV_StsUnmatchedSizes, "" );
if( maxDisparity <= 0 || maxDisparity >= src1->width || maxDisparity > 255 )
CV_ERROR(CV_StsOutOfRange,
CV_ERROR(CV_StsOutOfRange,
"parameter /maxDisparity/ is out of range");
if( mode == CV_DISPARITY_BIRCHFIELD )
{
if( param1 == CV_UNDEF_SC_PARAM ) param1 = CV_IDP_BIRCHFIELD_PARAM1;
@ -536,10 +536,10 @@ cvFindStereoCorrespondence(
if( param4 == CV_UNDEF_SC_PARAM ) param4 = CV_IDP_BIRCHFIELD_PARAM4;
if( param5 == CV_UNDEF_SC_PARAM ) param5 = CV_IDP_BIRCHFIELD_PARAM5;
CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
src2->data.ptr, dst->data.ptr,
CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
src2->data.ptr, dst->data.ptr,
cvGetMatSize( src1 ), src1->step,
maxDisparity, (float)param1, (float)param2, (float)param3,
maxDisparity, (float)param1, (float)param2, (float)param3,
(float)param4, (float)param5 ) );
}
else
@ -547,7 +547,7 @@ cvFindStereoCorrespondence(
CV_ERROR( CV_StsBadArg, "Unsupported mode of function" );
}
__END__;
__END__;
}
/* End of file. */
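A minimal usage sketch for cvFindStereoCorrespondence as defined above (not part of the patch): the image file names are hypothetical and the header names are assumptions; the arguments follow the checks visible in the function body (both inputs 8-bit single-channel, mode CV_DISPARITY_BIRCHFIELD, maxDisparity greater than 0, less than the image width and at most 255, and CV_UNDEF_SC_PARAM to request the built-in defaults for param1..param5).

// Hypothetical example, not part of this diff.
#include "cvaux.h"     // cvFindStereoCorrespondence (header name assumed)
#include "highgui.h"   // cvLoadImage / cvReleaseImage (header name assumed)

int main( void )
{
    IplImage* left  = cvLoadImage( "left.png",  CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* right = cvLoadImage( "right.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !left || !right ) return -1;

    IplImage* disp = cvCreateImage( cvGetSize(left), IPL_DEPTH_8U, 1 );

    cvFindStereoCorrespondence( left, right, CV_DISPARITY_BIRCHFIELD, disp,
                                64 /* maxDisparity */,
                                CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                                CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                                CV_UNDEF_SC_PARAM );

    cvReleaseImage( &disp );
    cvReleaseImage( &right );
    cvReleaseImage( &left );
    return 0;
}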

View File

@ -41,7 +41,7 @@
#include "precomp.hpp"
CvStatus CV_STDCALL
static CvStatus
icvJacobiEigens_32f(float *A, float *V, float *E, int n, float eps)
{
int i, j, k, ind;

View File

@ -83,7 +83,7 @@ static int CompareContour(const void* a, const void* b, void* )
return (dx < wt && dy < ht);
}
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
static void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{ /* Create contours: */
IplImage* pIB = NULL;
CvSeq* cnt = NULL;

View File

@ -160,9 +160,5 @@ public:
};
/* Blob detector constructor: */
CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
//CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}

File diff suppressed because it is too large

View File

@ -48,7 +48,7 @@
#include "_kdtree.hpp"
#include "_featuretree.h"
#if _MSC_VER >= 1400
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning(disable:4996) // suppress "function call with parameters may be unsafe" in std::copy
#endif
@ -95,7 +95,7 @@ class CvKDTreeWrap : public CvFeatureTree {
for (int j = 0; j < d->rows; ++j) {
const typename __treetype::scalar_type* dj =
(const typename __treetype::scalar_type*) dptr;
(const typename __treetype::scalar_type*) dptr;
int* resultsj = (int*) resultsptr;
double* distj = (double*) distptr;
@ -103,8 +103,8 @@ class CvKDTreeWrap : public CvFeatureTree {
assert((int)nn.size() <= k);
for (unsigned int j = 0; j < nn.size(); ++j) {
*resultsj++ = *nn[j].p;
*distj++ = nn[j].dist;
*resultsj++ = *nn[j].p;
*distj++ = nn[j].dist;
}
std::fill(resultsj, resultsj + k - nn.size(), -1);
std::fill(distj, distj + k - nn.size(), 0);
@ -117,16 +117,16 @@ class CvKDTreeWrap : public CvFeatureTree {
template <class __treetype>
int find_ortho_range(CvMat* bounds_min, CvMat* bounds_max,
CvMat* results) {
CvMat* results) {
int rn = results->rows * results->cols;
std::vector<int> inbounds;
dispatch_cvtype(mat, ((__treetype*)data)->
find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
(typename __treetype::scalar_type*)bounds_max->data.ptr,
inbounds));
find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
(typename __treetype::scalar_type*)bounds_max->data.ptr,
inbounds));
std::copy(inbounds.begin(),
inbounds.begin() + std::min((int)inbounds.size(), rn),
(int*) results->data.ptr);
inbounds.begin() + std::min((int)inbounds.size(), rn),
(int*) results->data.ptr);
return (int)inbounds.size();
}
@ -135,7 +135,7 @@ class CvKDTreeWrap : public CvFeatureTree {
public:
CvKDTreeWrap(CvMat* _mat) : mat(_mat) {
// * a flag parameter should tell us whether
// * (a) user ensures *mat outlives *this and is unchanged,
// * (a) user ensures *mat outlives *this and is unchanged,
// * (b) we take reference and user ensures mat is unchanged,
// * (c) we copy data, (d) we own and release data.
@ -144,8 +144,8 @@ public:
tmp[j] = j;
dispatch_cvtype(mat, data = new tree_type
(&tmp[0], &tmp[0] + tmp.size(), mat->cols,
tree_type::deref_type(mat)));
(&tmp[0], &tmp[0] + tmp.size(), mat->cols,
tree_type::deref_type(mat)));
}
~CvKDTreeWrap() {
dispatch_cvtype(mat, delete (tree_type*) data);
@ -185,15 +185,15 @@ public:
assert(CV_MAT_TYPE(results->type) == CV_32SC1);
dispatch_cvtype(mat, find_nn<tree_type>
(desc, k, emax, results, dist));
(desc, k, emax, results, dist));
}
int FindOrthoRange(CvMat* bounds_min, CvMat* bounds_max,
CvMat* results) {
CvMat* results) {
bool free_bounds = false;
int count = -1;
if (bounds_min->cols * bounds_min->rows != dims() ||
bounds_max->cols * bounds_max->rows != dims())
bounds_max->cols * bounds_max->rows != dims())
CV_Error(CV_StsUnmatchedSizes, "bounds_{min,max} must 1 x dims or dims x 1");
if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(bounds_max->type))
CV_Error(CV_StsUnmatchedFormats, "bounds_{min,max} must have same type");
@ -218,7 +218,7 @@ public:
assert(bounds_max->rows * bounds_max->cols == dims());
dispatch_cvtype(mat, count = find_ortho_range<tree_type>
(bounds_min, bounds_max,results));
(bounds_min, bounds_max,results));
if (free_bounds) {
cvReleaseMat(&bounds_min);
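For context (not part of the patch): this wrapper is normally reached through the legacy C feature-tree entry points. The sketch below assumes cvCreateKDTree / cvFindFeatures / cvReleaseFeatureTree with signatures recalled from the legacy headers; verify them before relying on the exact interface. The matrix types follow the asserts above (CV_32SC1 indices, CV_64FC1 distances), and the descriptor matrix must stay valid for the lifetime of the tree, as noted in the constructor comment.

// Hypothetical sketch, not part of this diff; entry-point names and signatures
// are assumptions recalled from the legacy API.
#include "cv.h"   // header name assumed

static void knn_example( void )
{
    CvMat* desc    = cvCreateMat( 1000, 64, CV_32FC1 );  // reference descriptors
    CvMat* query   = cvCreateMat( 10,   64, CV_32FC1 );  // query descriptors
    CvMat* indices = cvCreateMat( 10, 2, CV_32SC1 );     // k = 2 neighbor indices
    CvMat* dist    = cvCreateMat( 10, 2, CV_64FC1 );     // corresponding distances

    cvZero( desc );   // placeholder data; fill with real descriptors in practice
    cvZero( query );

    CvFeatureTree* tr = cvCreateKDTree( desc );          // *desc must stay valid
    cvFindFeatures( tr, query, indices, dist, 2, 20 );   // emax = 20 leaves visited
    cvReleaseFeatureTree( tr );

    cvReleaseMat( &dist );
    cvReleaseMat( &indices );
    cvReleaseMat( &query );
    cvReleaseMat( &desc );
}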

View File

@ -1247,7 +1247,7 @@ int _cvSolveEqu1th(T c1, T c0, T* X);
vertices_number: in, number of vertices in polygon
Return :
--------------------------------------------------------------------------*/
void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
static void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
{
int N = 2*vertices_number;
cvSetSeqBlockSize(pVoronoiDiagramInt->SiteSeq,N*pVoronoiDiagramInt->SiteSeq->elem_size);

View File

@ -50,6 +50,7 @@
typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
#if 0
/* Optimization using Levenberg-Marquardt */
void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
pointer_LMFunc function,
@ -75,7 +76,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
CvMat *matrJtJN = 0;
CvMat *matrJt = 0;
CvMat *vectB = 0;
CV_FUNCNAME( "cvLevenbegrMarquardtOptimization" );
__BEGIN__;
@ -104,7 +105,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector X0 must be 1" );
}
if( observRes->cols != 1 )
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector observed rusult must be 1" );
@ -157,8 +158,8 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
/* Print result of function to file */
/* Compute error */
cvSub(observRes,resFunc,error);
cvSub(observRes,resFunc,error);
//valError = error_function(observRes,resFunc);
/* Need to use new version of computing error (norm) */
valError = cvNorm(observRes,resFunc);
@ -169,7 +170,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
/* Define optimal delta for J'*J*delta=J'*error */
/* compute J'J */
cvMulTransposed(Jac,matrJtJ,1);
cvCopy(matrJtJ,matrJtJN);
/* compute J'*error */
@ -244,6 +245,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
return;
}
#endif
/*------------------------------------------------------------------------------*/
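Not part of the patch: the core update computed inside the loop above -- solve J'J * delta = J' * error, typically with damping on the diagonal -- can be sketched in a few lines of the C++ API. Names here are illustrative only.

// Illustrative sketch, not part of this diff: one damped normal-equations
// solve of the kind performed by the Levenberg-Marquardt loop above.
#include "opencv2/core/core.hpp"

static cv::Mat lmStep( const cv::Mat& J, const cv::Mat& error, double lambda )
{
    cv::Mat JtJ = J.t() * J;                                         // J'J
    JtJ += lambda * cv::Mat::eye( JtJ.rows, JtJ.cols, JtJ.type() );  // damping term
    cv::Mat delta;
    cv::solve( JtJ, J.t() * error, delta, cv::DECOMP_SVD );          // J'J * delta = J'error
    return delta;                                                    // parameter update
}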
#if 0

View File

@ -65,9 +65,13 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
*/
#define TRACK_BUNDLE_FILE "d:\\test\\bundle.txt"
void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPoints,
CvMat** pointsPres, int numImages,
CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon );
/* ============== Bundle adjustment optimization ================= */
void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
static void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
{
/* Compute derivate for given projection matrix points and status of points */
@ -201,7 +205,7 @@ void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMa
}
/*======================================================================================*/
void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
static void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
{
CV_FUNCNAME( "icvComputeDerivateProjAll" );
__BEGIN__;
@ -228,7 +232,7 @@ void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **point
}
/*======================================================================================*/
void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
static void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
{
CV_FUNCNAME( "icvComputeDerivatePoints" );
@ -267,7 +271,7 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
{
CV_ERROR( CV_StsOutOfRange, "Size of projection matrix (projMatr) must be 3x4" );
}
if( !CV_IS_MAT(presPoints) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Status must be a matrix 1xN" );
@ -282,10 +286,10 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
{
CV_ERROR( CV_StsUnsupportedFormat, "derivPoint must be a matrix 2 x 4VisNum" );
}
/* ----- End test ----- */
/* ----- End test ----- */
/* Compute derivates by points */
double p[12];
int i;
for( i = 0; i < 12; i++ )
@ -311,16 +315,16 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
piX[0] = X[0]*p[0] + X[1]*p[1] + X[2]*p[2] + X[3]*p[3];
piX[1] = X[0]*p[4] + X[1]*p[5] + X[2]*p[6] + X[3]*p[7];
piX[2] = X[0]*p[8] + X[1]*p[9] + X[2]*p[10] + X[3]*p[11];
int i,j;
double tmp3 = 1/(piX[2]*piX[2]);
for( j = 0; j < 2; j++ )//for x and y
{
for( i = 0; i < 4; i++ )// for X,Y,Z,W
{
cvmSet( derivPoint,
cvmSet( derivPoint,
j, currVisPoint*4+i,
(p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3 );
}
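Written out (not part of the patch), the loop above is the quotient rule for the projected coordinates. With $x' = P X$ and image coordinates $u_j = x'_j / x'_3$ for $j \in \{1, 2\}$:

$\frac{\partial u_j}{\partial X_i} = \frac{P_{j,i}\, x'_3 - P_{3,i}\, x'_j}{(x'_3)^2}, \quad i = 1, \dots, 4$

which is exactly (p[j*4+i]*piX[2] - p[8+i]*piX[j]) * tmp3 with tmp3 = 1/(piX[2]*piX[2]).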
@ -337,8 +341,9 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
__END__;
return;
}
/*======================================================================================*/
void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
static void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
{
CV_FUNCNAME( "icvComputeDerivatePointsAll" );
__BEGIN__;
@ -364,7 +369,7 @@ void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **poi
return;
}
/*======================================================================================*/
void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
static void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
{
int *shifts = 0;
@ -404,10 +409,10 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
{
if( cvmGet(presPoints[currImage],0,currPoint) > 0 )
{
sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+j);
sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+j);
}
}
@ -429,11 +434,11 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
__END__;
cvFree( &shifts);
return;
}
/*======================================================================================*/
void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
static void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
{
CV_FUNCNAME( "icvComputeMatrixVAll" );
__BEGIN__;
@ -460,7 +465,7 @@ void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
return;
}
/*======================================================================================*/
void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
static void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
{
CV_FUNCNAME( "icvComputeMatrixW" );
__BEGIN__;
@ -509,10 +514,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
for( int currCol = 0; currCol < 4; currCol++ )
{
double sum;
sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
cvmGet(pointDeriv[currImage],0,currVis*4+currCol);
sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
cvmGet(pointDeriv[currImage],1,currVis*4+currCol);
cvmSet(matrW,currImage*12+currLine,currPoint*4+currCol,sum);
@ -529,7 +534,7 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
}
}
}
#ifdef TRACK_BUNDLE
{
FILE *file;
@ -560,9 +565,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
__END__;
return;
}
/*======================================================================================*/
/* Compute jacobian mult projection matrices error */
void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
static void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
{
CV_FUNCNAME( "icvComputeJacErrorProj" );
__BEGIN__;
@ -596,7 +602,7 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
double sum = 0;
for( int i = 0; i < num; i++ )
{
sum += cvmGet(projDeriv[currImage],i,currCol) *
sum += cvmGet(projDeriv[currImage],i,currCol) *
cvmGet(projErrors[currImage],i%2,i/2);
}
cvmSet(jacProjErr,currImage*12+currCol,0,sum);
@ -627,9 +633,10 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
__END__;
return;
}
/*======================================================================================*/
/* Compute jacobian mult points error */
void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
static void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
{
int *shifts = 0;
@ -734,6 +741,7 @@ void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors
}
/*======================================================================================*/
/* Reconstruct 4D points using status */
void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat** presPoints,
CvMat *points4D,int numImages,CvMat **projError)
@ -797,7 +805,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
numVisProj++;
}
}
if( numVisProj < 2 )
{
/* This point can't be reconstructed */
@ -821,7 +829,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
y = cvmGet(projPoints[currImage],1,currPoint);
for( int k = 0; k < 4; k++ )
{
matrA_dat[currVisProj*12 + k] =
matrA_dat[currVisProj*12 + k] =
x * cvmGet(projMatrs[currImage],2,k) - cvmGet(projMatrs[currImage],0,k);
matrA_dat[currVisProj*12+4 + k] =
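For context (not part of the patch): the rows assembled into matrA_dat are the standard linear (DLT) triangulation constraints. For each visible view with projection matrix $P$ (rows $P^{(1)}, P^{(2)}, P^{(3)}$) and measured point $(x, y)$, the homogeneous point $X$ satisfies, up to noise,

$(x\,P^{(3)} - P^{(1)})\,X = 0, \qquad (y\,P^{(3)} - P^{(2)})\,X = 0,$

so stacking two such rows per visible view gives a system $A X = 0$ whose non-trivial solution (up to scale) is usually taken as the null-space direction of $A$, e.g. via SVD.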
@ -854,7 +862,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
CvMat point3D;
double point3D_dat[3];
point3D = cvMat(3,1,CV_64F,point3D_dat);
int currPoint;
int numVis = 0;
double totalError = 0;
@ -897,7 +905,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
/*======================================================================================*/
void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
static void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
{
CV_FUNCNAME( "icvProjPointsStatusFunc" );
__BEGIN__;
@ -943,7 +951,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
fclose(file);
}
#endif
int currImage;
for( currImage = 0; currImage < numImages; currImage++ )
{
@ -969,7 +977,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
fclose(file);
}
#endif
cvmMul(projMatrs[currImage],&point4D,&point3D);
double w = point3D_dat[2];
cvmSet(projPoints[currImage],0,currVisPoint,point3D_dat[0]/w);
@ -998,11 +1006,11 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
}
/*======================================================================================*/
void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
static void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
{
/* Free each matrix */
int currMatr;
if( *matrArray != 0 )
{/* Need delete */
for( currMatr = 0; currMatr < numMatr; currMatr++ )
@ -1015,7 +1023,7 @@ void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
}
/*======================================================================================*/
void *icvClearAlloc(int size)
static void *icvClearAlloc(int size)
{
void *ptr = 0;
@ -1047,6 +1055,7 @@ int icvDeleteSparsInPoints( int numImages,
}
#endif
/*======================================================================================*/
/* !!! it may be useful to return the norm of the error */
/* !!! may not work correctly when not all 4D points are visible */
@ -1054,15 +1063,15 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
CvMat** pointsPres, int numImages,
CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon )
{
CvMat *vectorX_points4D = 0;
CvMat **vectorX_projMatrs = 0;
CvMat **vectorX_projMatrs = 0;
CvMat *newVectorX_points4D = 0;
CvMat **newVectorX_projMatrs = 0;
CvMat *changeVectorX_points4D = 0;
CvMat *changeVectorX_projMatrs = 0;
CvMat *changeVectorX_projMatrs = 0;
CvMat **observVisPoints = 0;
CvMat **projVisPoints = 0;
@ -1097,17 +1106,17 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
{
CV_ERROR( CV_StsOutOfRange, "Number of images must be more than zero" );
}
if( maxIter < 1 || maxIter > 2000 )
{
CV_ERROR( CV_StsOutOfRange, "Maximum number of iteration must be in [1..1000]" );
}
if( epsilon < 0 )
{
CV_ERROR( CV_StsOutOfRange, "Epsilon parameter must be >= 0" );
}
if( !CV_IS_MAT(resultPoints4D) )
{
CV_ERROR( CV_StsUnsupportedFormat, "resultPoints4D must be a matrix 4 x NumPnt" );
@ -1139,7 +1148,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
CV_CALL( changeVectorX_projMatrs = cvCreateMat(3,4,CV_64F));
int currImage;
/* ----- Test input params ----- */
for( currImage = 0; currImage < numImages; currImage++ )
{
@ -1355,7 +1364,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(vectorX_projMatrs[i]);
fprintf(file," test 6.01 prev normProj=%lf\n",norm);
}
fclose(file);
}
#endif
@ -1384,7 +1393,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 prev matrsUk=%lf\n",norm);
}
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
@ -1427,7 +1436,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 post1 matrsUk=%lf\n",norm);
}
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
@ -1612,7 +1621,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
newError += currNorm * currNorm;
}
newError = sqrt(newError);
currIter++;
@ -1732,7 +1741,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
} while( change > epsilon && currIter < maxIter );
/*--------------------------------------------*/
/* Optimization complete copy computed params */
/* Copy projection matrices */

Some files were not shown because too many files have changed in this diff