Replaced alloca() (a.k.a. cvStackAlloc) with AutoBuffer or std::vector everywhere. cvStackAlloc() is still defined, but alloca() is no longer needed to compile and run OpenCV (fixes #889 and possibly some others)

This commit is contained in:
Vadim Pisarevsky 2011-02-18 10:29:57 +00:00
parent 7b2ec0a1e6
commit 65a7f13af3
21 changed files with 286 additions and 263 deletions
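
The core of the change is mechanical: every cvStackAlloc()/alloca() scratch buffer becomes a cv::AutoBuffer or a std::vector. A minimal sketch of the pattern applied throughout the patch (the function and the data below are illustrative, not taken from the commit):

#include <opencv2/core/core.hpp>
#include <vector>

// before: float* tmp = (float*)cvStackAlloc( cols*sizeof(tmp[0]) );   // alloca()-based
static void halveRow( const float* src, float* dst, int cols )
{
    cv::AutoBuffer<float> _tmp(cols);   // small sizes use an internal fixed array,
    float* tmp = _tmp;                  // larger ones go to the heap; freed automatically
    for( int x = 0; x < cols; x++ )
        tmp[x] = src[x]*0.5f;
    for( int x = 0; x < cols; x++ )
        dst[x] = tmp[x];

    // std::vector is used instead where the buffer is wrapped in a CvMat header
    // or where a zero size must be handled gracefully
    std::vector<float> tmp2(cols + 1);
    (void)tmp2;
}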

View File

@ -763,7 +763,8 @@ CV_EXPORTS_W void validateDisparity( Mat& disparity, const Mat& cost,
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( const Mat& disparity,
CV_OUT Mat& _3dImage, const Mat& Q,
bool handleMissingValues=false );
bool handleMissingValues=false,
int ddepth=-1 );
}
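
Usage-wise, the extra parameter lets callers choose the output depth instead of always getting CV_32FC3; a hedged sketch (the input matrices are placeholders):

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

void reprojectExample( const cv::Mat& disparity, const cv::Mat& Q )
{
    cv::Mat xyz32f, xyz16s;
    // default behaviour, unchanged: 3-channel float output
    cv::reprojectImageTo3D( disparity, xyz32f, Q, /*handleMissingValues=*/true );
    // new in this commit: request a 16-bit signed output explicitly
    cv::reprojectImageTo3D( disparity, xyz16s, Q, true, CV_16S );
}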

View File

@ -2753,66 +2753,72 @@ CV_IMPL int cvStereoRectifyUncalibrated(
}
CV_IMPL void cvReprojectImageTo3D(
const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* matQ,
int handleMissingValues )
void cv::reprojectImageTo3D( const Mat& disparity,
Mat& _3dImage, const Mat& Q,
bool handleMissingValues, int dtype )
{
int stype = disparity.type();
CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
stype == CV_32SC1 || stype == CV_32FC1 );
CV_Assert( Q.size() == Size(4,4) );
if( dtype < 0 )
dtype = CV_32FC3;
else
{
dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), 3);
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
}
_3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
const double bigZ = 10000.;
double q[4][4];
CvMat Q = cvMat(4, 4, CV_64F, q);
CvMat sstub, *src = cvGetMat( disparityImage, &sstub );
CvMat dstub, *dst = cvGetMat( _3dImage, &dstub );
int stype = CV_MAT_TYPE(src->type), dtype = CV_MAT_TYPE(dst->type);
int x, y, rows = src->rows, cols = src->cols;
float* sbuf = (float*)cvStackAlloc( cols*sizeof(sbuf[0]) );
float* dbuf = (float*)cvStackAlloc( cols*3*sizeof(dbuf[0]) );
Mat _Q(4, 4, CV_64F, q);
Q.convertTo(_Q, CV_64F);
int x, cols = disparity.cols;
CV_Assert( cols >= 0 );
vector<float> _sbuf(cols+1), _dbuf(cols*3+1);
float* sbuf = &_sbuf[0], *dbuf = &_dbuf[0];
double minDisparity = FLT_MAX;
CV_Assert( CV_ARE_SIZES_EQ(src, dst) &&
(CV_MAT_TYPE(stype) == CV_8UC1 || CV_MAT_TYPE(stype) == CV_16SC1 ||
CV_MAT_TYPE(stype) == CV_32SC1 || CV_MAT_TYPE(stype) == CV_32FC1) &&
(CV_MAT_TYPE(dtype) == CV_16SC3 || CV_MAT_TYPE(dtype) == CV_32SC3 ||
CV_MAT_TYPE(dtype) == CV_32FC3) );
cvConvert( matQ, &Q );
// NOTE: here we quietly assume that at least one pixel in the disparity map is not defined,
// and we set the corresponding Z's to some fixed big value.
if( handleMissingValues )
cvMinMaxLoc( disparityImage, &minDisparity, 0, 0, 0 );
for( y = 0; y < rows; y++ )
cv::minMaxIdx( disparity, &minDisparity, 0, 0, 0 );
for( int y = 0; y < disparity.rows; y++ )
{
const float* sptr = (const float*)(src->data.ptr + src->step*y);
float* dptr0 = (float*)(dst->data.ptr + dst->step*y), *dptr = dptr0;
float *sptr = sbuf, *dptr = dbuf;
double qx = q[0][1]*y + q[0][3], qy = q[1][1]*y + q[1][3];
double qz = q[2][1]*y + q[2][3], qw = q[3][1]*y + q[3][3];
if( stype == CV_8UC1 )
{
const uchar* sptr0 = (const uchar*)sptr;
const uchar* sptr0 = disparity.ptr<uchar>(y);
for( x = 0; x < cols; x++ )
sbuf[x] = (float)sptr0[x];
sptr = sbuf;
sptr[x] = (float)sptr0[x];
}
else if( stype == CV_16SC1 )
{
const short* sptr0 = (const short*)sptr;
const short* sptr0 = disparity.ptr<short>(y);
for( x = 0; x < cols; x++ )
sbuf[x] = (float)sptr0[x];
sptr = sbuf;
sptr[x] = (float)sptr0[x];
}
else if( stype == CV_32SC1 )
{
const int* sptr0 = (const int*)sptr;
const int* sptr0 = disparity.ptr<int>(y);
for( x = 0; x < cols; x++ )
sbuf[x] = (float)sptr0[x];
sptr = sbuf;
sptr[x] = (float)sptr0[x];
}
else
sptr = (float*)disparity.ptr<float>(y);
if( dtype != CV_32FC3 )
dptr = dbuf;
if( dtype == CV_32FC3 )
dptr = _3dImage.ptr<float>(y);
for( x = 0; x < cols; x++, qx += q[0][0], qy += q[1][0], qz += q[2][0], qw += q[3][0] )
{
@ -2831,24 +2837,41 @@ CV_IMPL void cvReprojectImageTo3D(
if( dtype == CV_16SC3 )
{
short* dptr0 = _3dImage.ptr<short>(y);
for( x = 0; x < cols*3; x++ )
{
int ival = cvRound(dptr[x]);
((short*)dptr0)[x] = CV_CAST_16S(ival);
dptr0[x] = CV_CAST_16S(ival);
}
}
else if( dtype == CV_32SC3 )
{
int* dptr0 = _3dImage.ptr<int>(y);
for( x = 0; x < cols*3; x++ )
{
int ival = cvRound(dptr[x]);
((int*)dptr0)[x] = ival;
dptr0[x] = ival;
}
}
}
}
void cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* matQ,
int handleMissingValues )
{
cv::Mat disp = cv::cvarrToMat(disparityImage);
cv::Mat _3dimg = cv::cvarrToMat(_3dImage);
cv::Mat mq = cv::cvarrToMat(matQ);
CV_Assert( disp.size() == _3dimg.size() );
int dtype = _3dimg.type();
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
}
CV_IMPL void
cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz,
@ -3428,16 +3451,6 @@ bool cv::stereoRectifyUncalibrated( const Mat& points1, const Mat& points2,
return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;
}
void cv::reprojectImageTo3D( const Mat& disparity,
Mat& _3dImage, const Mat& Q,
bool handleMissingValues )
{
_3dImage.create(disparity.size(), CV_32FC3);
CvMat _disparity = disparity, __3dImage = _3dImage, matQ = Q;
cvReprojectImageTo3D( &_disparity, &__3dImage, &matQ, handleMissingValues );
}
cv::Mat cv::getOptimalNewCameraMatrix( const Mat& cameraMatrix, const Mat& distCoeffs,
Size imgSize, double alpha, Size newImgSize,
Rect* validPixROI )

View File

@ -274,7 +274,8 @@ bool CvModelEstimator2::runLMeDS( const CvMat* m1, const CvMat* m2, CvMat* model
bool CvModelEstimator2::getSubset( const CvMat* m1, const CvMat* m2,
CvMat* ms1, CvMat* ms2, int maxAttempts )
{
int* idx = (int*)cvStackAlloc( modelPoints*sizeof(idx[0]) );
cv::AutoBuffer<int> _idx(modelPoints);
int* idx = _idx;
int i = 0, j, k, idx_i, iters = 0;
int type = CV_MAT_TYPE(m1->type), elemSize = CV_ELEM_SIZE(type);
const int *m1ptr = m1->data.i, *m2ptr = m2->data.i;

View File

@ -482,8 +482,9 @@ icvComputeK( CvStereoGCState* state )
{
int x, y, x1, d, i, j, rows = state->left->rows, cols = state->left->cols, n = 0;
int mind = state->minDisparity, nd = state->numberOfDisparities, maxd = mind + nd;
int k = MIN(MAX((nd + 2)/4, 3), nd);
int *arr = (int*)cvStackAlloc(k*sizeof(arr[0])), delta, t, sum = 0;
int k = MIN(MAX((nd + 2)/4, 3), nd), delta, t, sum = 0;
vector<int> _arr(k);
int *arr = &_arr[0];
for( y = 0; y < rows; y++ )
{
@ -846,8 +847,6 @@ CV_IMPL void cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _rig
CvSize size;
int iter, i, nZeroExpansions = 0;
CvRNG rng = cvRNG(-1);
int* disp;
CvMat _disp;
int64 E;
CV_Assert( state != 0 );
@ -902,8 +901,9 @@ CV_IMPL void cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _rig
icvInitStereoConstTabs();
icvInitGraySubpix( left, right, state->left, state->right );
disp = (int*)cvStackAlloc( state->numberOfDisparities*sizeof(disp[0]) );
_disp = cvMat( 1, state->numberOfDisparities, CV_32S, disp );
vector<int> disp(state->numberOfDisparities);
CvMat _disp = cvMat( 1, (int)disp.size(), CV_32S, &disp[0] );
cvRange( &_disp, state->minDisparity, state->minDisparity + state->numberOfDisparities );
cvRandShuffle( &_disp, &rng );
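
Backing a CvMat header with a std::vector, as above, recurs in several of the converted files; a standalone sketch with an illustrative function name:

#include <opencv2/core/core_c.h>
#include <vector>

static void shuffleDisparities( int minDisparity, int numberOfDisparities, CvRNG& rng )
{
    std::vector<int> disp(numberOfDisparities);
    // the CvMat header does not own the data; it just points into the vector
    CvMat _disp = cvMat( 1, (int)disp.size(), CV_32S, &disp[0] );
    cvRange( &_disp, minDisparity, minDisparity + numberOfDisparities );
    cvRandShuffle( &_disp, &rng );
}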

View File

@ -4040,6 +4040,53 @@ public:
int index;
};
class CV_EXPORTS AlgorithmImpl;
/*!
Base class for high-level OpenCV algorithms
*/
class CV_EXPORTS Algorithm
{
public:
virtual ~Algorithm();
virtual string name() const;
template<typename _Tp> _Tp get(int paramId) const;
template<typename _Tp> bool set(int paramId, const _Tp& value);
string paramName(int paramId) const;
string paramHelp(int paramId) const;
int paramType(int paramId) const;
int findParam(const string& name) const;
template<typename _Tp> _Tp paramDefaultValue(int paramId) const;
template<typename _Tp> bool paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const;
virtual void getParams(vector<int>& ids) const;
virtual void write(vector<uchar>& buf) const;
virtual bool read(const vector<uchar>& buf);
typedef Algorithm* (*Constructor)(void);
static void add(const string& name, Constructor create);
static void getList(vector<string>& algorithms);
static Ptr<Algorithm> create(const string& name);
protected:
template<typename _Tp> void addParam(int propId, _Tp& value, bool readOnly, const string& name,
const string& help=string(), const _Tp& defaultValue=_Tp(),
_Tp (Algorithm::*getter)()=0, bool (Algorithm::*setter)(const _Tp&)=0);
template<typename _Tp> void setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal);
bool set_(int paramId, int argType, const void* value);
void get_(int paramId, int argType, void* value);
void paramDefaultValue_(int paramId, int argType, void* value);
void paramRange_(int paramId, int argType, void* minval, void* maxval);
void addParam_(int propId, int argType, void* value, bool readOnly, const string& name,
const string& help, const void* defaultValue, void* getter, void* setter);
void setParamRange_(int propId, int argType, const void* minVal, const void* maxVal);
Ptr<AlgorithmImpl> impl;
};
}
#endif // __cplusplus
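
How a concrete algorithm plugs into this (still experimental) interface is not shown in the commit; the following is a hypothetical sketch against the declared API only — the class name, parameter id and registration are invented, and runtime behaviour depends on the AlgorithmImpl internals:

#include <opencv2/core/core.hpp>

class MyDetector : public cv::Algorithm
{
public:
    enum { PARAM_THRESHOLD = 0 };
    MyDetector() : threshold(10)
    {
        // expose the member as a named, typed parameter with a default value
        addParam( PARAM_THRESHOLD, threshold, false, "threshold",
                  "detection threshold", 10 );
    }
    virtual std::string name() const { return "MyDetector"; }
protected:
    int threshold;
};

// generic code could then query/set it without knowing the concrete type, e.g.
//   cv::Algorithm::add( "MyDetector", createMyDetector );   // createMyDetector: hypothetical factory
//   cv::Ptr<cv::Algorithm> a = cv::Algorithm::create( "MyDetector" );
//   int t = a->get<int>( MyDetector::PARAM_THRESHOLD );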

View File

@ -266,15 +266,19 @@ CV_INLINE IppiSize ippiSize(int width, int height)
#ifdef __GNUC__
#undef alloca
#define alloca __builtin_alloca
#define CV_HAVE_ALLOCA 1
#elif defined WIN32 || defined _WIN32 || \
defined WINCE || defined _MSC_VER || defined __BORLANDC__
#include <malloc.h>
#define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA_H
#include <alloca.h>
#define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA
#include <stdlib.h>
#define CV_HAVE_ALLOCA 1
#else
#error "No alloca!"
#undef CV_HAVE_ALLOCA
#endif
#ifdef __GNUC__
@ -285,8 +289,10 @@ CV_INLINE IppiSize ippiSize(int width, int height)
#define CV_DECL_ALIGNED(x)
#endif
#if CV_HAVE_ALLOCA
/* ! DO NOT make it an inline function */
#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
#endif
#ifndef CV_IMPL
#define CV_IMPL CV_EXTERN_C
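
With cvStackAlloc now defined only when CV_HAVE_ALLOCA is set, internal code that still wants it has to guard the call; a sketch, assuming the internal header above (where CV_HAVE_ALLOCA and cvStackAlloc live) is included:

#include <opencv2/core/core.hpp>

static void scaleRow( const float* src, float* dst, int n, float s )
{
#if CV_HAVE_ALLOCA
    float* tmp = (float*)cvStackAlloc( n*sizeof(tmp[0]) );   // stack-only, no cleanup needed
#else
    cv::AutoBuffer<float> _tmp(n);                           // portable fallback
    float* tmp = _tmp;
#endif
    for( int i = 0; i < n; i++ )
        tmp[i] = src[i]*s;
    for( int i = 0; i < n; i++ )
        dst[i] = tmp[i];
}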

View File

@ -3550,6 +3550,51 @@ template<typename _Tp> static inline std::ostream& operator << (std::ostream& ou
return out;
}
template<typename _Tp> struct AlgorithmParamType {};
template<> struct AlgorithmParamType<int> { enum { type = CV_PARAM_TYPE_INT }; };
template<> struct AlgorithmParamType<double> { enum { type = CV_PARAM_TYPE_REAL }; };
template<> struct AlgorithmParamType<string> { enum { type = CV_PARAM_TYPE_STRING }; };
template<> struct AlgorithmParamType<Mat> { enum { type = CV_PARAM_TYPE_MAT }; };
template<typename _Tp> _Tp Algorithm::get(int paramId) const
{
_Tp value = _Tp();
get_(paramId, AlgorithmParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> bool Algorithm::set(int paramId, const _Tp& value)
{
set_(paramId, AlgorithmParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> _Tp Algorithm::paramDefaultValue(int paramId) const
{
_Tp value = _Tp();
paramDefaultValue_(paramId, AlgorithmParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> bool Algorithm::paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const
{
return paramRange_(paramId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal);
}
template<typename _Tp> void Algorithm::addParam(int propId, _Tp& value, bool readOnly, const string& name,
const string& help, const _Tp& defaultValue,
_Tp (Algorithm::*getter)(), bool (Algorithm::*setter)(const _Tp&))
{
addParam_(propId, AlgorithmParamType<_Tp>::type, &value, readOnly, name, help, &defaultValue,
(void*)getter, (void*)setter);
}
template<typename _Tp> void Algorithm::setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal)
{
setParamRange_(propId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal);
}
}
#endif // __cplusplus
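
The typed accessors dispatch through AlgorithmParamType<_Tp>::type to the untyped get_()/set_() calls; an illustrative consumer (the parameter id, and the existence of a double-typed parameter, are assumptions):

#include <opencv2/core/core.hpp>

static void doubleParam( cv::Algorithm& algo, int paramId )
{
    double v = algo.get<double>(paramId);   // tagged internally as CV_PARAM_TYPE_REAL
    algo.set( paramId, v*2 );               // _Tp deduced as double from the argument
}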

View File

@ -793,7 +793,7 @@ CV_INLINE int cvIplDepth( int type )
#define CV_TYPE_NAME_MATND "opencv-nd-matrix"
#define CV_MAX_DIM 32
#define CV_MAX_DIM_HEAP (1 << 16)
#define CV_MAX_DIM_HEAP 1024
typedef struct CvMatND
{
@ -1868,6 +1868,8 @@ typedef struct CvModuleInfo
}
CvModuleInfo;
enum { CV_PARAM_TYPE_INT=0, CV_PARAM_TYPE_REAL=1, CV_PARAM_TYPE_STRING=2, CV_PARAM_TYPE_MAT=3 };
#endif /*_CXCORE_TYPES_H_*/
/* End of file. */

View File

@ -294,7 +294,8 @@ cvCloneMatND( const CvMatND* src )
if( !CV_IS_MATND_HDR( src ))
CV_Error( CV_StsBadArg, "Bad CvMatND header" );
int* sizes = (int*)cvStackAlloc( src->dims*sizeof(sizes[0]) );
CV_Assert( src->dims <= CV_MAX_DIM );
int sizes[CV_MAX_DIM];
for( int i = 0; i < src->dims; i++ )
sizes[i] = src->dim[i].size;
@ -1717,7 +1718,8 @@ cvPtr1D( const CvArr* arr, int idx, int* _type )
else
{
int i, n = m->dims;
int* _idx = (int*)cvStackAlloc(n*sizeof(_idx[0]));
CV_DbgAssert( n <= CV_MAX_DIM_HEAP );
int _idx[CV_MAX_DIM_HEAP];
for( i = n - 1; i >= 0; i-- )
{

View File

@ -132,6 +132,7 @@ GEMMSingleMul( const T* a_data, size_t a_step,
{
int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height;
const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data;
cv::AutoBuffer<T> _a_buf;
T* a_buf = 0;
size_t a_step0, a_step1, c_step0, c_step1, t_step;
@ -154,16 +155,21 @@ GEMMSingleMul( const T* a_data, size_t a_step,
CV_SWAP( a_step0, a_step1, t_step );
n = a_size.height;
if( a_step > 1 && n > 1 )
a_buf = (T*)cvStackAlloc(n*sizeof(a_data[0]));
{
_a_buf.allocate(n);
a_buf = _a_buf;
}
}
if( n == 1 ) /* external product */
{
cv::AutoBuffer<T> _b_buf;
T* b_buf = 0;
if( a_step > 1 && a_size.height > 1 )
{
a_buf = (T*)cvStackAlloc(drows*sizeof(a_data[0]));
_a_buf.allocate(drows);
a_buf = _a_buf;
for( k = 0; k < drows; k++ )
a_buf[k] = a_data[a_step*k];
a_data = a_buf;
@ -171,7 +177,8 @@ GEMMSingleMul( const T* a_data, size_t a_step,
if( b_step > 1 )
{
b_buf = (T*)cvStackAlloc(d_size.width*sizeof(b_buf[0]) );
_b_buf.allocate(d_size.width);
b_buf = _b_buf;
for( j = 0; j < d_size.width; j++ )
b_buf[j] = b_data[j*b_step];
b_data = b_buf;
@ -309,7 +316,8 @@ GEMMSingleMul( const T* a_data, size_t a_step,
}
else
{
WT* d_buf = (WT*)cvStackAlloc(m*sizeof(d_buf[0]));
cv::AutoBuffer<WT> _d_buf(m);
WT* d_buf = _d_buf;
for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
{
@ -369,6 +377,7 @@ GEMMBlockMul( const T* a_data, size_t a_step,
{
int i, j, k, n = a_size.width, m = d_size.width;
const T *_a_data = a_data, *_b_data = b_data;
cv::AutoBuffer<T> _a_buf;
T* a_buf = 0;
size_t a_step0, a_step1, t_step;
int do_acc = flags & 16;
@ -384,7 +393,8 @@ GEMMBlockMul( const T* a_data, size_t a_step,
{
CV_SWAP( a_step0, a_step1, t_step );
n = a_size.height;
a_buf = (T*)cvStackAlloc(n*sizeof(a_data[0]));
_a_buf.allocate(n);
a_buf = _a_buf;
}
if( flags & GEMM_2_T )
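
The GEMM changes rely on AutoBuffer's deferred allocation: declare the buffer empty and call allocate() only on the branch that actually needs scratch space. A compact sketch of that pattern (illustrative function):

#include <opencv2/core/core.hpp>

static float sumStrided( const float* a, size_t step, int n, bool strided )
{
    cv::AutoBuffer<float> _a_buf;          // no allocation yet
    const float* a_data = a;
    if( strided && n > 1 )
    {
        _a_buf.allocate(n);                // allocate lazily; freed automatically on return
        float* a_buf = _a_buf;
        for( int k = 0; k < n; k++ )
            a_buf[k] = a[step*k];
        a_data = a_buf;
    }
    float s = 0.f;
    for( int k = 0; k < n; k++ )
        s += a_data[k];
    return s;
}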

View File

@ -3620,7 +3620,7 @@ icvReadSparseMat( CvFileStorage* fs, CvFileNode* node )
CvSeqReader reader;
CvSeq* elements;
int* idx;
int* sizes = 0, dims, elem_type, cn;
int sizes[CV_MAX_DIM_HEAP], dims, elem_type, cn;
int i;
sizes_node = cvGetFileNodeByName( fs, node, "sizes" );
@ -3635,7 +3635,6 @@ icvReadSparseMat( CvFileStorage* fs, CvFileNode* node )
if( dims <= 0 || dims > CV_MAX_DIM_HEAP )
CV_Error( CV_StsParseError, "Could not determine sparse matrix dimensionality" );
sizes = (int*)cvStackAlloc( dims*sizeof(sizes[0]));
cvReadRawData( fs, sizes_node, sizes, "i" );
elem_type = icvDecodeSimpleFormat( dt );

View File

@ -462,11 +462,11 @@ static CvSeq* icvFastHessianDetector( const CvMat* sum, const CvMat* mask_sum,
int nTotalLayers = (params->nOctaveLayers+2)*params->nOctaves;
int nMiddleLayers = params->nOctaveLayers*params->nOctaves;
CvMat** dets = (CvMat**)cvStackAlloc(nTotalLayers*sizeof(dets[0]));
CvMat** traces = (CvMat**)cvStackAlloc(nTotalLayers*sizeof(traces[0]));
int *sizes = (int*)cvStackAlloc(nTotalLayers*sizeof(sizes[0]));
int *sampleSteps = (int*)cvStackAlloc(nTotalLayers*sizeof(sampleSteps[0]));
int *middleIndices = (int*)cvStackAlloc(nMiddleLayers*sizeof(middleIndices[0]));
cv::AutoBuffer<CvMat*> dets(nTotalLayers);
cv::AutoBuffer<CvMat*> traces(nTotalLayers);
cv::AutoBuffer<int> sizes(nTotalLayers);
cv::AutoBuffer<int> sampleSteps(nTotalLayers);
cv::AutoBuffer<int> middleIndices(nMiddleLayers);
int octave, layer, step, index, middleIndex;
/* Allocate space and calculate properties of each layer */

View File

@ -278,9 +278,9 @@ public:
}
}
void query(const scalar_type* q, int k0, int emax, double* dist, int* results) {
int* tmp = (int*)cvStackAlloc(sizeof(int) * emax);
cv::AutoBuffer<int> tmp(emax);
typedef std::pair<int, accum_type> dr_type; // * swap int and accum_type here, for naming consistency
dr_type* dr = (dr_type*)cvStackAlloc(sizeof(dr_type) * k0);
cv::AutoBuffer<dr_type> dr(k0);
int k1 = 0;
// * handle k0 >= emax, in which case don't track max distance
@ -294,11 +294,11 @@ public:
accum_type pd = (*g[l]).distance(p, q);
if (k1 < k0) {
dr[k1++] = std::make_pair(i, pd);
std::push_heap(dr, dr + k1, comp_dist);
std::push_heap(&dr[0], &dr[k1], comp_dist);
} else if (pd < dr[0].second) {
std::pop_heap(dr, dr + k0, comp_dist);
std::pop_heap(&dr[0], &dr[k0], comp_dist);
dr[k0 - 1] = std::make_pair(i, pd);
std::push_heap(dr, dr + k0, comp_dist);
std::push_heap(&dr[0], &dr[k0], comp_dist);
}
}
}
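
AutoBuffer exposes no iterators, so raw-pointer ranges such as &dr[0] .. &dr[k0] are handed to the STL heap algorithms above; a small self-contained sketch (the types and the function are illustrative):

#include <opencv2/core/core.hpp>
#include <algorithm>
#include <utility>

static int farthestIndex( const float* dist, int n )
{
    CV_Assert( n > 0 );
    typedef std::pair<float, int> dr_type;      // (distance, index)
    cv::AutoBuffer<dr_type> dr(n);
    for( int i = 0; i < n; i++ )
        dr[i] = std::make_pair(dist[i], i);
    std::make_heap( &dr[0], &dr[0] + n );       // max-heap ordered by distance
    return dr[0].second;                        // index of the largest distance
}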

View File

@ -265,30 +265,25 @@ void CvANN_MLP::create( const CvMat* _layer_sizes, int _activ_func,
float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
{
CV_FUNCNAME( "CvANN_MLP::predict" );
__BEGIN__;
double* buf;
int i, j, n, dn = 0, l_count, dn0, buf_sz, min_buf_sz;
if( !layer_sizes )
CV_ERROR( CV_StsError, "The network has not been initialized" );
CV_Error( CV_StsError, "The network has not been initialized" );
if( !CV_IS_MAT(_inputs) || !CV_IS_MAT(_outputs) ||
!CV_ARE_TYPES_EQ(_inputs,_outputs) ||
(CV_MAT_TYPE(_inputs->type) != CV_32FC1 &&
CV_MAT_TYPE(_inputs->type) != CV_64FC1) ||
_inputs->rows != _outputs->rows )
CV_ERROR( CV_StsBadArg, "Both input and output must be floating-point matrices "
CV_Error( CV_StsBadArg, "Both input and output must be floating-point matrices "
"of the same type and have the same number of rows" );
if( _inputs->cols != layer_sizes->data.i[0] )
CV_ERROR( CV_StsBadSize, "input matrix must have the same number of columns as "
CV_Error( CV_StsBadSize, "input matrix must have the same number of columns as "
"the number of neurons in the input layer" );
if( _outputs->cols != layer_sizes->data.i[layer_sizes->cols - 1] )
CV_ERROR( CV_StsBadSize, "output matrix must have the same number of columns as "
CV_Error( CV_StsBadSize, "output matrix must have the same number of columns as "
"the number of neurons in the output layer" );
n = dn0 = _inputs->rows;
min_buf_sz = 2*max_count;
@ -301,7 +296,7 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
buf_sz = dn0*min_buf_sz;
}
buf = (double*)cvStackAlloc( buf_sz*sizeof(buf[0]) );
cv::AutoBuffer<double> buf(buf_sz);
l_count = layer_sizes->cols;
for( i = 0; i < n; i += dn )
@ -310,7 +305,7 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
dn = MIN( dn0, n - i );
cvGetRows( _inputs, layer_in, i, i + dn );
cvInitMatHeader( layer_out, dn, layer_in->cols, CV_64F, buf );
cvInitMatHeader( layer_out, dn, layer_in->cols, CV_64F, &buf[0] );
scale_input( layer_in, layer_out );
CV_SWAP( layer_in, layer_out, temp );
@ -332,8 +327,6 @@ float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
scale_output( layer_in, layer_out );
}
__END__;
return 0.f;
}

View File

@ -1592,32 +1592,22 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
CvMat* weak_responses, CvSlice slice,
bool raw_mode, bool return_sum ) const
{
float* buf = 0;
bool allocated = false;
float value = -FLT_MAX;
CV_FUNCNAME( "CvBoost::predict" );
__BEGIN__;
int i, weak_count, var_count;
CvMat sample, missing;
CvSeqReader reader;
double sum = 0;
int wstep = 0;
const int* vtype;
const int* cmap;
const int* cofs;
const float* sample_data;
if( !weak )
CV_ERROR( CV_StsError, "The boosted tree ensemble has not been trained yet" );
CV_Error( CV_StsError, "The boosted tree ensemble has not been trained yet" );
if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
(_sample->cols != 1 && _sample->rows != 1) ||
(_sample->cols + _sample->rows - 1 != data->var_all && !raw_mode) ||
(active_vars && _sample->cols + _sample->rows - 1 != active_vars->cols && raw_mode) )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"the input sample must be 1d floating-point vector with the same "
"number of elements as the total number of variables or "
"as the number of variables used for training" );
@ -1626,11 +1616,11 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
{
if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
!CV_ARE_SIZES_EQ(_missing, _sample) )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"the missing data mask must be 8-bit vector of the same size as input sample" );
}
weak_count = cvSliceLength( slice, weak );
int i, weak_count = cvSliceLength( slice, weak );
if( weak_count >= weak->total )
{
weak_count = weak->total;
@ -1643,21 +1633,20 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
CV_MAT_TYPE(weak_responses->type) != CV_32FC1 ||
(weak_responses->cols != 1 && weak_responses->rows != 1) ||
weak_responses->cols + weak_responses->rows - 1 != weak_count )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"The output matrix of weak classifier responses must be valid "
"floating-point vector of the same number of components as the length of input slice" );
wstep = CV_IS_MAT_CONT(weak_responses->type) ? 1 : weak_responses->step/sizeof(float);
}
var_count = active_vars->cols;
vtype = data->var_type->data.i;
cmap = data->cat_map->data.i;
cofs = data->cat_ofs->data.i;
int var_count = active_vars->cols;
const int* vtype = data->var_type->data.i;
const int* cmap = data->cat_map->data.i;
const int* cofs = data->cat_ofs->data.i;
// if need, preprocess the input vector
if( !raw_mode )
{
int bufsize;
int step, mstep = 0;
const float* src_sample;
const uchar* src_mask = 0;
@ -1667,16 +1656,9 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
const int* vidx_abs = active_vars_abs->data.i;
bool have_mask = _missing != 0;
bufsize = var_count*(sizeof(float) + sizeof(uchar));
if( bufsize <= CV_MAX_LOCAL_SIZE )
buf = (float*)cvStackAlloc( bufsize );
else
{
CV_CALL( buf = (float*)cvAlloc( bufsize ));
allocated = true;
}
dst_sample = buf;
dst_mask = (uchar*)(buf + var_count);
cv::AutoBuffer<float> buf(var_count + (var_count+3)/4);
dst_sample = &buf[0];
dst_mask = (uchar*)&buf[var_count];
src_sample = _sample->data.fl;
step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(src_sample[0]);
@ -1700,7 +1682,7 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
c = a;
int ival = cvRound(val);
if ( (ival != val) && (!m) )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"one of input categorical variable is not an integer" );
while( a < b )
@ -1741,7 +1723,7 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
else
{
if( !CV_IS_MAT_CONT(_sample->type & (_missing ? _missing->type : -1)) )
CV_ERROR( CV_StsBadArg, "In raw mode the input vectors must be continuous" );
CV_Error( CV_StsBadArg, "In raw mode the input vectors must be continuous" );
}
cvStartReadSeq( weak, &reader );
@ -1830,11 +1812,6 @@ CvBoost::predict( const CvMat* _sample, const CvMat* _missing,
value = (float)cmap[cofs[vtype[data->var_count]] + cls_idx];
}
__END__;
if( allocated )
cvFree( &buf );
return value;
}
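
The replacement buffer above packs the float sample and the uchar mask into a single AutoBuffer<float>, sized in float units; a sketch of that layout (the helper name is hypothetical; it relies on sizeof(float) == 4, as OpenCV assumes throughout):

#include <opencv2/core/core.hpp>

static void splitSampleAndMask( int var_count, cv::AutoBuffer<float>& buf,
                                float*& dst_sample, uchar*& dst_mask )
{
    // var_count floats for the sample, plus ceil(var_count/4) floats whose bytes are
    // reused as the var_count-byte uchar mask right behind them
    buf.allocate( var_count + (var_count + 3)/4 );
    dst_sample = &buf[0];
    dst_mask = (uchar*)&buf[var_count];
}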

View File

@ -209,39 +209,23 @@ float
CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
float* sample_data = 0;
void* buffer = 0;
int allocated_buffer = 0;
int cls = 0;
CV_FUNCNAME( "CvEM::predict" );
__BEGIN__;
int i, k, dims;
int nclusters;
int cov_mat_type = params.cov_mat_type;
double opt = FLT_MAX;
size_t size;
CvMat diff, expo;
dims = means->cols;
nclusters = params.nclusters;
int i, dims = means->cols;
int nclusters = params.nclusters;
CV_CALL( cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data ));
cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data );
// allocate memory and initializing headers for calculating
size = sizeof(double) * (nclusters + dims);
if( size <= CV_MAX_LOCAL_SIZE )
buffer = cvStackAlloc( size );
else
{
CV_CALL( buffer = cvAlloc( size ));
allocated_buffer = 1;
}
expo = cvMat( 1, nclusters, CV_64FC1, buffer );
diff = cvMat( 1, dims, CV_64FC1, (double*)buffer + nclusters );
cv::AutoBuffer<double> buffer(nclusters + dims);
CvMat expo = cvMat(1, nclusters, CV_64F, &buffer[0] );
CvMat diff = cvMat(1, dims, CV_64F, &buffer[nclusters] );
// calculate the probabilities
for( k = 0; k < nclusters; k++ )
for( int k = 0; k < nclusters; k++ )
{
const double* mean_k = (const double*)(means->data.ptr + means->step*k);
const double* w = (const double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k);
@ -281,19 +265,15 @@ CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
if( _probs )
{
CV_CALL( cvConvertScale( &expo, &expo, -0.5 ));
CV_CALL( cvExp( &expo, &expo ));
cvConvertScale( &expo, &expo, -0.5 );
cvExp( &expo, &expo );
if( _probs->cols == 1 )
CV_CALL( cvReshape( &expo, &expo, 0, nclusters ));
CV_CALL( cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] ));
cvReshape( &expo, &expo, 0, nclusters );
cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] );
}
__END__;
if( sample_data != _sample->data.fl )
cvFree( &sample_data );
if( allocated_buffer )
cvFree( &buffer );
return (float)cls;
}

View File

@ -903,8 +903,8 @@ CvDTreeSplit* CvForestERTree::find_split_ord_class( CvDTreeNode* node, int vi, f
// calculate Gini index
if ( !priors )
{
int* lc = (int*)cvStackAlloc(m*sizeof(lc[0]));
int* rc = (int*)cvStackAlloc(m*sizeof(rc[0]));
cv::AutoBuffer<int> lrc(m*2);
int *lc = lrc, *rc = lc + m;
int L = 0, R = 0;
// init arrays of class instance counters on both sides of the split
@ -939,8 +939,8 @@ CvDTreeSplit* CvForestERTree::find_split_ord_class( CvDTreeNode* node, int vi, f
}
else
{
double* lc = (double*)cvStackAlloc(m*sizeof(lc[0]));
double* rc = (double*)cvStackAlloc(m*sizeof(rc[0]));
cv::AutoBuffer<double> lrc(m*2);
double *lc = lrc, *rc = lc + m;
double L = 0, R = 0;
// init arrays of class instance counters on both sides of the split
@ -1013,7 +1013,7 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
const double* priors = data->have_priors ? data->priors_mult->data.db : 0;
// create random class mask
int *valid_cidx = (int*)cvStackAlloc(vm*sizeof(valid_cidx[0]));
cv::AutoBuffer<int> valid_cidx(vm);
for (int i = 0; i < vm; i++)
{
valid_cidx[i] = -1;
@ -1059,8 +1059,8 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
double lbest_val = 0, rbest_val = 0;
if( !priors )
{
int* lc = (int*)cvStackAlloc(cm*sizeof(lc[0]));
int* rc = (int*)cvStackAlloc(cm*sizeof(rc[0]));
cv::AutoBuffer<int> lrc(cm*2);
int *lc = lrc, *rc = lc + cm;
int L = 0, R = 0;
// init arrays of class instance counters on both sides of the split
for(int i = 0; i < cm; i++ )
@ -1096,8 +1096,8 @@ CvDTreeSplit* CvForestERTree::find_split_cat_class( CvDTreeNode* node, int vi, f
}
else
{
double* lc = (double*)cvStackAlloc(cm*sizeof(lc[0]));
double* rc = (double*)cvStackAlloc(cm*sizeof(rc[0]));
cv::AutoBuffer<double> lrc(cm*2);
double *lc = lrc, *rc = lc + cm;
double L = 0, R = 0;
// init arrays of class instance counters on both sides of the split
for(int i = 0; i < cm; i++ )
@ -1333,7 +1333,7 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
CvDTreeNode *left = 0, *right = 0;
int new_buf_idx = data->get_child_buf_idx( node );
CvMat* buf = data->buf;
int* temp_buf = (int*)cvStackAlloc(n*sizeof(temp_buf[0]));
cv::AutoBuffer<int> temp_buf(n);
complete_node_dir(node);

View File

@ -306,43 +306,37 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
const float** _neighbors, CvMat* _neighbor_responses, CvMat* _dist ) const
{
float result = 0.f;
bool local_alloc = false;
float* buf = 0;
const int max_blk_count = 128, max_buf_sz = 1 << 12;
CV_FUNCNAME( "CvKNearest::find_nearest" );
__BEGIN__;
int i, count, count_scale, blk_count0, blk_count = 0, buf_sz, k1;
if( !samples )
CV_ERROR( CV_StsError, "The search tree must be constructed first using train method" );
CV_Error( CV_StsError, "The search tree must be constructed first using train method" );
if( !CV_IS_MAT(_samples) ||
CV_MAT_TYPE(_samples->type) != CV_32FC1 ||
_samples->cols != var_count )
CV_ERROR( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );
CV_Error( CV_StsBadArg, "Input samples must be floating-point matrix (<num_samples>x<var_count>)" );
if( _results && (!CV_IS_MAT(_results) ||
(_results->cols != 1 && _results->rows != 1) ||
_results->cols + _results->rows - 1 != _samples->rows) )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"The results must be 1d vector containing as much elements as the number of samples" );
if( _results && CV_MAT_TYPE(_results->type) != CV_32FC1 &&
(CV_MAT_TYPE(_results->type) != CV_32SC1 || regression))
CV_ERROR( CV_StsUnsupportedFormat,
CV_Error( CV_StsUnsupportedFormat,
"The results must be floating-point or integer (in case of classification) vector" );
if( k < 1 || k > max_k )
CV_ERROR( CV_StsOutOfRange, "k must be within 1..max_k range" );
CV_Error( CV_StsOutOfRange, "k must be within 1..max_k range" );
if( _neighbor_responses )
{
if( !CV_IS_MAT(_neighbor_responses) || CV_MAT_TYPE(_neighbor_responses->type) != CV_32FC1 ||
_neighbor_responses->rows != _samples->rows || _neighbor_responses->cols != k )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"The neighbor responses (if present) must be floating-point matrix of <num_samples> x <k> size" );
}
@ -350,34 +344,28 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
{
if( !CV_IS_MAT(_dist) || CV_MAT_TYPE(_dist->type) != CV_32FC1 ||
_dist->rows != _samples->rows || _dist->cols != k )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"The distances from the neighbors (if present) must be floating-point matrix of <num_samples> x <k> size" );
}
count = _samples->rows;
count_scale = k*2*sizeof(float);
count_scale = k*2;
blk_count0 = MIN( count, max_blk_count );
buf_sz = MIN( blk_count0 * count_scale, max_buf_sz );
blk_count0 = MAX( buf_sz/count_scale, 1 );
blk_count0 += blk_count0 % 2;
blk_count0 = MIN( blk_count0, count );
buf_sz = blk_count0 * count_scale + k*sizeof(float);
buf_sz = blk_count0 * count_scale + k;
k1 = get_sample_count();
k1 = MIN( k1, k );
if( buf_sz <= CV_MAX_LOCAL_SIZE )
{
buf = (float*)cvStackAlloc( buf_sz );
local_alloc = true;
}
else
CV_CALL( buf = (float*)cvAlloc( buf_sz ));
cv::AutoBuffer<float> buf(buf_sz);
for( i = 0; i < count; i += blk_count )
{
blk_count = MIN( count - i, blk_count0 );
float* neighbor_responses = buf;
float* dist = buf + blk_count*k;
float* neighbor_responses = &buf[0];
float* dist = neighbor_responses + blk_count*k;
Cv32suf* sort_buf = (Cv32suf*)(dist + blk_count*k);
find_neighbors_direct( _samples, k, i, i + blk_count,
@ -389,11 +377,6 @@ float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* _results,
result = r;
}
__END__;
if( !local_alloc )
cvFree( &buf );
return result;
}

View File

@ -281,28 +281,20 @@ bool CvNormalBayesClassifier::train( const CvMat* _train_data, const CvMat* _res
float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) const
{
float value = 0;
void* buffer = 0;
int allocated_buffer = 0;
CV_FUNCNAME( "CvNormalBayesClassifier::predict" );
__BEGIN__;
int i, j, k, cls = -1, _var_count, nclasses;
int i, j, cls = -1;
double opt = FLT_MAX;
CvMat diff;
int rtype = 0, rstep = 0, size;
const int* vidx = 0;
nclasses = cls_labels->cols;
_var_count = avg[0]->cols;
int rtype = 0, rstep = 0;
int nclasses = cls_labels->cols;
int _var_count = avg[0]->cols;
if( !CV_IS_MAT(samples) || CV_MAT_TYPE(samples->type) != CV_32FC1 || samples->cols != var_all )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"The input samples must be 32f matrix with the number of columns = var_all" );
if( samples->rows > 1 && !results )
CV_ERROR( CV_StsNullPtr,
CV_Error( CV_StsNullPtr,
"When the number of input samples is >1, the output vector of results must be passed" );
if( results )
@ -311,29 +303,20 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
CV_MAT_TYPE(results->type) != CV_32SC1) ||
(results->cols != 1 && results->rows != 1) ||
results->cols + results->rows - 1 != samples->rows )
CV_ERROR( CV_StsBadArg, "The output array must be integer or floating-point vector "
CV_Error( CV_StsBadArg, "The output array must be integer or floating-point vector "
"with the number of elements = number of rows in the input matrix" );
rtype = CV_MAT_TYPE(results->type);
rstep = CV_IS_MAT_CONT(results->type) ? 1 : results->step/CV_ELEM_SIZE(rtype);
}
if( var_idx )
vidx = var_idx->data.i;
const int* vidx = var_idx ? var_idx->data.i : 0;
// allocate memory and initializing headers for calculating
size = sizeof(double) * (nclasses + var_count);
if( size <= CV_MAX_LOCAL_SIZE )
buffer = cvStackAlloc( size );
else
{
CV_CALL( buffer = cvAlloc( size ));
allocated_buffer = 1;
}
cv::AutoBuffer<double> buffer(nclasses + var_count);
CvMat diff = cvMat( 1, var_count, CV_64FC1, &buffer[0] );
diff = cvMat( 1, var_count, CV_64FC1, buffer );
for( k = 0; k < samples->rows; k++ )
for( int k = 0; k < samples->rows; k++ )
{
int ival;
@ -349,7 +332,7 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
for( j = 0; j < _var_count; j++ )
diff.data.db[j] = avg_data[j] - x[vidx ? vidx[j] : j];
CV_CALL(cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T ));
cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
for( j = 0; j < _var_count; j++ )
{
double d = diff.data.db[j];
@ -385,11 +368,6 @@ float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results ) c
}*/
}
__END__;
if( allocated_buffer )
cvFree( &buffer );
return value;
}

View File

@ -2036,7 +2036,7 @@ void CvDTree::cluster_categories( const int* vectors, int n, int m,
// TODO: consider adding priors (class weights) and sample weights to the clustering algorithm
int iters = 0, max_iters = 100;
int i, j, idx;
double* buf = (double*)cvStackAlloc( (n + k)*sizeof(buf[0]) );
cv::AutoBuffer<double> buf(n + k);
double *v_weights = buf, *c_weights = buf + n;
bool modified = true;
RNG* r = data->rng;
@ -3558,40 +3558,30 @@ void CvDTree::free_tree()
CvDTreeNode* CvDTree::predict( const CvMat* _sample,
const CvMat* _missing, bool preprocessed_input ) const
{
CvDTreeNode* result = 0;
int* catbuf = 0;
cv::AutoBuffer<int> catbuf;
CV_FUNCNAME( "CvDTree::predict" );
__BEGIN__;
int i, step, mstep = 0;
const float* sample;
int i, mstep = 0;
const uchar* m = 0;
CvDTreeNode* node = root;
const int* vtype;
const int* vidx;
const int* cmap;
const int* cofs;
if( !node )
CV_ERROR( CV_StsError, "The tree has not been trained yet" );
CV_Error( CV_StsError, "The tree has not been trained yet" );
if( !CV_IS_MAT(_sample) || CV_MAT_TYPE(_sample->type) != CV_32FC1 ||
(_sample->cols != 1 && _sample->rows != 1) ||
(_sample->cols + _sample->rows - 1 != data->var_all && !preprocessed_input) ||
(_sample->cols + _sample->rows - 1 != data->var_count && preprocessed_input) )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"the input sample must be 1d floating-point vector with the same "
"number of elements as the total number of variables used for training" );
sample = _sample->data.fl;
step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);
const float* sample = _sample->data.fl;
int step = CV_IS_MAT_CONT(_sample->type) ? 1 : _sample->step/sizeof(sample[0]);
if( data->cat_count && !preprocessed_input ) // cache for categorical variables
{
int n = data->cat_count->cols;
catbuf = (int*)cvStackAlloc(n*sizeof(catbuf[0]));
catbuf.allocate(n);
for( i = 0; i < n; i++ )
catbuf[i] = -1;
}
@ -3599,17 +3589,17 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
if( _missing )
{
if( !CV_IS_MAT(_missing) || !CV_IS_MASK_ARR(_missing) ||
!CV_ARE_SIZES_EQ(_missing, _sample) )
CV_ERROR( CV_StsBadArg,
!CV_ARE_SIZES_EQ(_missing, _sample) )
CV_Error( CV_StsBadArg,
"the missing data mask must be 8-bit vector of the same size as input sample" );
m = _missing->data.ptr;
mstep = CV_IS_MAT_CONT(_missing->type) ? 1 : _missing->step/sizeof(m[0]);
}
vtype = data->var_type->data.i;
vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
cmap = data->cat_map ? data->cat_map->data.i : 0;
cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;
const int* vtype = data->var_type->data.i;
const int* vidx = data->var_idx && !preprocessed_input ? data->var_idx->data.i : 0;
const int* cmap = data->cat_map ? data->cat_map->data.i : 0;
const int* cofs = data->cat_ofs ? data->cat_ofs->data.i : 0;
while( node->Tn > pruned_tree_idx && node->left )
{
@ -3640,7 +3630,7 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
int ival = cvRound(val);
if( ival != val )
CV_ERROR( CV_StsBadArg,
CV_Error( CV_StsBadArg,
"one of input categorical variable is not an integer" );
int sh = 0;
@ -3678,11 +3668,7 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
node = dir < 0 ? node->left : node->right;
}
result = node;
__END__;
return result;
return node;
}

View File

@ -1305,12 +1305,12 @@ bool CvCascadeBoost::isErrDesired()
{
int sCount = data->sample_count,
numPos = 0, numNeg = 0, numFalse = 0, numPosTrue = 0;
float* eval = (float*) cvStackAlloc( sizeof(eval[0]) * sCount );
vector<float> eval(sCount);
for( int i = 0; i < sCount; i++ )
if( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getCls( i ) == 1.0F )
eval[numPos++] = predict( i, true );
icvSortFlt( eval, numPos, 0 );
icvSortFlt( &eval[0], numPos, 0 );
int thresholdIdx = (int)((1.0F - minHitRate) * numPos);
threshold = eval[ thresholdIdx ];
numPosTrue = numPos - thresholdIdx;
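
The &eval[0] idiom lets the internal C sorter work on the vector's contiguous storage; a portable sketch of the same threshold computation, with std::sort standing in for the internal icvSortFlt (assumed to sort ascending):

#include <vector>
#include <algorithm>

static float hitRateThreshold( const std::vector<float>& posResponses, float minHitRate )
{
    std::vector<float> eval(posResponses);               // work on a copy
    if( eval.empty() )
        return 0.f;
    std::sort( eval.begin(), eval.end() );               // ascending, like icvSortFlt(&eval[0], n, 0)
    int thresholdIdx = (int)((1.0f - minHitRate) * (int)eval.size());
    thresholdIdx = std::min( thresholdIdx, (int)eval.size() - 1 );
    return eval[thresholdIdx];
}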