mirror of https://github.com/opencv/opencv.git
fixed many warnings from GCC 4.6.1
This commit is contained in:
parent 4985c1b632
commit 846e37ded5
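
Editor's note: most of the hunks below address GCC 4.6.1's new -Wunused-but-set-variable diagnostic by deleting, or commenting out, local variables that are written but never read; the remaining hunks guard SSE3-only variables with #if CV_SSE3, disable dead debug blocks with #if 0 or block comments, and fix a signed/unsigned comparison in the Python bindings. A minimal sketch of the dominant pattern and its usual fix (hypothetical code, not taken from the OpenCV sources):

#include <cmath>
#include <cstddef>
#include <vector>

// g++ 4.6 -Wall: "variable 'maxErr' set but not used [-Wunused-but-set-variable]"
double reprojError(const std::vector<double>& residuals)
{
    double l2err = 0;
    double maxErr = 0;                        // written in the loop, never read afterwards
    for (size_t i = 0; i < residuals.size(); i++)
    {
        l2err += residuals[i]*residuals[i];
        if (std::fabs(residuals[i]) > maxErr)
            maxErr = std::fabs(residuals[i]); // dead store
    }
    return std::sqrt(l2err);
}

// Typical fix, as in the cvStereoCalibrate hunk below: drop the dead variable.
double reprojErrorFixed(const std::vector<double>& residuals)
{
    double l2err = 0;
    for (size_t i = 0; i < residuals.size(); i++)
        l2err += residuals[i]*residuals[i];
    return std::sqrt(l2err);
}
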
@@ -1066,10 +1066,10 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )

if (data->is_buf_16u)
{
unsigned short *ldst, *rdst, *ldst0, *rdst0;
ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
ushort *ldst, *rdst;
ldst = (ushort*)(buf->data.s + left->buf_idx*buf->cols +
vi*scount + left->offset);
rdst0 = rdst = (unsigned short*)(ldst + nl);
rdst = (ushort*)(ldst + nl);

// split sorted
for( int i = 0; i < n1; i++ )
@@ -1079,12 +1079,12 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
idx = newIdx[idx];
if (d)
{
*rdst = (unsigned short)idx;
*rdst = (ushort)idx;
rdst++;
}
else
{
*ldst = (unsigned short)idx;
*ldst = (ushort)idx;
ldst++;
}
}
@@ -1092,10 +1092,10 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
}
else
{
int *ldst0, *ldst, *rdst0, *rdst;
ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols +
int *ldst, *rdst;
ldst = buf->data.i + left->buf_idx*buf->cols +
vi*scount + left->offset;
rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols +
rdst = buf->data.i + right->buf_idx*buf->cols +
vi*scount + right->offset;

// split sorted
@@ -1363,7 +1363,7 @@ CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
{
Ptr<CvMat> matA, _b, _allH, _allK;

int i, j, pos, nimages, total, ni = 0;
int i, j, pos, nimages, ni = 0;
double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
double H[9], f[2];
CvMat _a = cvMat( 3, 3, CV_64F, a );
@@ -1389,8 +1389,6 @@ CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
a[5] = (imageSize.height - 1)*0.5;
_allH = cvCreateMat( nimages, 9, CV_64F );

total = cvRound(cvSum(npoints).val[0]);

// extract vanishing points in order to obtain initial value for the focal length
for( i = 0, pos = 0; i < nimages; i++, pos += ni )
{
@@ -2136,7 +2134,7 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1

for( k = 0; k < 2; k++ )
{
double maxErr, l2err;
double l2err;
imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);

if( JtJ || JtErr )
@@ -2148,7 +2146,6 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );

l2err = cvNorm( &tmpimagePoints, 0, CV_L2 );
maxErr = cvNorm( &tmpimagePoints, 0, CV_C );

if( JtJ || JtErr )
{
@@ -130,8 +130,6 @@ void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
double min_dist = std::numeric_limits<double>::max();
int min_idx = -1;

Rect brect = boundingRect(Mat(contour));

// find corner idx
for(size_t i = 0; i < contour.size(); i++)
{
@@ -155,8 +153,6 @@ void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
double min_dist = std::numeric_limits<double>::max();
int min_idx = -1;

Rect brect = boundingRect(Mat(contour));

// find corner idx
for(size_t i = 0; i < contour.size(); i++)
{
@@ -134,6 +134,8 @@ cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, CvMat* projPoints1, CvMa
}
}

#if 0
double err = 0;
/* Points was reconstructed. Try to reproject points */
/* We can compute reprojection error if need */
{
@@ -172,9 +174,11 @@ cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, CvMat* projPoints1, CvMa
float deltaX,deltaY;
deltaX = (float)fabs(x-xr);
deltaY = (float)fabs(y-yr);
err += deltaX*deltaX + deltaY*deltaY;
}
}
}
#endif
}


@@ -1071,7 +1071,6 @@ void CV_ProjectPointsTest::run(int)
validImgPoint.y = static_cast<float>((double)cameraMatrix(1,1)*(y*cdist + (double)distCoeffs(0,2)*a3 + distCoeffs(0,3)*a1)
+ (double)cameraMatrix(1,2));

Point2f ssdfp = *it;
if( fabs(it->x - validImgPoint.x) > imgPointErr ||
fabs(it->y - validImgPoint.y) > imgPointErr )
{
@@ -483,12 +483,9 @@ void DetectionBasedTracker::process(const Mat& imageGray)

Mat imageDetect=imageGray;

Size sz=imageDetect.size();
int D=parameters.minObjectSize;
if (D < 1)
D=1;
Size objectSize=Size(D,D);


vector<Rect> rectsWhereRegions;
bool shouldHandleResult=separateDetectionWork->communicateWithDetectingThread(imageGray, rectsWhereRegions);
@@ -1475,9 +1475,9 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows )
int elem_size = (int)src.elemSize1(), complex_elem_size = elem_size*2;
int factors[34];
bool inplace_transform = false;
int ipp_norm_flag = 0;
#ifdef HAVE_IPP
void *spec_r = 0, *spec_c = 0;
int ipp_norm_flag = !(flags & DFT_SCALE) ? 8 : inv ? 2 : 1;
#endif

CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 );
@@ -1506,8 +1506,6 @@ void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows )
(src.cols > 1 && inv && real_transform)) )
stage = 1;

ipp_norm_flag = !(flags & DFT_SCALE) ? 8 : inv ? 2 : 1;

for(;;)
{
double scale = 1;
@@ -1592,11 +1592,6 @@ struct BatchDistInvoker
{
AutoBuffer<int> buf(src2->rows);
int* bufptr = buf;
Cv32suf val0;
if( dist->type() == CV_32S )
val0.i = INT_MAX;
else
val0.f = FLT_MAX;

for( int i = range.begin(); i < range.end(); i++ )
{
@@ -1932,10 +1932,9 @@ void Core_SVDTest::prepare_to_validation( int /*test_case_idx*/ )
{
Mat& input = test_mat[INPUT][0];
int depth = input.depth();
int m = input.rows, n = input.cols, min_size = MIN(m, n);
int i, m = input.rows, n = input.cols, min_size = MIN(m, n);
Mat *src, *dst, *w;
double prev = 0, threshold = depth == CV_32F ? FLT_EPSILON : DBL_EPSILON;
int i, step;

if( have_u )
{
@@ -1954,7 +1953,6 @@ void Core_SVDTest::prepare_to_validation( int /*test_case_idx*/ )
}

w = &test_mat[TEMP][0];
step = w->rows == 1 ? 1 : (int)w->step1();
for( i = 0; i < min_size; i++ )
{
double normval = 0, aii;
@@ -399,8 +399,7 @@ Mat imdecode( InputArray _buf, int flags )
bool imencode( const string& ext, InputArray _image,
vector<uchar>& buf, const vector<int>& params )
{
Mat temp, image = _image.getMat();
const Mat* pimage = &image;
Mat image = _image.getMat();

int channels = image.channels();
CV_Assert( channels == 1 || channels == 3 || channels == 4 );
@@ -412,8 +411,9 @@ bool imencode( const string& ext, InputArray _image,
if( !encoder->isFormatSupported(image.depth()) )
{
CV_Assert( encoder->isFormatSupported(CV_8U) );
Mat temp;
image.convertTo(temp, CV_8U);
pimage = &temp;
image = temp;
}

bool code;
@@ -156,7 +156,6 @@ cvImageWidgetNew (int flags)
static void
cvImageWidget_realize (GtkWidget *widget)
{
CvImageWidget *image_widget;
GdkWindowAttr attributes;
gint attributes_mask;

@@ -165,7 +164,6 @@ cvImageWidget_realize (GtkWidget *widget)
g_return_if_fail (CV_IS_IMAGE_WIDGET (widget));

GTK_WIDGET_SET_FLAGS (widget, GTK_REALIZED);
image_widget = CV_IMAGE_WIDGET (widget);

attributes.x = widget->allocation.x;
attributes.y = widget->allocation.y;
@@ -88,7 +88,6 @@ public:
{
const int img_r = 640;
const int img_c = 480;
Size frame_s = Size(img_c, img_r);

for (int k = 1; k <= 5; ++k)
{
@@ -230,7 +230,6 @@ void CV_HighGuiTest::VideoTest(const string& dir, int fourcc)

CvVideoWriter* writer = 0;

int counter = 0;
for(;;)
{
IplImage * img = cvQueryFrame( cap );
@@ -267,7 +266,6 @@ void CV_HighGuiTest::VideoTest(const string& dir, int fourcc)

const double thresDbell = 20;

counter = 0;
for(;;)
{
IplImage* ipl = cvQueryFrame( cap );
@@ -344,7 +344,6 @@ icvIsPtInCircle3( CvPoint2D32f pt, CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f
CV_IMPL CvSubdiv2DPoint *
cvSubdivDelaunay2DInsert( CvSubdiv2D * subdiv, CvPoint2D32f pt )
{
CvSubdiv2DPoint *point = 0;
CvSubdiv2DPointLocation location = CV_PTLOC_ERROR;

CvSubdiv2DPoint *curr_point = 0, *first_point = 0;
@@ -368,7 +367,6 @@ cvSubdivDelaunay2DInsert( CvSubdiv2D * subdiv, CvPoint2D32f pt )
CV_Error( CV_StsOutOfRange, "" );

case CV_PTLOC_VERTEX:
point = curr_point;
break;

case CV_PTLOC_ON_EDGE:
@@ -233,7 +233,8 @@ void CvBlobTrackerAuto1::Process(IplImage* pImg, IplImage* pMask)
double Time;
TickCount = cvGetTickCount()-TickCount;
Time = TickCount/FREQ;
if(out){fprintf(out,"- %sFrame: %d ALL_TIME - %f\n",stime,Count,Time/1000);fclose(out);}
TimeSum += Time;
if(out){fprintf(out,"- %sFrame: %d ALL_TIME - %f\n",stime,Count,TimeSum/1000);fclose(out);}

TimeSum = 0;
TickCount = cvGetTickCount();
@@ -520,13 +520,10 @@ private:
//DefBlobTracker* pBT = (DefBlobTracker*)pB;
CvBlob* pBBest = NULL;
double DistBest = -1;
int j,BlobID;

if(pB==NULL) return NULL;

BlobID = pB->ID;

for(j=m_BlobListNew.GetBlobNum(); j>0; --j)
for(int j=m_BlobListNew.GetBlobNum(); j>0; --j)
{ /* Find best CC: */
double Dist = -1;
CvBlob* pBNew = m_BlobListNew.GetBlob(j-1);
@@ -276,14 +276,11 @@ private:
return cvSum(pHT->m_pHist).val[0] / sqrt(pHC->m_HistVolume*pHM->m_HistVolume);
#else
// Do computations manually and let autovectorizer do the job:
DefHistType *hm, *hc, *ht;
double sum;
int size;
hm=(DefHistType *)(pHM->m_pHist->data.ptr);
hc=(DefHistType *)(pHC->m_pHist->data.ptr);
ht=(DefHistType *)(pHT->m_pHist->data.ptr);
size = pHM->m_pHist->width*pHM->m_pHist->height;
sum = 0.;
DefHistType* hm=(DefHistType *)(pHM->m_pHist->data.ptr);
DefHistType* hc=(DefHistType *)(pHC->m_pHist->data.ptr);
//ht=(DefHistType *)(pHT->m_pHist->data.ptr);
int size = pHM->m_pHist->width*pHM->m_pHist->height;
double sum = 0.;
for(int i = 0; i < size; i++ )
{
sum += sqrt(hm[i]*hc[i]);
@@ -696,8 +696,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
pt.x = (float)cvmGet(oldPoints,0,i);
pt.y = (float)cvmGet(oldPoints,1,i);

CvSubdiv2DPoint* point;
point = cvSubdivDelaunay2DInsert( subdiv, pt );
cvSubdivDelaunay2DInsert( subdiv, pt );
}
}

@@ -908,22 +907,21 @@ void icvAddNewImageToPrevious____(

/* Remove all new double points */

int origNum;
/* Find point of old image */
origNum = icvRemoveDoublePoins( oldPoints,/* Points on prev image */
newFPoints2D1,/* New points */
oldPntStatus,/* Status for old points */
newFPointsStatusTmp,
newFPointsStatusTmp,//orig status
20);/* Status for new points */
icvRemoveDoublePoins( oldPoints,/* Points on prev image */
newFPoints2D1,/* New points */
oldPntStatus,/* Status for old points */
newFPointsStatusTmp,
newFPointsStatusTmp,//orig status
20);/* Status for new points */

/* Find double points on new image */
origNum = icvRemoveDoublePoins( newPoints,/* Points on prev image */
newFPoints2D2,/* New points */
newPntStatus,/* Status for old points */
newFPointsStatusTmp,
newFPointsStatusTmp,//orig status
20);/* Status for new points */
icvRemoveDoublePoins( newPoints,/* Points on prev image */
newFPoints2D2,/* New points */
newPntStatus,/* Status for old points */
newFPointsStatusTmp,
newFPointsStatusTmp,//orig status
20);/* Status for new points */


@@ -450,12 +450,6 @@ int icvComCoeffForLine( CvPoint2D64d point1,

double gamma;

double x1,y1,z1;

x1 = camPoint1.x;
y1 = camPoint1.y;
z1 = camPoint1.z;

double xA,yA,zA;
double xB,yB,zB;
double xC,yC,zC;
@@ -2859,12 +2853,12 @@ int icvSelectBestRt( int numImages,
&tmpPoint2,
rotMatrs1_64d + currImagePair*9,
transVects1_64d + currImagePair*3);
double err;
/*double err;
double dx,dy,dz;
dx = tmpPoint2.x - points1[i].x;
dy = tmpPoint2.y - points1[i].y;
dz = tmpPoint2.z - points1[i].z;
err = sqrt(dx*dx + dy*dy + dz*dz);
err = sqrt(dx*dx + dy*dy + dz*dz);*/


}
@@ -3458,43 +3452,37 @@ int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_star

int icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross)
{
double ex1,ey1,ex2,ey2;
double px1,py1,px2,py2;
double del;
double delA,delB,delX,delY;
double alpha,betta;
double ex1 = p1_start.x;
double ey1 = p1_start.y;
double ex2 = p1_end.x;
double ey2 = p1_end.y;

ex1 = p1_start.x;
ey1 = p1_start.y;
ex2 = p1_end.x;
ey2 = p1_end.y;
double px1 = v2_start.x;
double py1 = v2_start.y;
double px2 = v2_end.x;
double py2 = v2_end.y;

px1 = v2_start.x;
py1 = v2_start.y;
px2 = v2_end.x;
py2 = v2_end.y;

del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1);
double del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1);
if( del == 0)
{
return -1;
}

delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2);
delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2);
double delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2);
//double delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2);

alpha = delA / del;
betta = -delB / del;
double alpha = delA / del;
//double betta = -delB / del;

if( alpha < 0 || alpha > 1.0 )
{
return -1;
}

delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+
double delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+
(px1-px2)*(ex1*(ey1-ey2)-ey1*(ex1-ex2));

delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+
double delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+
(py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2));

cross->x = (float)( delX / del);
@@ -1159,10 +1159,9 @@ int cvTestSeqGetObjectPos(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pPos)
if(p && p->pPos && p->PosNum>0)
{
CvTSTrans* pTrans;
float t;
int frame = pTS->CurFrame - p->FrameBegin - 1;
if(frame < 0 || frame >= p->FrameNum) return 0;
t = (p->FrameNum>1)?((float)frame / (p->FrameNum-1)):0;
//float t = (p->FrameNum>1)?((float)frame / (p->FrameNum-1)):0;
pTrans = p->pTrans + frame%p->TransNum;
pPos[0] = p->pPos[frame%p->PosNum];

@@ -1210,12 +1209,11 @@ int cvTestSeqGetObjectSize(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pSiz
if(p && p->pSize && p->SizeNum>0)
{
CvTSTrans* pTrans;
float t;
int frame = pTS->CurFrame - p->FrameBegin - 1;

if(frame < 0 || frame >= p->FrameNum) return 0;

t = (p->FrameNum>1)?((float)frame / (p->FrameNum-1)):0;
//float t = (p->FrameNum>1)?((float)frame / (p->FrameNum-1)):0;
pTrans = p->pTrans + frame%p->TransNum;
pSize[0] = p->pSize[frame%p->SizeNum];

@@ -2169,7 +2169,7 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj

/* Points was reconstructed. Try to reproject points */
/* We can compute reprojection error if need */
{
/*{
int i;
CvMat point3D;
double point3D_dat[4];
@@ -2188,7 +2188,7 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
point3D_dat[2] = cvmGet(points4D,2,i)/W;
point3D_dat[3] = 1;

/* !!! Project this point for each camera */
// !!! Project this point for each camera
for( int currCamera = 0; currCamera < 3; currCamera++ )
{
cvmMul(projMatrs[currCamera], &point3D, &point2D);
@@ -2207,7 +2207,7 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
deltaY = (float)fabs(y-yr);
}
}
}
}*/

__END__;
return;
@@ -2537,8 +2537,7 @@ void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* ro
double resVect_dat[12];
resVect = cvMat(12,1,CV_64F,resVect_dat);

int sing;
sing = cvSolve(&matrA,&vectB,&resVect);
cvSolve(&matrA,&vectB,&resVect);

/* Fill rotation matrix */
for( i = 0; i < 12; i++ )
@@ -433,14 +433,14 @@ cvInitFaceTracker(CvFaceTracker* pFaceTracker, const IplImage* imgGray, CvRect*
(nRects < NUM_FACE_ELEMENTS))
return NULL;

int new_face = FALSE;
//int new_face = FALSE;
CvFaceTracker* pFace = pFaceTracker;
if (NULL == pFace)
{
pFace = new CvFaceTracker;
if (NULL == pFace)
return NULL;
new_face = TRUE;
//new_face = TRUE;
}
pFace->Init(pRects, (IplImage*)imgGray);
return pFace;
@@ -1249,13 +1249,11 @@ CvBoost::update_weights( CvBoostTree* tree )
// recent weak classifier we know the responses. For other samples we need to compute them
if( have_subsample )
{
float* values0, *values = (float*)cur_buf_pos;
float* values = (float*)cur_buf_pos;
cur_buf_pos = (uchar*)(values + data->buf->step);
uchar* missing0, *missing = cur_buf_pos;
uchar* missing = cur_buf_pos;
cur_buf_pos = missing + data->buf->step;
CvMat _sample, _mask;
values0 = values;
missing0 = missing;

// invert the subsample mask
cvXorS( subsample_mask, cvScalar(1.), subsample_mask );
@@ -697,28 +697,18 @@ float CvRTrees::predict( const CvMat* sample, const CvMat* missing ) const

float CvRTrees::predict_prob( const CvMat* sample, const CvMat* missing) const
{
double result = -1;
int k;

if( nclasses == 2 ) //classification
{
int max_nvotes = 0;
cv::AutoBuffer<int> _votes(nclasses);
int* votes = _votes;
memset( votes, 0, sizeof(*votes)*nclasses );
for( k = 0; k < ntrees; k++ )
for( int k = 0; k < ntrees; k++ )
{
CvDTreeNode* predicted_node = trees[k]->predict( sample, missing );
int nvotes;
int class_idx = predicted_node->class_idx;
CV_Assert( 0 <= class_idx && class_idx < nclasses );

nvotes = ++votes[class_idx];
if( nvotes > max_nvotes )
{
max_nvotes = nvotes;
result = predicted_node->value;
}
++votes[class_idx];
}

return float(votes[1])/ntrees;
@@ -391,7 +391,6 @@ float ann_calc_error( CvANN_MLP* ann, CvMLData* _data, map<int, int>& cls_map, i
int cls_count = (int)cls_map.size();
Mat output( 1, cls_count, CV_32FC1 );
CvMat _output = CvMat(output);
map<int, int>::iterator b_it = cls_map.begin();
for( int i = 0; i < sample_count; i++ )
{
CvMat sample;
@@ -898,7 +898,7 @@ void CV_DescriptorMatcherTest::radiusMatchTest( const Mat& query, const Mat& tra

dmatcher->radiusMatch( query, matches, radius, masks );

int curRes = cvtest::TS::OK;
//int curRes = cvtest::TS::OK;
if( (int)matches.size() != queryDescCount )
{
ts->printf(cvtest::TS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
@@ -938,7 +938,6 @@ void CV_DescriptorMatcherTest::radiusMatchTest( const Mat& query, const Mat& tra
}
if( (float)badCount > (float)queryDescCount*badPart )
{
curRes = cvtest::TS::FAIL_INVALID_OUTPUT;
ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test radiusMatch() function (2).\n",
(float)badCount/(float)queryDescCount );
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
@@ -169,7 +169,7 @@ int Sampler::hasbars()

void Sampler::timing()
{
uchar light, dark = getpixel(9, 0);
/*uchar light, dark = getpixel(9, 0);
for (int i = 1; i < 3; i += 2) {
light = getpixel(9, i);
// if (light <= dark)
@@ -177,7 +177,7 @@ void Sampler::timing()
dark = getpixel(9, i + 1);
// if (up <= down)
// goto endo;
}
}*/
}

CvMat *Sampler::extract()
@@ -528,8 +528,8 @@ namespace
line(image, code.corners[2], code.corners[3], c);
line(image, code.corners[3], code.corners[0], c);
string code_text(code.msg,4);
int baseline = 0;
Size sz = getTextSize(code_text, CV_FONT_HERSHEY_SIMPLEX, 1, 1, &baseline);
//int baseline = 0;
//Size sz = getTextSize(code_text, CV_FONT_HERSHEY_SIMPLEX, 1, 1, &baseline);
putText(image, code_text, code.corners[0], CV_FONT_HERSHEY_SIMPLEX, 0.8, c2, 1, CV_AA, false);
}
cv::Mat& image;
@@ -657,8 +657,6 @@ CV_IMPL int
cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
CvPoint pt, double& stage_sum, int start_stage )
{
int result = -1;

int p_offset, pq_offset;
int i, j;
double mean, variance_norm_factor;
@@ -690,12 +688,9 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,

if( cascade->is_tree )
{
CvHidHaarStageClassifier* ptr;
CvHidHaarStageClassifier* ptr = cascade->stage_classifier;
assert( start_stage == 0 );

result = 1;
ptr = cascade->stage_classifier;

while( ptr )
{
stage_sum = 0.0;
@@ -929,7 +929,9 @@ void orUnaligned8u(const uchar * src, const int src_stride,
{
#if CV_SSE2
volatile bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
#if CV_SSE3
volatile bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
#endif
bool src_aligned = reinterpret_cast<unsigned long long>(src) % 16 == 0;
#endif

@@ -1203,7 +1205,9 @@ void similarity(const std::vector<Mat>& linear_memories, const Template& templ,

#if CV_SSE2
volatile bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
#if CV_SSE3
volatile bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
#endif
#endif

// Compute the similarity measure for this template by accumulating the contribution of
@@ -1281,7 +1285,9 @@ void similarityLocal(const std::vector<Mat>& linear_memories, const Template& te

#if CV_SSE2
volatile bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
#if CV_SSE3
volatile bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
#endif
__m128i* dst_ptr_sse = dst.ptr<__m128i>();
#endif

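Editor's note: the three SSE hunks above (orUnaligned8u, similarity, similarityLocal) only declare haveSSE3 when CV_SSE3 is defined, so SSE2-only builds do not carry an unused flag. A simplified sketch of that guard pattern, using stand-in macros and a stand-in helper rather than the actual OpenCV symbols:

// Hypothetical stand-ins for the CV_SSE2/CV_SSE3 build flags and
// cv::checkHardwareSupport(); only the nesting of the guards matters here.
#define MY_SSE2 1
#define MY_SSE3 0

static bool myCheckHardwareSupport(int /*feature*/) { return true; }

void accumulateSimilarity()
{
#if MY_SSE2
    volatile bool haveSSE2 = myCheckHardwareSupport(2);
#if MY_SSE3
    // Declared only when an SSE3 code path is compiled in, so GCC 4.6 does not
    // report an unused variable on builds without SSE3.
    volatile bool haveSSE3 = myCheckHardwareSupport(3);
#endif
    (void)haveSSE2;  // dispatch between the SSE2/SSE3 paths would go here
#endif
}
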
@@ -345,7 +345,7 @@ static bool pyopencv_to(PyObject* obj, size_t& value, const char* name = "<unkno
if(!obj || obj == Py_None)
return true;
value = (int)PyLong_AsUnsignedLong(obj);
return value != -1 || !PyErr_Occurred();
return value != (size_t)-1 || !PyErr_Occurred();
}

static PyObject* pyopencv_from(int value)
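Editor's note: the pyopencv_to hunk above compares the converted value against (size_t)-1 rather than the signed literal -1, keeping both operands of the comparison unsigned (PyLong_AsUnsignedLong reports failure by returning (unsigned long)-1 with a Python error set). A small sketch of the same check outside the bindings (hypothetical helper, not the real pyopencv_to):

#include <cstddef>

// 'raw' stands in for PyLong_AsUnsignedLong()'s return value and 'error_set'
// for PyErr_Occurred(); the point is the unsigned comparison against (size_t)-1.
bool to_size_t(unsigned long raw, bool error_set, size_t& value)
{
    value = (size_t)raw;
    // Comparing a size_t against the int literal -1 mixes signed and unsigned
    // operands and draws a GCC warning; casting the sentinel keeps it unsigned.
    return value != (size_t)-1 || !error_set;
}
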
@@ -61,13 +61,9 @@ cvUpdateMotionHistory( const void* silhouette, void* mhimg,

CvSize size = cvGetMatSize( mhi );

int mhi_step = mhi->step;
int silh_step = silh->step;

if( CV_IS_MAT_CONT( mhi->type & silh->type ))
{
size.width *= size.height;
mhi_step = silh_step = CV_STUB_STEP;
size.height = 1;
}

@@ -189,7 +189,7 @@ void CV_OptFlowPyrLKTest::run( int )

if( max_err > 1 )
{
ts->printf( cvtest::TS::LOG, "Maximum tracking error is too big (=%g)\n", max_err );
ts->printf( cvtest::TS::LOG, "Maximum tracking error is too big (=%g) at %d\n", max_err, merr_i );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
@@ -68,7 +68,7 @@ int main( int argc, char** argv )
for ( int j = 0; j < (int)r.size(); j++ )
{
Point pt = r[j];
img.at<Vec3b>(r[j]) = bcolors[i%9];
img.at<Vec3b>(pt) = bcolors[i%9];
}

// find ellipse (it seems cvfitellipse2 have error or sth?)
@@ -39,7 +39,6 @@ namespace
{
Point2f pt_new = query[matches[i].queryIdx].pt;
Point2f pt_old = train[matches[i].trainIdx].pt;
Point2f dist = pt_new - pt_old;

cv::line(img, pt_new, pt_old, Scalar(125, 255, 125), 1);
cv::circle(img, pt_new, 2, Scalar(255, 0, 125), 1);