Mirror of https://github.com/opencv/opencv.git (synced 2024-11-24 03:00:14 +08:00)
Fixed issues found by static analysis (mostly DBZ)
This commit is contained in:
parent 78d07e841d
commit 1da46fe6fb
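Nearly every hunk below applies the same guard: compute the denominator (usually an L2 norm or a weight sum), assert that it is non-zero, and only then divide. A minimal sketch of that pattern, not taken from the commit itself (the helper name normalizedOrAssert is hypothetical):

#include <cmath>
#include <opencv2/core.hpp>

// Guard pattern used throughout this commit: check the denominator, then divide,
// so static analyzers (and debug builds) catch a zero before it propagates NaN/inf.
static cv::Vec3d normalizedOrAssert(const cv::Vec3d& v)
{
    double n = cv::norm(v);          // L2 norm, e.g. cvNorm(&t, 0, CV_L2) in cvStereoRectify below
    CV_Assert(std::fabs(n) > 0);     // the kind of check the commit adds before each division
    return v * (1.0 / n);            // division is now known to be safe
}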
@@ -2336,10 +2336,13 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
         _uu[2] = 1;
         cvCrossProduct(&uu, &t, &ww);
         nt = cvNorm(&t, 0, CV_L2);
+        CV_Assert(fabs(nt) > 0);
         nw = cvNorm(&ww, 0, CV_L2);
+        CV_Assert(fabs(nw) > 0);
         cvConvertScale(&ww, &ww, 1 / nw);
         cvCrossProduct(&t, &ww, &w3);
         nw = cvNorm(&w3, 0, CV_L2);
+        CV_Assert(fabs(nw) > 0);
         cvConvertScale(&w3, &w3, 1 / nw);
         _uu[2] = 0;
 
@@ -3870,12 +3873,14 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
 
     int idx = fabs(t12(0,0)) > fabs(t12(1,0)) ? 0 : 1;
     double c = t12(idx,0), nt = norm(t12, CV_L2);
+    CV_Assert(fabs(nt) > 0);
     Mat_<double> uu = Mat_<double>::zeros(3,1);
     uu(idx, 0) = c > 0 ? 1 : -1;
 
     // calculate global Z rotation
     Mat_<double> ww = t12.cross(uu), wR;
     double nw = norm(ww, CV_L2);
+    CV_Assert(fabs(nw) > 0);
     ww *= acos(fabs(c)/nt)/nw;
     Rodrigues(ww, wR);
 
@@ -206,6 +206,7 @@ void dls::run_kernel(const cv::Mat& pp)
 
 void dls::build_coeff_matrix(const cv::Mat& pp, cv::Mat& Mtilde, cv::Mat& D)
 {
+    CV_Assert(!pp.empty());
     cv::Mat eye = cv::Mat::eye(3, 3, CV_64F);
 
     // build coeff matrix
@@ -126,7 +126,8 @@ void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints
     {
         Vec3d Xi = objectPoints.depth() == CV_32F ? (Vec3d)Xf[i] : Xd[i];
         Vec3d Y = aff*Xi;
-
+        if (fabs(Y[2]) < DBL_MIN)
+            Y[2] = 1;
         Vec2d x(Y[0]/Y[2], Y[1]/Y[2]);
 
         double r2 = x.dot(x);
@@ -1186,6 +1187,7 @@ void cv::internal::ComputeExtrinsicRefine(const Mat& imagePoints, const Mat& obj
 {
     CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
     CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
+    CV_Assert(rvec.total() > 2 && tvec.total() > 2);
     Vec6d extrinsics(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2),
                      tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
     double change = 1;
@@ -1365,9 +1367,13 @@ void cv::internal::InitExtrinsics(const Mat& _imagePoints, const Mat& _objectPoi
     double sc = .5 * (norm(H.col(0)) + norm(H.col(1)));
     H = H / sc;
     Mat u1 = H.col(0).clone();
-    u1 = u1 / norm(u1);
+    double norm_u1 = norm(u1);
+    CV_Assert(fabs(norm_u1) > 0);
+    u1 = u1 / norm_u1;
     Mat u2 = H.col(1).clone() - u1.dot(H.col(1).clone()) * u1;
-    u2 = u2 / norm(u2);
+    double norm_u2 = norm(u2);
+    CV_Assert(fabs(norm_u2) > 0);
+    u2 = u2 / norm_u2;
     Mat u3 = u1.cross(u2);
     Mat RRR;
     hconcat(u1, u2, RRR);
@@ -194,6 +194,7 @@ void HomographyDecompZhang::decompose(std::vector<CameraMotion>& camMotions)
 {
     Mat W, U, Vt;
     SVD::compute(getHnorm(), W, U, Vt);
+    CV_Assert(W.total() > 2 && Vt.total() > 7);
     double lambda1=W.at<double>(0);
     double lambda3=W.at<double>(2);
     double lambda1m3 = (lambda1-lambda3);
@@ -861,7 +861,9 @@ bool Mat::isSubmatrix() const
 inline
 size_t Mat::elemSize() const
 {
-    return dims > 0 ? step.p[dims - 1] : 0;
+    size_t res = dims > 0 ? step.p[dims - 1] : 0;
+    CV_DbgAssert(res != 0);
+    return res;
 }
 
 inline
@@ -3789,7 +3791,9 @@ bool UMat::isSubmatrix() const
 inline
 size_t UMat::elemSize() const
 {
-    return dims > 0 ? step.p[dims - 1] : 0;
+    size_t res = dims > 0 ? step.p[dims - 1] : 0;
+    CV_DbgAssert(res != 0);
+    return res;
 }
 
 inline
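The two elemSize() changes above use CV_DbgAssert, which only expands to a real check in debug builds, so release binaries keep the old single-expression behaviour. A minimal sketch of the same check written as a free function over public Mat members (checkedElemSize is a hypothetical helper, not part of the commit):

#include <opencv2/core.hpp>

// Mirrors the patched Mat::elemSize(): compute the element size,
// flag a zero result in debug builds, and return it unchanged otherwise.
static size_t checkedElemSize(const cv::Mat& m)
{
    size_t res = m.dims > 0 ? m.step[m.dims - 1] : 0; // same expression as the original return
    CV_DbgAssert(res != 0);                           // compiled out in release builds
    return res;
}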
@@ -263,6 +263,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
     if( crosscheck )
     {
         CV_Assert( K == 1 && update == 0 && mask.empty() );
+        CV_Assert(!nidx.empty());
         Mat tdist, tidx;
         batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false);
 
@@ -14,7 +14,7 @@ namespace cv { namespace dnn {
 class ResizeLayerImpl : public ResizeLayer
 {
 public:
-    ResizeLayerImpl(const LayerParams& params)
+    ResizeLayerImpl(const LayerParams& params) : scaleWidth(0), scaleHeight(0)
     {
         setParamsFrom(params);
         outWidth = params.get<float>("width", 0);
@@ -177,6 +177,7 @@ void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputA
     CV_INSTRUMENT_REGION()
 
     CV_Assert( !vocabulary.empty() );
+    CV_Assert(!keypointDescriptors.empty());
 
     int clusterCount = descriptorSize(); // = vocabulary.rows
 
@@ -264,6 +264,8 @@ void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImag
             convexHull(Mat(contours[contourIdx]), hull);
             double area = contourArea(Mat(contours[contourIdx]));
             double hullArea = contourArea(Mat(hull));
+            if (fabs(hullArea) < DBL_EPSILON)
+                continue;
             double ratio = area / hullArea;
             if (ratio < params.minConvexity || ratio >= params.maxConvexity)
                 continue;
@@ -309,6 +311,7 @@ void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>&
     CV_INSTRUMENT_REGION()
 
     keypoints.clear();
+    CV_Assert(params.minRepeatability != 0);
     Mat grayscaleImage;
     if (image.channels() == 3 || image.channels() == 4)
         cvtColor(image, grayscaleImage, COLOR_BGR2GRAY);
@@ -506,6 +506,7 @@ BRISK_Impl::smoothedIntensity(const cv::Mat& image, const cv::Mat& integral, con
   // scaling:
   const int scaling = (int)(4194304.0 / area);
   const int scaling2 = int(float(scaling) * area / 1024.0);
+  CV_Assert(scaling2 != 0);
 
   // the integral image is larger:
   const int integralcols = imagecols + 1;
@@ -2238,6 +2239,7 @@ BriskLayer::value(const cv::Mat& mat, float xf, float yf, float scale_in) const
   // scaling:
   const int scaling = (int)(4194304.0f / area);
   const int scaling2 = (int)(float(scaling) * area / 1024.0f);
+  CV_Assert(scaling2 != 0);
 
   // calculate borders
   const float x_1 = xf - sigma_half;
@@ -546,10 +546,10 @@ static bool ocl_Laplacian5(InputArray _src, OutputArray _dst,
         size_t lmsz = dev.localMemSize();
         size_t src_step = _src.step(), src_offset = _src.offset();
         const size_t tileSizeYmax = wgs / tileSizeX;
+        CV_Assert(src_step != 0 && esz != 0);
 
         // workaround for NVIDIA: 3 channel vector type takes 4*elem_size in local memory
         int loc_mem_cn = dev.vendorID() == ocl::Device::VENDOR_NVIDIA && cn == 3 ? 4 : cn;
 
         if (((src_offset % src_step) % esz == 0) &&
             (
              (borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE) ||
@@ -4284,10 +4284,14 @@ static bool ocl_sepFilter2D_SinglePass(InputArray _src, OutputArray _dst,
     size_t src_step = _src.step(), src_offset = _src.offset();
     bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
 
-    if ((src_offset % src_step) % esz != 0 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
-        !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE ||
-          borderType == BORDER_REFLECT || borderType == BORDER_WRAP ||
-          borderType == BORDER_REFLECT_101))
+    if (esz == 0
+        || (src_offset % src_step) % esz != 0
+        || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
+        || !(borderType == BORDER_CONSTANT
+            || borderType == BORDER_REPLICATE
+            || borderType == BORDER_REFLECT
+            || borderType == BORDER_WRAP
+            || borderType == BORDER_REFLECT_101))
         return false;
 
     size_t lt2[2] = { optimizedSepFilterLocalWidth, optimizedSepFilterLocalHeight };
@@ -174,6 +174,7 @@ void GMM::addSample( int ci, const Vec3d color )
 
 void GMM::endLearning()
 {
+    CV_Assert(totalSampleCount > 0);
     const double variance = 0.01;
     for( int ci = 0; ci < componentsCount; ci++ )
     {
@@ -3286,6 +3286,7 @@ void cv::warpPolar(InputArray _src, OutputArray _dst, Size dsize,
 
     if (!(flags & CV_WARP_INVERSE_MAP))
     {
+        CV_Assert(!dsize.empty());
         double Kangle = CV_2PI / dsize.height;
         int phi, rho;
 
@@ -3332,6 +3333,7 @@ void cv::warpPolar(InputArray _src, OutputArray _dst, Size dsize,
         Mat src = _dst.getMat();
         Size ssize = _dst.size();
         ssize.height -= 2 * ANGLE_BORDER;
+        CV_Assert(!ssize.empty());
         const double Kangle = CV_2PI / ssize.height;
         double Kmag;
         if (semiLog)
@@ -47,6 +47,7 @@ static const double eps = 1e-6;
 
 static void fitLine2D_wods( const Point2f* points, int count, float *weights, float *line )
 {
+    CV_Assert(count > 0);
     double x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0;
     double dx2, dy2, dxy;
     int i;
@@ -98,6 +99,7 @@ static void fitLine2D_wods( const Point2f* points, int count, float *weights, fl
 
 static void fitLine3D_wods( const Point3f * points, int count, float *weights, float *line )
 {
+    CV_Assert(count > 0);
     int i;
     float w0 = 0;
     float x0 = 0, y0 = 0, z0 = 0;
@@ -772,6 +772,7 @@ bool LineSegmentDetectorImpl::refine(std::vector<RegionPoint>& reg, double reg_a
             ++n;
         }
     }
+    CV_Assert(n > 0);
     double mean_angle = sum / double(n);
     // 2 * standard deviation
     double tau = 2.0 * sqrt((s_sum - 2.0 * mean_angle * sum) / double(n) + mean_angle * mean_angle);
@@ -495,6 +495,13 @@ static bool ocl_moments( InputArray _src, Moments& m, bool binary)
     const int TILE_SIZE = 32;
     const int K = 10;
 
+    Size sz = _src.getSz();
+    int xtiles = divUp(sz.width, TILE_SIZE);
+    int ytiles = divUp(sz.height, TILE_SIZE);
+    int ntiles = xtiles*ytiles;
+    if (ntiles == 0)
+        return false;
+
     ocl::Kernel k = ocl::Kernel("moments", ocl::imgproc::moments_oclsrc,
         format("-D TILE_SIZE=%d%s",
                TILE_SIZE,
@@ -504,10 +511,6 @@ static bool ocl_moments( InputArray _src, Moments& m, bool binary)
         return false;
 
     UMat src = _src.getUMat();
-    Size sz = src.size();
-    int xtiles = (sz.width + TILE_SIZE-1)/TILE_SIZE;
-    int ytiles = (sz.height + TILE_SIZE-1)/TILE_SIZE;
-    int ntiles = xtiles*ytiles;
     UMat umbuf(1, ntiles*K, CV_32S);
 
     size_t globalsize[] = {(size_t)xtiles, std::max((size_t)TILE_SIZE, (size_t)sz.height)};
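The two ocl_moments hunks above move the tile arithmetic ahead of the kernel construction and add an early return for empty images; divUp is OpenCV's ceiling division, so the relocated lines compute the same tile counts as the removed (x + TILE_SIZE-1)/TILE_SIZE expressions. A minimal sketch of that equivalence (standalone helper, not from the commit):

// For non-negative a and positive b this is ordinary ceiling division,
// matching what cv::divUp computes, which is why the swap in ocl_moments
// is behaviour-preserving apart from the new empty-image check.
static inline int ceilDiv(int a, int b)
{
    return (a + b - 1) / b;
}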
@@ -1709,6 +1709,7 @@ void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
 
 cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
 {
+    CV_Assert(n > 0);
     const int SMALL_GAUSSIAN_SIZE = 7;
     static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
     {
@@ -1747,6 +1748,7 @@ cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
         }
     }
 
+    CV_DbgAssert(fabs(sum) > 0);
     sum = 1./sum;
     for( i = 0; i < n; i++ )
     {
@@ -5334,6 +5336,7 @@ public:
                     wsum += w;
                 }
                 // overflow is not possible here => there is no need to use cv::saturate_cast
+                CV_DbgAssert(fabs(wsum) > 0);
                 dptr[j] = (uchar)cvRound(sum/wsum);
             }
         }
@@ -5419,6 +5422,7 @@ public:
                     sum_b += b*w; sum_g += g*w; sum_r += r*w;
                     wsum += w;
                 }
+                CV_DbgAssert(fabs(wsum) > 0);
                 wsum = 1.f/wsum;
                 b0 = cvRound(sum_b*wsum);
                 g0 = cvRound(sum_g*wsum);
@@ -5678,6 +5682,7 @@ public:
                     sum += val*w;
                     wsum += w;
                 }
+                CV_DbgAssert(fabs(wsum) > 0);
                 dptr[j] = (float)(sum/wsum);
             }
         }
@@ -5768,6 +5773,7 @@ public:
                     sum_b += b*w; sum_g += g*w; sum_r += r*w;
                     wsum += w;
                 }
+                CV_DbgAssert(fabs(wsum) > 0);
                 wsum = 1.f/wsum;
                 b0 = sum_b*wsum;
                 g0 = sum_g*wsum;
@@ -616,6 +616,7 @@ public:
             expDiffSum += v; // sum_j(exp(L_ij - L_iq))
         }
 
+        CV_Assert(expDiffSum > 0);
         if(probs)
             L.convertTo(*probs, ptype, 1./expDiffSum);
 
@@ -170,6 +170,7 @@ public:
                 double val = std::abs(w->ord_responses[w->sidx[i]]);
                 max_response = std::max(max_response, val);
             }
+            CV_Assert(fabs(max_response) > 0);
         }
 
         if( rparams.calcVarImportance )
@@ -630,7 +630,7 @@ void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
                 w->cv_Tn[nidx*cv_n + j] = INT_MAX;
             }
         }
-
+        CV_Assert(fabs(sumw) > 0);
         node->node_risk = sum2 - (sum/sumw)*sum;
         node->node_risk /= sumw;
         node->value = sum/sumw;
@@ -599,7 +599,7 @@ cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* _cascade,
                 else
                     sum0 += hidfeature->rect[k].weight * tr.width * tr.height;
             }
-
+            CV_Assert(area0 > 0);
             hidfeature->rect[0].weight = (float)(-sum0/area0);
         } /* l */
     } /* j */
@@ -111,6 +111,7 @@ vector<Vec3d> QRDecode::searchVerticalLines()
 
             for (size_t i = 0; i < test_lines.size(); i++) { length += test_lines[i]; }
 
+            CV_Assert(length > 0);
             for (size_t i = 0; i < test_lines.size(); i++)
             {
                 if (i == 2) { weight += abs((test_lines[i] / length) - 3.0/7.0); }
@@ -182,6 +183,7 @@ vector<Point2f> QRDecode::separateHorizontalLines(vector<Vec3d> list_lines)
 
             for (size_t i = 0; i < test_lines.size(); i++) { length += test_lines[i]; }
 
+            CV_Assert(length > 0);
             for (size_t i = 0; i < test_lines.size(); i++)
             {
                 if (i % 3 == 0) { weight += abs((test_lines[i] / length) - 3.0/14.0); }
@@ -140,6 +140,7 @@ public:
 
         double max;
         minMaxLoc(gray_img, NULL, &max);
+        CV_Assert(max > 0);
 
         Mat map;
         log(gray_img + 1.0f, map);
@@ -429,12 +430,15 @@ public:
         for(int i = 0; i < max_iterations; i++)
         {
             calculateProduct(p, product);
-            float alpha = rr / static_cast<float>(p.dot(product));
+            double dprod = p.dot(product);
+            CV_Assert(fabs(dprod) > 0);
+            float alpha = rr / static_cast<float>(dprod);
 
             r -= alpha * product;
             x += alpha * p;
 
             float new_rr = static_cast<float>(r.dot(r));
+            CV_Assert(fabs(rr) > 0);
             p = r + (new_rr / rr) * p;
             rr = new_rr;
 
@@ -743,6 +743,7 @@ void SCDMatcher::hungarian(cv::Mat &costMatrix, std::vector<cv::DMatch> &outMatc
 
     // calculate symmetric shape context cost
     cv::Mat trueCostMatrix(costMatrix, cv::Rect(0,0,sizeScd1, sizeScd2));
+    CV_Assert(!trueCostMatrix.empty());
     float leftcost = 0;
     for (int nrow=0; nrow<trueCostMatrix.rows; nrow++)
     {
@@ -447,7 +447,7 @@ public:
             }
         }
 
-        float wSumInv = 1.f / wSum;
+        float wSumInv = (std::fabs(wSum) > 0) ? (1.f / wSum) : 0; // if wSum is 0, c1-c3 will be 0 too
         frame(y,x) = Point3_<uchar>(
             static_cast<uchar>(c1*wSumInv),
             static_cast<uchar>(c2*wSumInv),
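Not every fix aborts: the wSumInv change above and the Y[2] clamp in fisheye::projectPoints substitute a harmless value instead of asserting, so those loops keep running when the accumulated denominator happens to be zero. A minimal sketch of that fallback style (safeInverse is a hypothetical helper, not from the commit):

#include <cmath>

// Fallback-style guard: return a neutral inverse instead of asserting,
// mirroring the wSumInv ternary above (a zero weight sum simply yields zero output).
static inline float safeInverse(float denom)
{
    return (std::fabs(denom) > 0.f) ? (1.f / denom) : 0.f;
}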