fixed multiple warnings from VS2010.

Vadim Pisarevsky 2010-11-25 16:55:46 +00:00
parent 7e5c11a920
commit 1286c1db45
36 changed files with 178 additions and 177 deletions
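
Nearly every hunk below applies the same family of fixes for VS2010's 64-bit conversion warnings, most often C4267 ("conversion from 'size_t' to 'int', possible loss of data"): a size_t value coming from std::vector::size() or a size_t loop index is stored in an int, and the cure is an explicit narrowing cast. A minimal sketch of the pattern (the helper name is hypothetical, not from this commit):

    #include <vector>

    // On x64, size_t is 64-bit, so storing a container size in an int
    // triggers warning C4267 under VS2010 unless the narrowing is explicit.
    int countDescriptors(const std::vector<float>& descriptors)
    {
        // int n = descriptors.size();    // warning C4267 on x64
        int n = (int)descriptors.size();  // explicit cast, warning silenced
        return n;
    }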

View File

@@ -2158,7 +2158,7 @@ inline void BruteForceMatcher<Distance>::commonKnnMatchImpl( BruteForceMatcher<D
 Point minLoc;
 minMaxLoc( allDists[iIdx], &minVal, 0, &minLoc, 0 );
 if( minVal < bestMatch.distance )
-bestMatch = DMatch( qIdx, minLoc.x, iIdx, (float)minVal );
+bestMatch = DMatch( qIdx, minLoc.x, (int)iIdx, (float)minVal );
 }
 }
 if( bestMatch.trainIdx == -1 )
@@ -2214,7 +2214,7 @@ inline void BruteForceMatcher<Distance>::commonRadiusMatchImpl( BruteForceMatche
 matcher.trainDescCollection[iIdx].step*tIdx);
 DistanceType d = matcher.distance(d1, d2, dimension);
 if( d < maxDistance )
-curMatches->push_back( DMatch( qIdx, tIdx, iIdx, (float)d ) );
+curMatches->push_back( DMatch( qIdx, tIdx, (int)iIdx, (float)d ) );
 }
 }
 }

View File

@@ -100,7 +100,7 @@ Mat BOWKMeansTrainer::cluster() const
 Mat mergedDescriptors( descCount, descriptors[0].cols, descriptors[0].type() );
 for( size_t i = 0, start = 0; i < descriptors.size(); i++ )
 {
-Mat submut = mergedDescriptors.rowRange(start, start + descriptors[i].rows);
+Mat submut = mergedDescriptors.rowRange((int)start, (int)(start + descriptors[i].rows));
 descriptors[i].copyTo(submut);
 start += descriptors[i].rows;
 }

View File

@@ -166,7 +166,7 @@ void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoin
 //Remove keypoints very close to the border
 removeBorderKeypoints(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
-descriptors = Mat::zeros(keypoints.size(), bytes_, CV_8U);
+descriptors = Mat::zeros((int)keypoints.size(), bytes_, CV_8U);
 test_fn_(sum, keypoints, descriptors);
 }

View File

@@ -307,7 +307,7 @@ int RandomizedTree::getIndex(uchar* patch_data) const
 int child_offset = nodes_[index](patch_data);
 index = 2*index + 1 + child_offset;
 }
-return index - nodes_.size();
+return (int)(index - nodes_.size());
 }
 void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set,
@@ -323,7 +323,7 @@ void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set,
 int depth, int views, size_t reduced_num_dim,
 int num_quant_bits)
 {
-init(base_set.size(), depth, rng);
+init((int)base_set.size(), depth, rng);
 Mat patch;
@@ -466,24 +466,24 @@ void RandomizedTree::compressLeaves(size_t reduced_num_dim)
 }
 // DO NOT FREE RETURNED POINTER
-float *cs_phi = CSMatrixGenerator::getCSMatrix(reduced_num_dim, classes_, CSMatrixGenerator::PDT_BERNOULLI);
+float *cs_phi = CSMatrixGenerator::getCSMatrix((int)reduced_num_dim, classes_, CSMatrixGenerator::PDT_BERNOULLI);
 float *cs_posteriors = new float[num_leaves_ * reduced_num_dim]; // temp, num_leaves_ x reduced_num_dim
 for (int i=0; i<num_leaves_; ++i) {
 float *post = getPosteriorByIndex(i);
 float *prod = &cs_posteriors[i*reduced_num_dim];
-Mat A( reduced_num_dim, classes_, CV_32FC1, cs_phi );
+Mat A( (int)reduced_num_dim, classes_, CV_32FC1, cs_phi );
 Mat X( classes_, 1, CV_32FC1, post );
-Mat Y( reduced_num_dim, 1, CV_32FC1, prod );
+Mat Y( (int)reduced_num_dim, 1, CV_32FC1, prod );
 Y = A*X;
 }
 // copy new posteriors
 freePosteriors(3);
-allocPosteriorsAligned(num_leaves_, reduced_num_dim);
+allocPosteriorsAligned(num_leaves_, (int)reduced_num_dim);
 for (int i=0; i<num_leaves_; ++i)
 memcpy(posteriors_[i], &cs_posteriors[i*reduced_num_dim], reduced_num_dim*sizeof(float));
-classes_ = reduced_num_dim;
+classes_ = (int)reduced_num_dim;
 delete [] cs_posteriors;
 }
@@ -682,8 +682,8 @@ void RTreeClassifier::train(std::vector<BaseKeypoint> const& base_set,
 }
 num_quant_bits_ = num_quant_bits;
-classes_ = reduced_num_dim; // base_set.size();
-original_num_classes_ = base_set.size();
+classes_ = (int)reduced_num_dim; // base_set.size();
+original_num_classes_ = (int)base_set.size();
 trees_.resize(num_trees);
 printf("[OK] Training trees: base size=%i, reduced size=%i\n", (int)base_set.size(), (int)reduced_num_dim);
@@ -899,7 +899,7 @@ void RTreeClassifier::write(const char* file_name) const
 void RTreeClassifier::write(std::ostream &os) const
 {
-int num_trees = trees_.size();
+int num_trees = (int)trees_.size();
 os.write((char*)(&num_trees), sizeof(num_trees));
 os.write((char*)(&classes_), sizeof(classes_));
 os.write((char*)(&original_num_classes_), sizeof(original_num_classes_));
@@ -953,9 +953,9 @@ void RTreeClassifier::setFloatPosteriorsFromTextfile_176(std::string url)
 float RTreeClassifier::countZeroElements()
 {
-int flt_zeros = 0;
-int ui8_zeros = 0;
-int num_elem = trees_[0].classes();
+size_t flt_zeros = 0;
+size_t ui8_zeros = 0;
+size_t num_elem = trees_[0].classes();
 for (int i=0; i<(int)trees_.size(); ++i)
 for (int k=0; k<(int)trees_[i].num_leaves_; ++k) {
 float *p = trees_[i].getPosteriorByIndex(k);

View File

@@ -113,7 +113,7 @@ Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExt
 {
 DescriptorExtractor* de = 0;
-int pos = 0;
+size_t pos = 0;
 if (!descriptorExtractorType.compare("SIFT"))
 {
 de = new SiftDescriptorExtractor();

View File

@@ -100,7 +100,7 @@ void FeatureDetector::write( FileStorage& ) const
 Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
 {
 FeatureDetector* fd = 0;
-int pos = 0;
+size_t pos = 0;
 if( !detectorType.compare( "FAST" ) )
 {

View File

@@ -74,12 +74,12 @@ void DynamicAdaptedFeatureDetector::detectImpl(const Mat& image, vector<KeyPoint
 if (int(keypoints.size()) < min_features_)
 {
 down = true;
-adjuster.tooFew(min_features_, keypoints.size());
+adjuster.tooFew(min_features_, (int)keypoints.size());
 }
 else if (int(keypoints.size()) > max_features_)
 {
 up = true;
-adjuster.tooMany(max_features_, keypoints.size());
+adjuster.tooMany(max_features_, (int)keypoints.size());
 }
 else
 thresh_good = true;
@@ -96,13 +96,13 @@ void FastAdjuster::detectImpl(const Mat& image, vector<KeyPoint>& keypoints, con
 FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
 }
-void FastAdjuster::tooFew(int min, int n_detected)
+void FastAdjuster::tooFew(int, int)
 {
 //fast is easy to adjust
 thresh_--;
 }
-void FastAdjuster::tooMany(int max, int n_detected)
+void FastAdjuster::tooMany(int, int)
 {
 //fast is easy to adjust
 thresh_++;
@@ -121,18 +121,18 @@ StarAdjuster::StarAdjuster(double initial_thresh) :
 void StarAdjuster::detectImpl(const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
 {
-StarFeatureDetector detector_tmp(16, thresh_, 10, 8, 3);
+StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
 detector_tmp.detect(image, keypoints, mask);
 }
-void StarAdjuster::tooFew(int min, int n_detected)
+void StarAdjuster::tooFew(int, int)
 {
 thresh_ *= 0.9;
 if (thresh_ < 1.1)
 thresh_ = 1.1;
 }
-void StarAdjuster::tooMany(int max, int n_detected)
+void StarAdjuster::tooMany(int, int)
 {
 thresh_ *= 1.1;
 }
@@ -152,14 +152,14 @@ void SurfAdjuster::detectImpl(const Mat& image, vector<KeyPoint>& keypoints, con
 detector_tmp.detect(image, keypoints, mask);
 }
-void SurfAdjuster::tooFew(int min, int n_detected)
+void SurfAdjuster::tooFew(int, int)
 {
 thresh_ *= 0.9;
 if (thresh_ < 1.1)
 thresh_ = 1.1;
 }
-void SurfAdjuster::tooMany(int max, int n_detected)
+void SurfAdjuster::tooMany(int, int)
 {
 thresh_ *= 1.1;
 }
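
The adjuster hunks above address a different warning, C4100 ("unreferenced formal parameter"): the min/max/n_detected arguments were never used inside the bodies, so the fix drops only the parameter names, which keeps the virtual signatures intact while telling the compiler the arguments are intentionally unused. A simplified sketch of the idiom (not the real FastAdjuster class):

    // Omitting the parameter names keeps the signature unchanged but
    // silences C4100, since there is no longer a named, unreferenced parameter.
    struct AdjusterSketch
    {
        int thresh_;
        // void tooFew(int min, int n_detected) { thresh_--; }  // C4100 twice
        void tooFew(int, int) { thresh_--; }                    // no warning
    };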

View File

@@ -333,7 +333,7 @@ static void computeOneToOneMatchedOverlaps( const vector<EllipticKeyPoint>& keyp
 {
 float ov = (float)ac.bna / (float)ac.bua;
 if( ov >= minOverlap )
-overlaps.push_back(SIdx(ov, i1, i2));
+overlaps.push_back(SIdx(ov, (int)i1, (int)i2));
 }
 }
 }
@@ -385,10 +385,10 @@ static void calculateRepeatability( const Mat& img1, const Mat& img2, const Mat&
 {
 overlapThreshold = 1.f - 0.5f;
-thresholdedOverlapMask->create( keypoints1.size(), keypoints2t.size(), CV_8UC1 );
+thresholdedOverlapMask->create( (int)keypoints1.size(), (int)keypoints2t.size(), CV_8UC1 );
 thresholdedOverlapMask->setTo( Scalar::all(0) );
 }
-int minCount = min( keypoints1.size(), keypoints2t.size() );
+size_t minCount = min( keypoints1.size(), keypoints2t.size() );
 // calculate overlap errors
 vector<SIdx> overlaps;
@@ -402,7 +402,7 @@ static void calculateRepeatability( const Mat& img1, const Mat& img2, const Mat&
 if( ifEvaluateDetectors )
 {
 // regions one-to-one matching
-correspondencesCount = overlaps.size();
+correspondencesCount = (int)overlaps.size();
 repeatability = minCount ? (float)correspondencesCount / minCount : -1;
 }
 else
@@ -502,7 +502,7 @@ float cv::getRecall( const vector<Point2f>& recallPrecisionCurve, float l_precis
 float curDiff = std::fabs(l_precision - recallPrecisionCurve[i].x);
 if( curDiff <= minDiff )
 {
-bestIdx = i;
+bestIdx = (int)i;
 minDiff = curDiff;
 }
 }

View File

@@ -56,10 +56,11 @@ Mat windowedMatchingMask( const vector<KeyPoint>& keypoints1, const vector<KeyPo
 if( keypoints1.empty() || keypoints2.empty() )
 return Mat();
-Mat mask( keypoints1.size(), keypoints2.size(), CV_8UC1 );
-for( size_t i = 0; i < keypoints1.size(); i++ )
+int n1 = (int)keypoints1.size(), n2 = (int)keypoints2.size();
+Mat mask( n1, n2, CV_8UC1 );
+for( int i = 0; i < n1; i++ )
 {
-for( size_t j = 0; j < keypoints2.size(); j++ )
+for( int j = 0; j < n2; j++ )
 {
 Point2f diff = keypoints2[j].pt - keypoints1[i].pt;
 mask.at<uchar>(i, j) = std::abs(diff.x) < maxDeltaX && std::abs(diff.y) < maxDeltaY;
@@ -166,11 +167,11 @@ void DescriptorMatcher::DescriptorCollection::getLocalIdx( int globalDescIdx, in
 {
 if( globalDescIdx < startIdxs[i] )
 {
-imgIdx = i - 1;
+imgIdx = (int)(i - 1);
 break;
 }
 }
-imgIdx = imgIdx == -1 ? startIdxs.size() -1 : imgIdx;
+imgIdx = imgIdx == -1 ? (int)(startIdxs.size() - 1) : imgIdx;
 localDescIdx = globalDescIdx - startIdxs[imgIdx];
 }
@@ -648,7 +649,7 @@ void GenericDescriptorMatcher::KeyPointCollection::add( const vector<Mat>& _imag
 images.insert( images.end(), _images.begin(), _images.end() );
 keypoints.insert( keypoints.end(), _points.begin(), _points.end() );
 for( size_t i = 0; i < _points.size(); i++ )
-pointCount += _points[i].size();
+pointCount += (int)_points[i].size();
 size_t prevSize = startIndices.size(), addSize = _images.size();
 startIndices.resize( prevSize + addSize );
@@ -656,11 +657,11 @@ void GenericDescriptorMatcher::KeyPointCollection::add( const vector<Mat>& _imag
 if( prevSize == 0 )
 startIndices[prevSize] = 0; //first
 else
-startIndices[prevSize] = startIndices[prevSize-1] + keypoints[prevSize-1].size();
+startIndices[prevSize] = (int)(startIndices[prevSize-1] + keypoints[prevSize-1].size());
 for( size_t i = prevSize + 1; i < prevSize + addSize; i++ )
 {
-startIndices[i] = startIndices[i - 1] + keypoints[i - 1].size();
+startIndices[i] = (int)(startIndices[i - 1] + keypoints[i - 1].size());
 }
 }
@@ -712,11 +713,11 @@ void GenericDescriptorMatcher::KeyPointCollection::getLocalIdx( int globalPointI
 {
 if( globalPointIdx < startIndices[i] )
 {
-imgIdx = i - 1;
+imgIdx = (int)(i - 1);
 break;
 }
 }
-imgIdx = imgIdx == -1 ? startIndices.size() -1 : imgIdx;
+imgIdx = imgIdx == -1 ? (int)(startIndices.size() - 1) : imgIdx;
 localPointIdx = globalPointIdx - startIndices[imgIdx];
 }
@@ -923,14 +924,14 @@ void OneWayDescriptorMatcher::train()
 base = new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
 params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale );
-base->Allocate( trainPointCollection.keypointCount() );
-prevTrainCount = trainPointCollection.keypointCount();
+base->Allocate( (int)trainPointCollection.keypointCount() );
+prevTrainCount = (int)trainPointCollection.keypointCount();
 const vector<vector<KeyPoint> >& points = trainPointCollection.getKeypoints();
 int count = 0;
 for( size_t i = 0; i < points.size(); i++ )
 {
-IplImage _image = trainPointCollection.getImage(i);
+IplImage _image = trainPointCollection.getImage((int)i);
 for( size_t j = 0; j < points[i].size(); j++ )
 base->InitializeDescriptor( count++, &_image, points[i][j], "" );
 }
@@ -961,7 +962,7 @@ void OneWayDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoi
 int descIdx = -1, poseIdx = -1;
 float distance;
 base->FindDescriptor( &_qimage, queryKeypoints[i].pt, descIdx, poseIdx, distance );
-matches[i].push_back( DMatch(i, descIdx, distance) );
+matches[i].push_back( DMatch((int)i, descIdx, distance) );
 }
 }
@@ -979,7 +980,7 @@ void OneWayDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<Key
 float distance;
 base->FindDescriptor( &_qimage, queryKeypoints[i].pt, descIdx, poseIdx, distance );
 if( distance < maxDistance )
-matches[i].push_back( DMatch(i, descIdx, distance) );
+matches[i].push_back( DMatch((int)i, descIdx, distance) );
 }
 }
@@ -1060,7 +1061,7 @@ void FernDescriptorMatcher::train()
 vector<vector<Point2f> > points( trainPointCollection.imageCount() );
 for( size_t imgIdx = 0; imgIdx < trainPointCollection.imageCount(); imgIdx++ )
-KeyPoint::convert( trainPointCollection.getKeypoints(imgIdx), points[imgIdx] );
+KeyPoint::convert( trainPointCollection.getKeypoints((int)imgIdx), points[imgIdx] );
 classifier = new FernClassifier( points, trainPointCollection.getImages(), vector<vector<int> >(), 0, // each points is a class
 params.patchSize, params.signatureSize, params.nstructs, params.structSize,
@@ -1112,8 +1113,8 @@ void FernDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoint
 if( -signature[ci] < bestMatch.distance )
 {
 int imgIdx = -1, trainIdx = -1;
-trainPointCollection.getLocalIdx( ci , imgIdx, trainIdx );
-bestMatch = DMatch( queryIdx, trainIdx, imgIdx, -signature[ci] );
+trainPointCollection.getLocalIdx( (int)ci , imgIdx, trainIdx );
+bestMatch = DMatch( (int)queryIdx, trainIdx, imgIdx, -signature[ci] );
 }
 }
@@ -1143,7 +1144,7 @@ void FernDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<KeyPo
 {
 int imgIdx = -1, trainIdx = -1;
 trainPointCollection.getLocalIdx( ci , imgIdx, trainIdx );
-matches[i].push_back( DMatch( i, trainIdx, imgIdx, -signature[ci] ) );
+matches[i].push_back( DMatch( (int)i, trainIdx, imgIdx, -signature[ci] ) );
 }
 }
 }

View File

@@ -831,10 +831,10 @@ void FernClassifier::prepare(int _nclasses, int _patchSize, int _signatureSize,
 static int calcNumPoints( const vector<vector<Point2f> >& points )
 {
-int count = 0;
+size_t count = 0;
 for( size_t i = 0; i < points.size(); i++ )
 count += points[i].size();
-return count;
+return (int)count;
 }
 void FernClassifier::train(const vector<vector<Point2f> >& points,

View File

@@ -44,7 +44,7 @@
 #define __OPENCV_PRECOMP_H__
 #if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
+#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
 #endif
 #ifdef HAVE_CONFIG_H
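
The newly added 4512 in the pragma is MSVC's "assignment operator could not be generated" warning, emitted for any class whose implicit operator= cannot be synthesized, most commonly because of a const or reference member. For classes that are never assigned it is pure noise, hence the blanket disable. A minimal reproducer (hypothetical class, for illustration only):

    // Without the pragma, VS2010 reports C4512 at this class definition:
    // the const member prevents the compiler from generating operator=.
    struct FrameRef
    {
        const int index;                      // const member: no implicit operator=
        explicit FrameRef(int i) : index(i) {}
    };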

View File

@@ -1404,7 +1404,7 @@ Sift::detectKeypoints(VL::float_t threshold, VL::float_t edgeThreshold)
 VL::float_t Dx=0,Dy=0,Ds=0,Dxx=0,Dyy=0,Dss=0,Dxy=0,Dxs=0,Dys=0 ;
 VL::float_t b [3] ;
-pixel_t* pt ;
+pixel_t* pt = 0;
 int dx = 0 ;
 int dy = 0 ;
@@ -1697,7 +1697,7 @@ Sift::computeKeypointOrientations(VL::float_t angles [4], Keypoint keypoint)
 prepareGrad(o) ;
 // clear the SIFT histogram
-std::fill(hist, hist + nbins, 0) ;
+std::fill(hist, hist + nbins, 0.f) ;
 // fill the SIFT histogram
 pixel_t* pt = temp + xi * xo + yi * yo + (si - smin -1) * so ;
@@ -1896,7 +1896,7 @@ Sift::computeKeypointDescriptor
 // make sure gradient buffer is up-to-date
 prepareGrad(o) ;
-std::fill( descr_pt, descr_pt + NBO*NBP*NBP, 0 ) ;
+std::fill( descr_pt, descr_pt + NBO*NBP*NBP, 0.f ) ;
 /* Center the scale space and the descriptor on the current keypoint.
 * Note that dpt is pointing to the bin of center (SBP/2,SBP/2,0).
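
Two distinct warnings are handled in this file: C4701 ("potentially uninitialized local variable") for pt, fixed by initializing the pointer at its declaration, and an int-to-float conversion warning raised inside the std::fill instantiation when the plain literal 0 is written into a float buffer, fixed by the 0.f literal. A small sketch of both fixes in an assumed, simplified context:

    #include <algorithm>

    void clearHistogram(float* hist, int nbins, bool haveSeed)
    {
        // float* pt;            // C4701 if some path leaves it unset
        float* pt = 0;           // initialize at declaration, as the diff does
        if (haveSeed)
            pt = hist;
        (void)pt;

        // std::fill(hist, hist + nbins, 0);  // int value into float*: conversion warning
        std::fill(hist, hist + nbins, 0.f);   // literal type matches the element type
    }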

View File

@@ -176,7 +176,7 @@ public:
 template <typename T>
 T* allocate(size_t count = 1)
 {
-T* mem = (T*) this->allocateBytes(sizeof(T)*count);
+T* mem = (T*) this->allocateBytes((int)(sizeof(T)*count));
 return mem;
 }

View File

@@ -240,12 +240,12 @@ private:
 t.start();
 kmeans.buildIndex();
 t.stop();
-float buildTime = t.value;
+float buildTime = (float)t.value;
 // measure search time
 float searchTime = test_index_precision(kmeans, sampledDataset, testDataset, gt_matches, index_params.target_precision, checks, nn);;
-float datasetMemory = sampledDataset.rows*sampledDataset.cols*sizeof(float);
+float datasetMemory = (float)(sampledDataset.rows*sampledDataset.cols*sizeof(float));
 cost.memoryCost = (kmeans.usedMemory()+datasetMemory)/datasetMemory;
 cost.searchTimeCost = searchTime;
 cost.buildTimeCost = buildTime;
@@ -266,12 +266,12 @@ private:
 t.start();
 kdtree.buildIndex();
 t.stop();
-float buildTime = t.value;
+float buildTime = (float)t.value;
 //measure search time
 float searchTime = test_index_precision(kdtree, sampledDataset, testDataset, gt_matches, index_params.target_precision, checks, nn);
-float datasetMemory = sampledDataset.rows*sampledDataset.cols*sizeof(float);
+float datasetMemory = (float)(sampledDataset.rows*sampledDataset.cols*sizeof(float));
 cost.memoryCost = (kdtree.usedMemory()+datasetMemory)/datasetMemory;
 cost.searchTimeCost = searchTime;
 cost.buildTimeCost = buildTime;
@@ -459,7 +459,7 @@ private:
 for (size_t i=0;i<kdtreeParamSpaceSize;++i) {
 kdtreeCosts[i].first.totalCost = (kdtreeCosts[i].first.timeCost/optTimeCost + index_params.memory_weight * kdtreeCosts[i].first.memoryCost);
-int k = i;
+int k = (int)i;
 while (k>0 && kdtreeCosts[k].first.totalCost < kdtreeCosts[k-1].first.totalCost) {
 swap(kdtreeCosts[k],kdtreeCosts[k-1]);
 k--;
@@ -502,12 +502,12 @@ private:
 // We compute the ground truth using linear search
 logger().info("Computing ground truth... \n");
-gt_matches = Matrix<int>(new int[testDataset.rows],testDataset.rows, 1);
+gt_matches = Matrix<int>(new int[testDataset.rows],(long)testDataset.rows, 1);
 StartStopTimer t;
 t.start();
 compute_ground_truth(sampledDataset, testDataset, gt_matches, 0);
 t.stop();
-float bestCost = t.value;
+float bestCost = (float)t.value;
 IndexParams* bestParams = new LinearIndexParams();
 // Start parameter autotune process
@@ -550,19 +550,19 @@ private:
 float speedup = 0;
-int samples = min(dataset.rows/10, SAMPLE_COUNT);
+int samples = (int)min(dataset.rows/10, SAMPLE_COUNT);
 if (samples>0) {
 Matrix<ELEM_TYPE> testDataset = random_sample(dataset,samples);
 logger().info("Computing ground truth\n");
 // we need to compute the ground truth first
-Matrix<int> gt_matches(new int[testDataset.rows],testDataset.rows,1);
+Matrix<int> gt_matches(new int[testDataset.rows],(long)testDataset.rows,1);
 StartStopTimer t;
 t.start();
 compute_ground_truth(dataset, testDataset, gt_matches,1);
 t.stop();
-float linear = t.value;
+float linear = (float)t.value;
 int checks;
 logger().info("Estimating number of checks\n");
@@ -575,7 +575,7 @@ private:
 float bestSearchTime = -1;
 float best_cb_index = -1;
 int best_checks = -1;
-for (cb_index = 0;cb_index<1.1; cb_index+=0.2) {
+for (cb_index = 0;cb_index<1.1f; cb_index+=0.2f) {
 kmeans->set_cb_index(cb_index);
 searchTime = test_index_precision(*kmeans, dataset, testDataset, gt_matches, index_params.target_precision, checks, nn, 1);
 if (searchTime<bestSearchTime || bestSearchTime == -1) {

View File

@@ -340,7 +340,7 @@ struct ZeroIterator {
 return 0;
 }
-T operator[](int index) {
+T operator[](int) {
 return 0;
 }

View File

@@ -47,20 +47,20 @@ void find_nearest(const Matrix<T>& dataset, T* query, int* matches, int nn, int
 long* match = new long[n];
 T* dists = new T[n];
-dists[0] = flann_dist(query, query_end, dataset[0]);
+dists[0] = (float)flann_dist(query, query_end, dataset[0]);
 match[0] = 0;
 int dcnt = 1;
 for (size_t i=1;i<dataset.rows;++i) {
-T tmp = flann_dist(query, query_end, dataset[i]);
+T tmp = (T)flann_dist(query, query_end, dataset[i]);
 if (dcnt<n) {
-match[dcnt] = i;
+match[dcnt] = (long)i;
 dists[dcnt++] = tmp;
 }
 else if (tmp < dists[dcnt-1]) {
 dists[dcnt-1] = tmp;
-match[dcnt-1] = i;
+match[dcnt-1] = (long)i;
 }
 int j = dcnt-1;
@@ -85,7 +85,7 @@ template <typename T>
 void compute_ground_truth(const Matrix<T>& dataset, const Matrix<T>& testset, Matrix<int>& matches, int skip=0)
 {
 for (size_t i=0;i<testset.rows;++i) {
-find_nearest(dataset, testset[i], matches[i], matches.cols, skip);
+find_nearest(dataset, testset[i], matches[i], (int)matches.cols, skip);
 }
 }

View File

@@ -55,8 +55,8 @@ float computeDistanceRaport(const Matrix<ELEM_TYPE>& inputData, ELEM_TYPE* targe
 ELEM_TYPE* target_end = target + veclen;
 float ret = 0;
 for (int i=0;i<n;++i) {
-float den = flann_dist(target,target_end, inputData[groundTruth[i]]);
-float num = flann_dist(target,target_end, inputData[neighbors[i]]);
+float den = (float)flann_dist(target,target_end, inputData[groundTruth[i]]);
+float num = (float)flann_dist(target,target_end, inputData[neighbors[i]]);
 if (den==0 && num==0) {
 ret += 1;
@@ -81,8 +81,8 @@ float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE
 KNNResultSet<ELEM_TYPE> resultSet(nn+skipMatches);
 SearchParams searchParams(checks);
-int correct;
-float distR;
+int correct = 0;
+float distR = 0;
 StartStopTimer t;
 int repeats = 0;
 while (t.value<0.2) {
@@ -92,17 +92,17 @@ float search_with_ground_truth(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE
 distR = 0;
 for (size_t i = 0; i < testData.rows; i++) {
 ELEM_TYPE* target = testData[i];
-resultSet.init(target, testData.cols);
+resultSet.init(target, (int)testData.cols);
 index.findNeighbors(resultSet,target, searchParams);
 int* neighbors = resultSet.getNeighbors();
 neighbors = neighbors+skipMatches;
 correct += countCorrectMatches(neighbors,matches[i], nn);
-distR += computeDistanceRaport(inputData, target,neighbors,matches[i], testData.cols, nn);
+distR += computeDistanceRaport(inputData, target,neighbors,matches[i], (int)testData.cols, nn);
 }
 t.stop();
 }
-time = t.value/repeats;
+time = (float)(t.value/repeats);
 float precicion = (float)correct/(nn*testData.rows);
@@ -134,7 +134,7 @@ template <typename ELEM_TYPE>
 float test_index_precision(NNIndex<ELEM_TYPE>& index, const Matrix<ELEM_TYPE>& inputData, const Matrix<ELEM_TYPE>& testData, const Matrix<int>& matches,
 float precision, int& checks, int nn = 1, int skipMatches = 0)
 {
-const float SEARCH_EPS = 0.001;
+const float SEARCH_EPS = 0.001f;
 logger().info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
 logger().info("---------------------------------------------------------\n");
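
A plain floating-point literal such as 0.001 or 0.2 has type double, so initializing a float with it draws C4305 ("truncation from 'double' to 'float'"), and mixed float/double arithmetic draws C4244; the f suffix keeps the whole expression in single precision. Sketch:

    // The 'f' suffix gives the literal type float, matching the variable,
    // so no double-to-float truncation warning is emitted.
    const float SEARCH_EPS = 0.001f;   // '= 0.001' would raise C4305

    void sweepExample()
    {
        for (float cb_index = 0; cb_index < 1.1f; cb_index += 0.2f)
        {
            // ... evaluate this cb_index value ...
        }
    }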

View File

@@ -201,7 +201,7 @@ public:
 // Create a permutable array of indices to the input vectors.
 vind = new int[size_];
 for (size_t i = 0; i < size_; i++) {
-vind[i] = i;
+vind[i] = (int)i;
 }
 mean = new DIST_TYPE[veclen_];
@@ -230,11 +230,11 @@ public:
 /* Construct the randomized trees. */
 for (int i = 0; i < numTrees; i++) {
 /* Randomize the order of vectors to allow for unbiased sampling. */
-for (int j = size_; j > 0; --j) {
+for (int j = (int)size_; j > 0; --j) {
 int rnd = rand_int(j);
 swap(vind[j-1], vind[rnd]);
 }
-trees[i] = divideTree(0, size_ - 1);
+trees[i] = divideTree(0, (int)size_ - 1);
 }
 }
@@ -287,7 +287,7 @@ public:
 */
 int usedMemory() const
 {
-return pool.usedMemory+pool.wastedMemory+dataset.rows*sizeof(int); // pool memory and vind array memory
+return (int)(pool.usedMemory+pool.wastedMemory+dataset.rows*sizeof(int)); // pool memory and vind array memory
 }
@@ -424,10 +424,10 @@ private:
 if (num < RAND_DIM || v[i] > v[topind[num-1]]) {
 /* Put this element at end of topind. */
 if (num < RAND_DIM) {
-topind[num++] = i; /* Add to list. */
+topind[num++] = (int)i; /* Add to list. */
 }
 else {
-topind[num-1] = i; /* Replace last element. */
+topind[num-1] = (int)i; /* Replace last element. */
 }
 /* Bubble end value down to right location by repeated swapping. */
 int j = num - 1;
@@ -505,7 +505,7 @@ private:
 BranchSt branch;
 int checkCount = 0;
-Heap<BranchSt>* heap = new Heap<BranchSt>(size_);
+Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
 vector<bool> checked(size_,false);
 /* Search once through each tree down to root. */
@@ -568,7 +568,7 @@ private:
 adding exceeds their value.
 */
-DIST_TYPE new_distsq = flann_dist(&val, &val+1, &node->divval, mindistsq);
+DIST_TYPE new_distsq = (DIST_TYPE)flann_dist(&val, &val+1, &node->divval, mindistsq);
 // if (2 * checkCount < maxCheck || !result.full()) {
 if (new_distsq < result.worstDist() || !result.full()) {
 heap->insert( BranchSt::make_branch(otherChild, new_distsq) );
@@ -611,7 +611,7 @@ private:
 /* Call recursively to search next level down. */
 searchLevelExact(result, vec, bestChild, mindistsq);
-DIST_TYPE new_distsq = flann_dist(&val, &val+1, &node->divval, mindistsq);
+DIST_TYPE new_distsq = (DIST_TYPE)flann_dist(&val, &val+1, &node->divval, mindistsq);
 searchLevelExact(result, vec, otherChild, new_distsq);
 }

View File

@@ -238,7 +238,7 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 centers[index] = indices[rnd];
 for (int j=0;j<index;++j) {
-float sq = flann_dist(dataset[centers[index]],dataset[centers[index]]+dataset.cols,dataset[centers[j]]);
+float sq = (float)flann_dist(dataset[centers[index]],dataset[centers[index]]+dataset.cols,dataset[centers[j]]);
 if (sq<1e-16) {
 duplicate = true;
 }
@@ -275,9 +275,9 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 int best_index = -1;
 float best_val = 0;
 for (int j=0;j<n;++j) {
-float dist = flann_dist(dataset[centers[0]],dataset[centers[0]]+dataset.cols,dataset[indices[j]]);
+float dist = (float)flann_dist(dataset[centers[0]],dataset[centers[0]]+dataset.cols,dataset[indices[j]]);
 for (int i=1;i<index;++i) {
-float tmp_dist = flann_dist(dataset[centers[i]],dataset[centers[i]]+dataset.cols,dataset[indices[j]]);
+float tmp_dist = (float)flann_dist(dataset[centers[i]],dataset[centers[i]]+dataset.cols,dataset[indices[j]]);
 if (tmp_dist<dist) {
 dist = tmp_dist;
 }
@@ -337,7 +337,7 @@ class KMeansIndex : public NNIndex<ELEM_TYPE>
 // Repeat several trials
 double bestNewPot = -1;
-int bestNewIndex;
+int bestNewIndex = -1;
 for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {
 // Choose our center - have to be slightly careful to return a valid answer even accounting
@@ -418,7 +418,7 @@ public:
 else {
 throw FLANNException("Unknown algorithm for choosing initial centers.");
 }
-cb_index = 0.4;
+cb_index = 0.4f;
 }
@@ -481,12 +481,12 @@ public:
 indices = new int[size_];
 for (size_t i=0;i<size_;++i) {
-indices[i] = i;
+indices[i] = (int)i;
 }
 root = pool.allocate<KMeansNodeSt>();
-computeNodeStatistics(root, indices, size_);
-computeClustering(root, indices, size_, branching,0);
+computeNodeStatistics(root, indices, (int)size_);
+computeClustering(root, indices, (int)size_, branching,0);
 }
@@ -496,7 +496,7 @@ public:
 save_value(stream, max_iter);
 save_value(stream, memoryCounter);
 save_value(stream, cb_index);
-save_value(stream, *indices, size_);
+save_value(stream, *indices, (int)size_);
 save_tree(stream, root);
 }
@@ -512,7 +512,7 @@ public:
 delete[] indices;
 }
 indices = new int[size_];
-load_value(stream, *indices, size_);
+load_value(stream, *indices, (int)size_);
 if (root!=NULL) {
 free_centers(root);
@@ -540,7 +540,7 @@ public:
 }
 else {
 // Priority queue storing intermediate branches in the best-bin-first search
-Heap<BranchSt>* heap = new Heap<BranchSt>(size_);
+Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
 int checks = 0;
@@ -604,9 +604,9 @@ private:
 void save_tree(FILE* stream, KMeansNode node)
 {
 save_value(stream, *node);
-save_value(stream, *(node->pivot), veclen_);
+save_value(stream, *(node->pivot), (int)veclen_);
 if (node->childs==NULL) {
-int indices_offset = node->indices - indices;
+int indices_offset = (int)(node->indices - indices);
 save_value(stream, indices_offset);
 }
 else {
@@ -622,7 +622,7 @@ private:
 node = pool.allocate<KMeansNodeSt>();
 load_value(stream, *node);
 node->pivot = new DIST_TYPE[veclen_];
-load_value(stream, *(node->pivot), veclen_);
+load_value(stream, *(node->pivot), (int)veclen_);
 if (node->childs==NULL) {
 int indices_offset;
 load_value(stream, indices_offset);
@@ -659,10 +659,10 @@ private:
 */
 void computeNodeStatistics(KMeansNode node, int* indices, int indices_length) {
-DIST_TYPE radius = 0;
-DIST_TYPE variance = 0;
+double radius = 0;
+double variance = 0;
 DIST_TYPE* mean = new DIST_TYPE[veclen_];
-memoryCounter += veclen_*sizeof(DIST_TYPE);
+memoryCounter += (int)(veclen_*sizeof(DIST_TYPE));
 memset(mean,0,veclen_*sizeof(float));
@@ -679,7 +679,7 @@ private:
 variance /= size_;
 variance -= flann_dist(mean,mean+veclen_,zero());
-DIST_TYPE tmp = 0;
+double tmp = 0;
 for (int i=0;i<indices_length;++i) {
 tmp = flann_dist(mean, mean + veclen_, dataset[indices[i]]);
 if (tmp>radius) {
@@ -687,8 +687,8 @@ private:
 }
 }
-node->variance = variance;
-node->radius = radius;
+node->variance = (DIST_TYPE)variance;
+node->radius = (DIST_TYPE)radius;
 node->pivot = mean;
 }
@@ -728,7 +728,7 @@ private:
 }
-Matrix<double> dcenters(new double[branching*veclen_],branching,veclen_);
+Matrix<double> dcenters(new double[branching*veclen_],branching,(long)veclen_);
 for (int i=0; i<centers_length; ++i) {
 ELEM_TYPE* vec = dataset[centers_idx[i]];
 for (size_t k=0; k<veclen_; ++k) {
@@ -748,17 +748,17 @@ private:
 int* belongs_to = new int[indices_length];
 for (int i=0;i<indices_length;++i) {
-float sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]] + veclen_ ,dcenters[0]);
+double sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]] + veclen_ ,dcenters[0]);
 belongs_to[i] = 0;
 for (int j=1;j<branching;++j) {
-float new_sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_, dcenters[j]);
+double new_sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_, dcenters[j]);
 if (sq_dist>new_sq_dist) {
 belongs_to[i] = j;
 sq_dist = new_sq_dist;
 }
 }
 if (sq_dist>radiuses[belongs_to[i]]) {
-radiuses[belongs_to[i]] = sq_dist;
+radiuses[belongs_to[i]] = (float)sq_dist;
 }
 count[belongs_to[i]]++;
 }
@@ -790,10 +790,10 @@ private:
 // reassign points to clusters
 for (int i=0;i<indices_length;++i) {
-float sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_ ,dcenters[0]);
+float sq_dist = (float)flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_ ,dcenters[0]);
 int new_centroid = 0;
 for (int j=1;j<branching;++j) {
-float new_sq_dist = flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_,dcenters[j]);
+float new_sq_dist = (float)flann_dist(dataset[indices[i]], dataset[indices[i]]+veclen_,dcenters[j]);
 if (sq_dist>new_sq_dist) {
 new_centroid = j;
 sq_dist = new_sq_dist;
@@ -838,9 +838,9 @@ private:
 for (int i=0; i<branching; ++i) {
 centers[i] = new DIST_TYPE[veclen_];
-memoryCounter += veclen_*sizeof(DIST_TYPE);
+memoryCounter += (int)(veclen_*sizeof(DIST_TYPE));
 for (size_t k=0; k<veclen_; ++k) {
-centers[i][k] = dcenters[i][k];
+centers[i][k] = (DIST_TYPE)dcenters[i][k];
 }
 }
@@ -852,11 +852,11 @@ private:
 for (int c=0;c<branching;++c) {
 int s = count[c];
-float variance = 0;
-float mean_radius =0;
+double variance = 0;
+double mean_radius =0;
 for (int i=0;i<indices_length;++i) {
 if (belongs_to[i]==c) {
-float d = flann_dist(dataset[indices[i]],dataset[indices[i]]+veclen_,zero());
+double d = flann_dist(dataset[indices[i]],dataset[indices[i]]+veclen_,zero());
 variance += d;
 mean_radius += sqrt(d);
 swap(indices[i],indices[end]);
@@ -871,8 +871,8 @@ private:
 node->childs[c] = pool.allocate<KMeansNodeSt>();
 node->childs[c]->radius = radiuses[c];
 node->childs[c]->pivot = centers[c];
-node->childs[c]->variance = variance;
-node->childs[c]->mean_radius = mean_radius;
+node->childs[c]->variance = (float)variance;
+node->childs[c]->mean_radius = (float)mean_radius;
 node->childs[c]->indices = NULL;
 computeClustering(node->childs[c],indices+start, end-start, branching, level+1);
 start=end;
@@ -905,7 +905,7 @@ private:
 {
 // Ignore those clusters that are too far away
 {
-DIST_TYPE bsq = flann_dist(vec, vec+veclen_, node->pivot);
+DIST_TYPE bsq = (DIST_TYPE)flann_dist(vec, vec+veclen_, node->pivot);
 DIST_TYPE rsq = node->radius;
 DIST_TYPE wsq = result.worstDist();
@@ -947,9 +947,9 @@ private:
 {
 int best_index = 0;
-domain_distances[best_index] = flann_dist(q,q+veclen_,node->childs[best_index]->pivot);
+domain_distances[best_index] = (float)flann_dist(q,q+veclen_,node->childs[best_index]->pivot);
 for (int i=1;i<branching;++i) {
-domain_distances[i] = flann_dist(q,q+veclen_,node->childs[i]->pivot);
+domain_distances[i] = (float)flann_dist(q,q+veclen_,node->childs[i]->pivot);
 if (domain_distances[i]<domain_distances[best_index]) {
 best_index = i;
 }
@@ -979,7 +979,7 @@ private:
 {
 // Ignore those clusters that are too far away
 {
-float bsq = flann_dist(vec, vec+veclen_, node->pivot);
+float bsq = (float)flann_dist(vec, vec+veclen_, node->pivot);
 float rsq = node->radius;
 float wsq = result.worstDist();
@@ -1021,7 +1021,7 @@ private:
 {
 float* domain_distances = new float[branching];
 for (int i=0;i<branching;++i) {
-float dist = flann_dist(q, q+veclen_, node->childs[i]->pivot);
+float dist = (float)flann_dist(q, q+veclen_, node->childs[i]->pivot);
 int j=0;
 while (domain_distances[j]<dist && j<i) j++;

View File

@@ -90,21 +90,21 @@ public:
 /* nothing to do here for linear search */
 }
-void saveIndex(FILE* stream)
+void saveIndex(FILE*)
 {
 /* nothing to do here for linear search */
 }
-void loadIndex(FILE* stream)
+void loadIndex(FILE*)
 {
 /* nothing to do here for linear search */
 }
-void findNeighbors(ResultSet<ELEM_TYPE>& resultSet, const ELEM_TYPE* vec, const SearchParams& searchParams)
+void findNeighbors(ResultSet<ELEM_TYPE>& resultSet, const ELEM_TYPE*, const SearchParams&)
 {
 for (size_t i=0;i<dataset.rows;++i) {
-resultSet.addPoint(dataset[i],i);
+resultSet.addPoint(dataset[i],(int)i);
 }
 }

View File

@@ -299,7 +299,7 @@ public:
 {
 Item it;
 it.index = index;
-it.dist = flann_dist(target, target_end, point);
+it.dist = (float)flann_dist(target, target_end, point);
 if (it.dist<=radius) {
 items.push_back(it);
 push_heap(items.begin(), items.end());

View File

@@ -41,8 +41,8 @@ namespace cvflann
 template<typename T>
 Matrix<T> random_sample(Matrix<T>& srcMatrix, long size, bool remove = false)
 {
-UniqueRandom rand(srcMatrix.rows);
-Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
+UniqueRandom rand((int)srcMatrix.rows);
+Matrix<T> newSet(new T[size * srcMatrix.cols], size, (long)srcMatrix.cols);
 T *src,*dest;
 for (long i=0;i<size;++i) {
@@ -73,8 +73,8 @@ Matrix<T> random_sample(Matrix<T>& srcMatrix, long size, bool remove = false)
 template<typename T>
 Matrix<T> random_sample(const Matrix<T>& srcMatrix, size_t size)
 {
-UniqueRandom rand(srcMatrix.rows);
-Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
+UniqueRandom rand((int)srcMatrix.rows);
+Matrix<T> newSet(new T[size * srcMatrix.cols], (long)size, (long)srcMatrix.cols);
 T *src,*dest;
 for (size_t i=0;i<size;++i) {

View File

@@ -104,7 +104,7 @@ void save_value(FILE* stream, const T& value, int count = 1)
 template<typename T>
 void load_value(FILE* stream, T& value, int count = 1)
 {
-int read_cnt = fread(&value, sizeof(value),count, stream);
+int read_cnt = (int)fread(&value, sizeof(value),count, stream);
 if (read_cnt!=count) {
 throw FLANNException("Cannot read from file");
 }

View File

@@ -108,7 +108,7 @@ skip_input_data(j_decompress_ptr cinfo, long num_bytes)
 {
 // We need to skip more data than we have in the buffer.
 // This will force the JPEG library to suspend decoding.
-source->skip = num_bytes - source->pub.bytes_in_buffer;
+source->skip = (int)(num_bytes - source->pub.bytes_in_buffer);
 source->pub.next_input_byte += source->pub.bytes_in_buffer;
 source->pub.bytes_in_buffer = 0;
 }

View File

@@ -116,7 +116,7 @@ bool TiffDecoder::readHeader()
 if( tif )
 {
-int width = 0, height = 0, photometric = 0, compression = 0;
+int width = 0, height = 0, photometric = 0;
 m_tif = tif;
 if( TIFFRGBAImageOK( tif, errmsg ) &&
@@ -226,9 +226,9 @@ bool TiffDecoder::readData( Mat& img )
 else
 {
 if( !is_tiled )
-ok = TIFFReadEncodedStrip( tif, tileidx, (uint32*)buffer, -1 );
+ok = (int)TIFFReadEncodedStrip( tif, tileidx, (uint32*)buffer, (tsize_t)-1 ) >= 0;
 else
-ok = TIFFReadEncodedTile( tif, tileidx, (uint32*)buffer, -1 );
+ok = (int)TIFFReadEncodedTile( tif, tileidx, (uint32*)buffer, (tsize_t)-1 ) >= 0;
 if( !ok )
 {
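
TIFFReadEncodedStrip/TIFFReadEncodedTile return a tsize_t byte count, with -1 signalling failure, so on x64 the result is a 64-bit value; storing it straight into int ok both truncates and conflates the byte count with a success flag. The fix narrows explicitly and compares against 0 to derive a real flag. A hedged sketch against the libtiff API (the helper name is assumed):

    #include <tiffio.h>

    // Derive a success flag from the tsize_t byte count instead of
    // stuffing the (possibly 64-bit) count into an int.
    bool readStripOk(TIFF* tif, tstrip_t strip, void* buffer)
    {
        return TIFFReadEncodedStrip(tif, strip, buffer, (tsize_t)-1) >= 0;
    }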

View File

@@ -164,7 +164,7 @@ void icvCvt_BGRA2BGR_16u_C4C3R( const ushort* bgra, int bgra_step,
 {
 for( i = 0; i < size.width; i++, bgr += 3, bgra += 4 )
 {
-uchar t0 = bgra[swap_rb], t1 = bgra[1];
+ushort t0 = bgra[swap_rb], t1 = bgra[1];
 bgr[0] = t0; bgr[1] = t1;
 t0 = bgra[swap_rb^2]; bgr[2] = t0;
 }

View File

@@ -312,8 +312,8 @@ void matchTemplate( const Mat& _img, const Mat& _templ, Mat& result, int method
 double* p2 = (double*)(sum.data + templ.rows*sum.step);
 double* p3 = p2 + templ.cols*cn;
-int sumstep = sum.data ? sum.step / sizeof(double) : 0;
-int sqstep = sqsum.data ? sqsum.step / sizeof(double) : 0;
+int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0;
+int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0;
 int i, j, k;

View File

@@ -679,7 +679,7 @@ void CvGBTrees::leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node
 CvDTreeNode** CvGBTrees::GetLeaves( const CvDTree* dtree, int& len )
 {
 len = 0;
-CvDTreeNode** leaves = new pCvDTreeNode[1 << params.max_depth];
+CvDTreeNode** leaves = new pCvDTreeNode[(size_t)1 << params.max_depth];
 leaves_get(leaves, len, const_cast<pCvDTreeNode>(dtree->get_root()));
 return leaves;
 }
@@ -718,7 +718,7 @@ void CvGBTrees::do_subsample()
 //===========================================================================
 float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing,
-CvMat* weak_responses, CvSlice slice, int k) const
+CvMat* /*weak_responses*/, CvSlice slice, int k) const
 {
 float result = 0.0f;

View File

@@ -25,7 +25,7 @@
 // RESULT
 // Error status
 */
-int convertPoints(int countLevel, int lambda,
+int convertPoints(int /*countLevel*/, int lambda,
 int initialImageLevel,
 CvPoint *points, int *levels,
 CvPoint **partsDisplacement, int kPoints, int n,

View File

@@ -204,7 +204,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -278,7 +278,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
 }
 }
-void parserV (FILE * xmlf, int p, CvLSVMFilterObject * model){
+void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
 int st = 0;
 int tag;
 int tagVal;
@@ -293,7 +293,7 @@ void parserV (FILE * xmlf, int p, CvLSVMFilterObject * model){
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -341,7 +341,7 @@ void parserV (FILE * xmlf, int p, CvLSVMFilterObject * model){
 }
 }
 }
-void parserD (FILE * xmlf, int p, CvLSVMFilterObject * model){
+void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
 int st = 0;
 int tag;
 int tagVal;
@@ -356,7 +356,7 @@ void parserD (FILE * xmlf, int p, CvLSVMFilterObject * model){
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -430,7 +430,7 @@ void parserD (FILE * xmlf, int p, CvLSVMFilterObject * model){
 }
 }
-void parserPFilter (FILE * xmlf, int p, int N_path, CvLSVMFilterObject * model){
+void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * model){
 int st = 0;
 int sizeX, sizeY;
 int tag;
@@ -455,7 +455,7 @@ void parserPFilter (FILE * xmlf, int p, int N_path, CvLSVMFilterObject * model)
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -540,7 +540,7 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -588,7 +588,7 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -643,7 +643,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
@@ -745,7 +745,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
 st = 0;
 tag = 0;
 while(!feof(xmlf)){
-ch = fgetc( xmlf );
+ch = (char)fgetc( xmlf );
 if(ch == '<'){
 tag = 1;
 j = 1;
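
fgetc returns an int so that it can represent EOF, and the parser stores the result in a char; VS2010 flags that narrowing as C4244. Because each loop already terminates via feof(), the fix simply makes the conversion explicit. A simplified sketch of the loop shape (assumed context):

    #include <cstdio>

    void scanForTags(FILE* xmlf)
    {
        char ch;
        while (!feof(xmlf)) {
            // ch = fgetc(xmlf);     // int -> char narrowing: C4244
            ch = (char)fgetc(xmlf);  // explicit cast, as in the parser fixes
            if (ch == '<') {
                /* start of an XML tag */
            }
        }
    }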

View File

@@ -334,7 +334,7 @@ cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const voi
 // add the dominant orientation and the relative shift
 if( shift_weight == 0 )
-shift_weight = 0.01;
+shift_weight = 0.01f;
 fbase_orient += shift_orient / shift_weight;
 fbase_orient -= (fbase_orient < 360 ? 0 : 360);

View File

@@ -467,7 +467,7 @@ int VocData::getDetectorGroundTruth(const string& obj_class, const ObdDatasetTyp
 vector<ObdScoreIndexSorter> sorted_ids;
 {
 /* first count how many objects to allow preallocation */
-int obj_count = 0;
+size_t obj_count = 0;
 CV_Assert(images.size() == bounding_boxes.size());
 CV_Assert(scores.size() == bounding_boxes.size());
 for (size_t im_idx = 0; im_idx < scores.size(); ++im_idx)
@@ -484,8 +484,8 @@ int VocData::getDetectorGroundTruth(const string& obj_class, const ObdDatasetTyp
 for (size_t ob_idx = 0; ob_idx < scores[im_idx].size(); ++ob_idx)
 {
 sorted_ids[flat_pos].score = scores[im_idx][ob_idx];
-sorted_ids[flat_pos].image_idx = im_idx;
-sorted_ids[flat_pos].obj_idx = ob_idx;
+sorted_ids[flat_pos].image_idx = (int)im_idx;
+sorted_ids[flat_pos].obj_idx = (int)ob_idx;
 ++flat_pos;
 }
 }
@@ -579,7 +579,7 @@ int VocData::getDetectorGroundTruth(const string& obj_class, const ObdDatasetTyp
 if (ov > maxov)
 {
 maxov = ov;
-max_gt_obj_idx = gt_obj_idx;
+max_gt_obj_idx = (int)gt_obj_idx;
 //store whether the maximum detection is marked as difficult or not
 max_is_difficult = (img_object_data[im_idx][gt_obj_idx].difficult);
 }
@@ -854,7 +854,7 @@ void VocData::calcPrecRecall_impl(const vector<char>& ground_truth, const vector
 {
 recall_norm = recall_normalization;
 } else {
-recall_norm = std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<bool>(),true));
+recall_norm = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<bool>(),true));
 }
 ap = 0;

View File

@@ -191,8 +191,8 @@ void saveCameraParams( const string& filename,
 if( !rvecs.empty() && !tvecs.empty() )
 {
-Mat bigmat(rvecs.size(), 6, CV_32F);
-for( size_t i = 0; i < rvecs.size(); i++ )
+Mat bigmat((int)rvecs.size(), 6, CV_32F);
+for( int i = 0; i < (int)rvecs.size(); i++ )
 {
 Mat r = bigmat(Range(i, i+1), Range(0,3));
 Mat t = bigmat(Range(i, i+1), Range(3,6));
@@ -205,8 +205,8 @@ void saveCameraParams( const string& filename,
 if( !imagePoints.empty() )
 {
-Mat imagePtMat(imagePoints.size(), imagePoints[0].size(), CV_32FC2);
-for( size_t i = 0; i < imagePoints.size(); i++ )
+Mat imagePtMat((int)imagePoints.size(), imagePoints[0].size(), CV_32FC2);
+for( int i = 0; i < (int)imagePoints.size(); i++ )
 {
 Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
 Mat imgpti(imagePoints[i]);

View File

@@ -56,7 +56,7 @@ int main(int,char**)
 vector<Point2f> points(20);
 for (size_t i = 0; i < points.size(); ++i)
-points[i] = Point2f(i * 5, i % 7);
+points[i] = Point2f((float)(i * 5), (float)(i % 7));
 cout << "points = " << points << ";" << endl;
 return 0;

View File

@@ -45,7 +45,7 @@ int getMatcherFilterType( const string& str )
 return NONE_FILTER;
 if( str == "CrossCheckFilter" )
 return CROSS_CHECK_FILTER;
-CV_Assert(0);
+CV_Error(CV_StsBadArg, "Invalid filter name");
 return -1;
 }
@@ -155,7 +155,7 @@ void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
 vector<Point2f> curve;
 Ptr<GenericDescriptorMatcher> gdm = new VectorDescriptorMatcher( descriptorExtractor, descriptorMatcher );
 evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
-for( float l_p = 0; l_p < 1 - FLT_EPSILON; l_p+=0.1 )
+for( float l_p = 0; l_p < 1 - FLT_EPSILON; l_p+=0.1f )
 cout << "1-precision = " << l_p << "; recall = " << getRecall( curve, l_p ) << endl;
 cout << ">" << endl;
 }
@@ -185,7 +185,7 @@ void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
 Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
 for( size_t i1 = 0; i1 < points1.size(); i1++ )
 {
-if( norm(points2[i1] - points1t.at<Point2f>(i1,0)) < 4 ) // inlier
+if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) < 4 ) // inlier
 matchesMask[i1] = 1;
 }
 // draw inliers

View File

@@ -51,7 +51,7 @@ struct MyData
 };
 //These write and read functions must exist as per the inline functions in operations.hpp
-void write(FileStorage& fs, const std::string& name, const MyData& x){
+void write(FileStorage& fs, const std::string&, const MyData& x){
 x.write(fs);
 }
 void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){