Move border type constants and Moments class to core module

Andrey Kamaev 2013-04-10 15:54:14 +04:00
parent f4ae0cf19c
commit c98c246fc2
83 changed files with 541 additions and 639 deletions

View File

@ -60,6 +60,7 @@
\************************************************************************************/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "circlesgrid.hpp"
#include <stdarg.h>

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
#include <iterator>

View File

@ -40,6 +40,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <vector>
#include <algorithm>

View File

@ -1364,7 +1364,7 @@ void CirclesGridFinder::drawHoles(const Mat &srcImage, Mat &drawImage) const
const Scalar holeColor = Scalar(0, 255, 0);
if (srcImage.channels() == 1)
cvtColor(srcImage, drawImage, CV_GRAY2RGB);
cvtColor(srcImage, drawImage, COLOR_GRAY2RGB);
else
srcImage.copyTo(drawImage);

View File

@ -44,7 +44,6 @@
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/features2d.hpp"
#include "opencv2/core/utility.hpp"

View File

@ -47,40 +47,8 @@
#include <math.h>
//#define _SUBPIX_VERBOSE
#undef max
namespace cv {
// static void drawCircles(Mat& img, const std::vector<Point2f>& corners, const std::vector<float>& radius)
// {
// for(size_t i = 0; i < corners.size(); i++)
// {
// circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
// }
// }
// static int histQuantile(const Mat& hist, float quantile)
// {
// if(hist.dims > 1) return -1; // works for 1D histograms only
// float cur_sum = 0;
// float total_sum = (float)sum(hist).val[0];
// float quantile_sum = total_sum*quantile;
// for(int j = 0; j < hist.size[0]; j++)
// {
// cur_sum += (float)hist.at<float>(j);
// if(cur_sum > quantile_sum)
// {
// return j;
// }
// }
// return hist.size[0] - 1;
// }
inline bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
{
return p1.second < p2.second;
@ -124,29 +92,6 @@ static void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2,
cross_point = origin1 + dir1*alpha;
}
// static void findCorner(const std::vector<Point>& contour, Point2f point, Point2f& corner)
// {
// // find the nearest point
// double min_dist = std::numeric_limits<double>::max();
// int min_idx = -1;
// // find corner idx
// for(size_t i = 0; i < contour.size(); i++)
// {
// double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
// if(dist < min_dist)
// {
// min_dist = dist;
// min_idx = (int)i;
// }
// }
// assert(min_idx >= 0);
// // temporary solution, have to make something more precise
// corner = contour[min_idx];
// return;
// }
static void findCorner(const std::vector<Point2f>& contour, Point2f point, Point2f& corner)
{
// find the nearest point
@ -173,13 +118,7 @@ static void findCorner(const std::vector<Point2f>& contour, Point2f point, Point
static int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
{
Mat bw;
//const double max_bell_width = 20; // we expect two bells with width bounded above
//const double min_bell_width = 5; // and below
double total_sum = sum(hist).val[0];
//double thresh = total_sum/(2*max_bell_width)*0.25f; // quarter of a bar inside a bell
// threshold(hist, bw, thresh, 255.0, CV_THRESH_BINARY);
double quantile_sum = 0.0;
//double min_quantile = 0.2;
@ -233,12 +172,6 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
const float* _ranges = ranges;
Mat hist;
#if defined(_SUBPIX_VERBOSE)
std::vector<float> radius;
radius.assign(corners.size(), 0.0f);
#endif //_SUBPIX_VERBOSE
Mat black_comp, white_comp;
for(int i = 0; i < ncorners; i++)
{
@ -248,39 +181,20 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
Mat img_roi = img(roi);
calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);
#if 0
int black_thresh = histQuantile(hist, 0.45f);
int white_thresh = histQuantile(hist, 0.55f);
#else
int black_thresh = 0, white_thresh = 0;
segment_hist_max(hist, black_thresh, white_thresh);
#endif
threshold(img, black_comp, black_thresh, 255.0, CV_THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, CV_THRESH_BINARY);
threshold(img, black_comp, black_thresh, 255.0, THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, THRESH_BINARY);
const int erode_count = 1;
erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);
#if defined(_SUBPIX_VERBOSE)
namedWindow("roi", 1);
imshow("roi", img_roi);
imwrite("test.jpg", img);
namedWindow("black", 1);
imshow("black", black_comp);
namedWindow("white", 1);
imshow("white", white_comp);
cvWaitKey(0);
imwrite("black.jpg", black_comp);
imwrite("white.jpg", white_comp);
#endif
std::vector<std::vector<Point> > white_contours, black_contours;
std::vector<Vec4i> white_hierarchy, black_hierarchy;
findContours(black_comp, black_contours, black_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
findContours(black_comp, black_contours, black_hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
if(black_contours.size() < 5 || white_contours.size() < 5) continue;
@ -302,15 +216,11 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
Point2f quad_corners[4];
for(int k = 0; k < 4; k++)
{
#if 1
std::vector<Point2f> temp;
for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);
findCorner(quads_approx[k], corners[i], quad_corners[k]);
#else
findCorner(*quads[k], corners[i], quad_corners[k]);
#endif
quad_corners[k] += Point2f(0.5f, 0.5f);
}
@ -323,44 +233,7 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;
findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
#if defined(_SUBPIX_VERBOSE)
radius[i] = norm(corners[i] - ground_truth_corners[ground_truth_idx])*6;
#if 1
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
// line(test, quad_corners[0] - corners[i] + Point2f(30, 30), quad_corners[1] - corners[i] + Point2f(30, 30), cvScalar(0, 255, 0));
// line(test, quad_corners[2] - corners[i] + Point2f(30, 30), quad_corners[3] - corners[i] + Point2f(30, 30), cvScalar(0, 255, 0));
std::vector<std::vector<Point> > contrs;
contrs.resize(1);
for(int k = 0; k < 4; k++)
{
//contrs[0] = quads_approx[k];
contrs[0].clear();
for(size_t j = 0; j < quads_approx[k].size(); j++) contrs[0].push_back(quads_approx[k][j]);
drawContours(test, contrs, 0, CV_RGB(0, 0, 255), 1, 1, std::vector<Vec4i>(), 2);
circle(test, quad_corners[k], 0.5, CV_RGB(255, 0, 0));
}
Mat test1 = test(Rect(corners[i].x - 30, corners[i].y - 30, 60, 60));
namedWindow("1", 1);
imshow("1", test1);
imwrite("test.jpg", test);
waitKey(0);
#endif
#endif //_SUBPIX_VERBOSE
}
#if defined(_SUBPIX_VERBOSE)
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
drawCircles(test, corners, radius);
namedWindow("corners", 1);
imshow("corners", test);
waitKey();
#endif //_SUBPIX_VERBOSE
return true;
}

View File

@ -304,7 +304,7 @@ protected:
for(size_t i = 0; i < brdsNum; ++i)
{
Mat gray;
cvtColor(boards[i], gray, CV_BGR2GRAY);
cvtColor(boards[i], gray, COLOR_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
cornerSubPix(gray, tmp, Size(5, 5), Size(-1,-1), tc);
imagePoints.push_back(tmp);
@ -314,7 +314,7 @@ protected:
for(size_t i = 0; i < brdsNum; ++i)
{
Mat gray;
cvtColor(boards[i], gray, CV_BGR2GRAY);
cvtColor(boards[i], gray, COLOR_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
find4QuadCornerSubpix(gray, tmp, Size(5, 5));
imagePoints.push_back(tmp);

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
class CV_ChessboardDetectorTimingTest : public cvtest::BaseTest
{

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <limits>
#include "test_chessboardgenerator.hpp"

View File

@ -12,7 +12,6 @@
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"

View File

@ -80,7 +80,7 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
Mat img = _img;
if( _img.channels() > 1)
{
Mat tmp; cvtColor( _img, tmp, CV_BGR2GRAY ); img = tmp;
Mat tmp; cvtColor( _img, tmp, COLOR_BGR2GRAY ); img = tmp;
}
Mat dxI; Sobel( img, dxI, CV_32FC1, 1, 0, 3 );
Mat dxI2; pow( dxI / 8.f/*normalize*/, 2, dxI2 );
@ -714,8 +714,8 @@ protected:
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
assert( _leftImg.type() == CV_8UC3 && _rightImg.type() == CV_8UC3 );
Mat leftImg; cvtColor( _leftImg, leftImg, CV_BGR2GRAY );
Mat rightImg; cvtColor( _rightImg, rightImg, CV_BGR2GRAY );
Mat leftImg; cvtColor( _leftImg, leftImg, COLOR_BGR2GRAY );
Mat rightImg; cvtColor( _rightImg, rightImg, COLOR_BGR2GRAY );
Ptr<StereoBM> bm = createStereoBM( params.ndisp, params.winSize );
Mat tempDisp;

View File

@ -41,6 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
using namespace cv;
using namespace std;

View File

@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
class CV_UndistortPointsBadArgTest : public cvtest::BadArgTest
{

View File

@ -35,6 +35,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;}

View File

@ -483,8 +483,8 @@ namespace colormap
}
// Turn a BGR matrix into its grayscale representation.
if(src.type() == CV_8UC3)
cvtColor(src.clone(), src, CV_BGR2GRAY);
cvtColor(src.clone(), src, CV_GRAY2BGR);
cvtColor(src.clone(), src, COLOR_BGR2GRAY);
cvtColor(src.clone(), src, COLOR_GRAY2BGR);
// Apply the ColorMap.
LUT(src, _lut, _dst);
}

View File

@ -832,7 +832,7 @@ void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
minDist = DBL_MAX;
minClass = -1;
for(size_t sampleIdx = 0; sampleIdx < _histograms.size(); sampleIdx++) {
double dist = compareHist(_histograms[sampleIdx], query, CV_COMP_CHISQR);
double dist = compareHist(_histograms[sampleIdx], query, HISTCMP_CHISQR);
if((dist < minDist) && (dist < _threshold)) {
minDist = dist;
minClass = _labels.at<int>((int) sampleIdx);

View File

@ -118,7 +118,7 @@ void cv::generateColors( std::vector<Scalar>& colors, size_t count, size_t facto
// Convert the colors set to Lab space.
// Distances between colors in this space correspond to human perception.
Mat lab;
cvtColor( bgr, lab, CV_BGR2Lab);
cvtColor( bgr, lab, COLOR_BGR2Lab);
// Subsample colors from the generated set so that
// to maximize the minimum distances between each other.
@ -128,7 +128,7 @@ void cv::generateColors( std::vector<Scalar>& colors, size_t count, size_t facto
// Convert subsampled colors back to RGB
Mat bgr_subset;
cvtColor( lab_subset, bgr_subset, CV_Lab2BGR );
cvtColor( lab_subset, bgr_subset, COLOR_Lab2BGR );
CV_Assert( bgr_subset.total() == count );
for( size_t i = 0; i < count; i++ )

View File

@ -47,7 +47,6 @@
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"

View File

@ -770,7 +770,7 @@ Mat cv::SpinImageModel::packRandomScaledSpins(bool separateScale, size_t xCount,
int endx = (x + 1) * sz + x;
Mat color;
cvtColor(spins[pos++], color, CV_GRAY2BGR);
cvtColor(spins[pos++], color, COLOR_GRAY2BGR);
Mat roi = result(Range(starty, endy), Range(startx, endx));
color.copyTo(roi);
}

View File

@ -367,8 +367,8 @@ void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
// Preprocessing
Mat leftgray, rightgray;
if (left.type() != CV_8UC1) {
cvtColor(left, leftgray, CV_BGR2GRAY);
cvtColor(right, rightgray, CV_BGR2GRAY);
cvtColor(left, leftgray, COLOR_BGR2GRAY);
cvtColor(right, rightgray, COLOR_BGR2GRAY);
} else {
left.copyTo(leftgray);
right.copyTo(rightgray);

View File

@ -121,6 +121,14 @@ CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz);
//! swaps two matrices
CV_EXPORTS void swap(Mat& a, Mat& b);
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);
//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
CV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst,
int top, int bottom, int left, int right,
int borderType, const Scalar& value = Scalar() );
//! adds one matrix to another (dst = src1 + src2)
CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst,
InputArray mask = noArray(), int dtype = -1);
@ -179,6 +187,9 @@ CV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mas
CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
int normType = NORM_L2, InputArray mask = noArray());
//! computes PSNR image/video quality metric
CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
//! computes norm of a sparse matrix
CV_EXPORTS double norm( const SparseMat& src, int normType );

View File

@ -157,6 +157,20 @@ enum { DFT_INVERSE = 1,
DCT_ROWS = DFT_ROWS
};
//! Various border types, image boundaries are denoted with '|'
enum {
BORDER_CONSTANT = 0, // iiiiii|abcdefgh|iiiiiii with some specified 'i'
BORDER_REPLICATE = 1, // aaaaaa|abcdefgh|hhhhhhh
BORDER_REFLECT = 2, // fedcba|abcdefgh|hgfedcb
BORDER_WRAP = 3, // cdefgh|abcdefgh|abcdefg
BORDER_REFLECT_101 = 4, // gfedcb|abcdefgh|gfedcba
BORDER_TRANSPARENT = 5, // uvwxyz|abcdefgh|ijklmno
BORDER_REFLECT101 = BORDER_REFLECT_101,
BORDER_DEFAULT = BORDER_REFLECT_101,
BORDER_ISOLATED = 16 // do not look outside of ROI
};
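With the border constants now living in core, the diagrams above can be checked directly with copyMakeBorder, which this commit also moves to core. A minimal sketch, assuming the post-move headers; the values are illustrative and not part of the commit:

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // The "abcd" of the diagrams above, as a single row.
    cv::Mat row = (cv::Mat_<uchar>(1, 4) << 10, 20, 30, 40);

    cv::Mat replicated, reflected101;
    cv::copyMakeBorder(row, replicated,   0, 0, 2, 2, cv::BORDER_REPLICATE);    // 10 10 | 10 20 30 40 | 40 40
    cv::copyMakeBorder(row, reflected101, 0, 0, 2, 2, cv::BORDER_REFLECT_101);  // 30 20 | 10 20 30 40 | 20 30

    std::printf("%d %d\n", replicated.at<uchar>(0, 0), reflected101.at<uchar>(0, 0)); // prints "10 30"
    return 0;
}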
//////////////// static assert /////////////////

View File

@ -667,6 +667,51 @@ public:
///////////////////////// raster image moments //////////////////////////
class CV_EXPORTS_W_MAP Moments
{
public:
//! the default constructor
Moments();
//! the full constructor
Moments(double m00, double m10, double m01, double m20, double m11,
double m02, double m30, double m21, double m12, double m03 );
////! the conversion from CvMoments
//Moments( const CvMoments& moments );
////! the conversion to CvMoments
//operator CvMoments() const;
//! spatial moments
CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
//! central moments
CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
//! central normalized moments
CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
};
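The Moments class moves to core as-is, while the cv::moments() function that fills it stays in imgproc. A small sketch of the usual centroid computation (the helper name is illustrative; it assumes a binary mask as input):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>  // cv::moments() remains in imgproc

// Returns the centroid of a non-empty binary mask from its raster moments.
static cv::Point2d maskCentroid(const cv::Mat& mask)
{
    cv::Moments m = cv::moments(mask, /*binaryImage=*/true);
    CV_Assert(m.m00 != 0);  // an empty mask has no centroid
    return cv::Point2d(m.m10 / m.m00, m.m01 / m.m00);
}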
/*!
traits
*/
template<> class DataType<Moments>
{
public:
typedef Moments value_type;
typedef double work_type;
typedef double channel_type;
enum { generic_type = 0,
depth = DataType<channel_type>::depth,
channels = (int)(sizeof(value_type)/sizeof(channel_type)), // 24
fmt = DataType<channel_type>::fmt + ((channels - 1) << 8),
type = CV_MAKETYPE(depth, channels)
};
typedef Vec<channel_type, channels> vec_type;
};
/////////////////////////////////////////////////////////////////////////
///////////////////////////// Implementation ////////////////////////////
/////////////////////////////////////////////////////////////////////////

View File

@ -512,6 +512,231 @@ Mat repeat(const Mat& src, int ny, int nx)
return dst;
}
} // cv
/*
Various border types, image boundaries are denoted with '|'
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
*/
int cv::borderInterpolate( int p, int len, int borderType )
{
if( (unsigned)p < (unsigned)len )
;
else if( borderType == BORDER_REPLICATE )
p = p < 0 ? 0 : len - 1;
else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
{
int delta = borderType == BORDER_REFLECT_101;
if( len == 1 )
return 0;
do
{
if( p < 0 )
p = -p - 1 + delta;
else
p = len - 1 - (p - len) - delta;
}
while( (unsigned)p >= (unsigned)len );
}
else if( borderType == BORDER_WRAP )
{
if( p < 0 )
p -= ((p-len+1)/len)*len;
if( p >= len )
p %= len;
}
else if( borderType == BORDER_CONSTANT )
p = -1;
else
CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
return p;
}
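A few spot checks of what the relocated function returns, mirroring the Java test further below (a sketch, not part of the commit):

#include <opencv2/core.hpp>

int main()
{
    // In-range coordinates pass through unchanged.
    CV_Assert(cv::borderInterpolate(100, 150, cv::BORDER_REFLECT_101) == 100);
    // Out-of-range coordinates are mapped back according to the border mode.
    CV_Assert(cv::borderInterpolate(-5, 10, cv::BORDER_WRAP)      == 5);
    CV_Assert(cv::borderInterpolate(-1, 10, cv::BORDER_REPLICATE) == 0);
    CV_Assert(cv::borderInterpolate(-1, 10, cv::BORDER_CONSTANT)  == -1); // caller substitutes the constant value
    return 0;
}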
namespace
{
void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
uchar* dst, size_t dststep, cv::Size dstroi,
int top, int left, int cn, int borderType )
{
const int isz = (int)sizeof(int);
int i, j, k, elemSize = 1;
bool intMode = false;
if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
{
cn /= isz;
elemSize = isz;
intMode = true;
}
cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
int* tab = _tab;
int right = dstroi.width - srcroi.width - left;
int bottom = dstroi.height - srcroi.height - top;
for( i = 0; i < left; i++ )
{
j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
for( k = 0; k < cn; k++ )
tab[i*cn + k] = j + k;
}
for( i = 0; i < right; i++ )
{
j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
for( k = 0; k < cn; k++ )
tab[(i+left)*cn + k] = j + k;
}
srcroi.width *= cn;
dstroi.width *= cn;
left *= cn;
right *= cn;
uchar* dstInner = dst + dststep*top + left*elemSize;
for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
{
if( dstInner != src )
memcpy(dstInner, src, srcroi.width*elemSize);
if( intMode )
{
const int* isrc = (int*)src;
int* idstInner = (int*)dstInner;
for( j = 0; j < left; j++ )
idstInner[j - left] = isrc[tab[j]];
for( j = 0; j < right; j++ )
idstInner[j + srcroi.width] = isrc[tab[j + left]];
}
else
{
for( j = 0; j < left; j++ )
dstInner[j - left] = src[tab[j]];
for( j = 0; j < right; j++ )
dstInner[j + srcroi.width] = src[tab[j + left]];
}
}
dstroi.width *= elemSize;
dst += dststep*top;
for( i = 0; i < top; i++ )
{
j = cv::borderInterpolate(i - top, srcroi.height, borderType);
memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
}
for( i = 0; i < bottom; i++ )
{
j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
}
}
void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
uchar* dst, size_t dststep, cv::Size dstroi,
int top, int left, int cn, const uchar* value )
{
int i, j;
cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
uchar* constBuf = _constBuf;
int right = dstroi.width - srcroi.width - left;
int bottom = dstroi.height - srcroi.height - top;
for( i = 0; i < dstroi.width; i++ )
{
for( j = 0; j < cn; j++ )
constBuf[i*cn + j] = value[j];
}
srcroi.width *= cn;
dstroi.width *= cn;
left *= cn;
right *= cn;
uchar* dstInner = dst + dststep*top + left;
for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
{
if( dstInner != src )
memcpy( dstInner, src, srcroi.width );
memcpy( dstInner - left, constBuf, left );
memcpy( dstInner + srcroi.width, constBuf, right );
}
dst += dststep*top;
for( i = 0; i < top; i++ )
memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);
for( i = 0; i < bottom; i++ )
memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
}
}
void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
int left, int right, int borderType, const Scalar& value )
{
Mat src = _src.getMat();
CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );
if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
{
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
int dtop = std::min(ofs.y, top);
int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
int dleft = std::min(ofs.x, left);
int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
src.adjustROI(dtop, dbottom, dleft, dright);
top -= dtop;
left -= dleft;
bottom -= dbottom;
right -= dright;
}
_dst.create( src.rows + top + bottom, src.cols + left + right, src.type() );
Mat dst = _dst.getMat();
if(top == 0 && left == 0 && bottom == 0 && right == 0)
{
if(src.data != dst.data || src.step != dst.step)
src.copyTo(dst);
return;
}
borderType &= ~BORDER_ISOLATED;
if( borderType != BORDER_CONSTANT )
copyMakeBorder_8u( src.data, src.step, src.size(),
dst.data, dst.step, dst.size(),
top, left, (int)src.elemSize(), borderType );
else
{
int cn = src.channels(), cn1 = cn;
AutoBuffer<double> buf(cn);
if( cn > 4 )
{
CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
cn1 = 1;
}
scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn);
copyMakeConstBorder_8u( src.data, src.step, src.size(),
dst.data, dst.step, dst.size(),
top, left, (int)src.elemSize(), (uchar*)(double*)buf );
}
}
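One subtlety preserved in the moved implementation: when src is a submatrix and BORDER_ISOLATED is not set, the function first grows the ROI over the real pixels around it, so the padding comes from the parent image rather than being extrapolated. A sketch, assuming the post-move header:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat big(10, 10, CV_8UC1, cv::Scalar(7));
    cv::Mat roi = big(cv::Rect(2, 2, 4, 4));
    roi.setTo(1);

    cv::Mat padded, isolated;
    // Padding is taken from 'big' (value 7), since real data exists outside the ROI.
    cv::copyMakeBorder(roi, padded, 1, 1, 1, 1, cv::BORDER_REPLICATE);
    // With BORDER_ISOLATED the ROI is treated as a standalone image (padding value 1).
    cv::copyMakeBorder(roi, isolated, 1, 1, 1, 1, cv::BORDER_REPLICATE | cv::BORDER_ISOLATED);

    CV_Assert(padded.at<uchar>(0, 0) == 7 && isolated.at<uchar>(0, 0) == 1);
    return 0;
}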
/* dst = src */

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include <limits>
using namespace cv;
using namespace cv::gpu;

View File

@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include <limits>
#if defined _M_IX86 && defined _MSC_VER && _MSC_VER < 1700
#pragma float_control(precise, on)

View File

@ -54,6 +54,8 @@
#undef min
#undef max
#undef abs
#else
#include <pthread.h>
#endif
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)

View File

@ -1922,6 +1922,14 @@ void cv::findNonZero( InputArray _src, OutputArray _idx )
}
}
double cv::PSNR(InputArray _src1, InputArray _src2)
{
Mat src1 = _src1.getMat(), src2 = _src2.getMat();
CV_Assert( src1.depth() == CV_8U );
double diff = std::sqrt(norm(src1, src2, NORM_L2SQR)/(src1.total()*src1.channels()));
return 20*log10(255./(diff+DBL_EPSILON));
}
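The newly exported cv::PSNR implements the standard 8-bit peak signal-to-noise ratio, PSNR = 20*log10(255 / RMSE), with RMSE computed over all pixels and channels. A quick sanity check (a sketch; the values are illustrative):

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat a(64, 64, CV_8UC1, cv::Scalar(100));
    cv::Mat b = a + 10;  // every pixel differs by 10, so RMSE == 10
    std::printf("PSNR = %.2f dB\n", cv::PSNR(a, b));  // expect 20*log10(255/10), about 28.13 dB
    return 0;
}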
CV_IMPL CvScalar cvSum( const CvArr* srcarr )
{

View File

@ -169,7 +169,7 @@ void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryIm
std::vector < std::vector<Point> > contours;
Mat tmpBinaryImage = binaryImage.clone();
findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
findContours(tmpBinaryImage, contours, RETR_LIST, CHAIN_APPROX_NONE);
#ifdef DEBUG_BLOB_DETECTOR
// Mat keypointsImage;
@ -239,7 +239,7 @@ void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryIm
if (ratio < params.minConvexity || ratio >= params.maxConvexity)
continue;
}
center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
if (params.filterByColor)
@ -281,7 +281,7 @@ void SimpleBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoi
keypoints.clear();
Mat grayscaleImage;
if (image.channels() == 3)
cvtColor(image, grayscaleImage, CV_BGR2GRAY);
cvtColor(image, grayscaleImage, COLOR_BGR2GRAY);
else
grayscaleImage = image;

View File

@ -111,7 +111,7 @@ BriefDescriptorExtractor::BriefDescriptorExtractor(int bytes) :
test_fn_ = pixelTests64;
break;
default:
CV_Error(CV_StsBadArg, "bytes must be 16, 32, or 64");
CV_Error(Error::StsBadArg, "bytes must be 16, 32, or 64");
}
}
@ -140,7 +140,7 @@ void BriefDescriptorExtractor::read( const FileNode& fn)
test_fn_ = pixelTests64;
break;
default:
CV_Error(CV_StsBadArg, "descriptorSize must be 16, 32, or 64");
CV_Error(Error::StsBadArg, "descriptorSize must be 16, 32, or 64");
}
bytes_ = dSize;
}
@ -156,7 +156,7 @@ void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoin
Mat sum;
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
///TODO allow the user to pass in a precomputed integral image
//if(image.type() == CV_32S)

View File

@ -236,7 +236,7 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi
// get the total number of points
const int rings = (int)radiusList.size();
assert(radiusList.size()!=0&&radiusList.size()==numberList.size());
CV_Assert(radiusList.size() != 0 && radiusList.size() == numberList.size());
points_ = 0; // remember the total number of points
for (int ring = 0; ring < rings; ring++)
{
@ -336,7 +336,7 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi
else if (norm_sq < dMax_sq)
{
// save to short pairs
assert(noShortPairs_<indSize);
CV_Assert(noShortPairs_ < indSize);
// make sure the user passes something sensible
BriskShortPair& shortPair = shortPairs_[indexChange[noShortPairs_]];
shortPair.j = j;
@ -536,7 +536,7 @@ BRISK::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, s
{
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.type() != CV_8UC1 )
cvtColor(image, image, CV_BGR2GRAY);
cvtColor(image, image, COLOR_BGR2GRAY);
if (!useProvidedKeypoints)
{
@ -729,7 +729,7 @@ BRISK::computeKeypointsNoOrientation(InputArray _image, InputArray _mask, std::v
{
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.type() != CV_8UC1 )
cvtColor(_image, image, CV_BGR2GRAY);
cvtColor(_image, image, COLOR_BGR2GRAY);
BriskScaleSpace briskScaleSpace(octaves);
briskScaleSpace.constructPyramid(image);
@ -912,7 +912,7 @@ BriskScaleSpace::getKeypoints(const int threshold_, std::vector<cv::KeyPoint>& k
inline int
BriskScaleSpace::getScoreAbove(const int layer, const int x_layer, const int y_layer) const
{
assert(layer<layers_-1);
CV_Assert(layer < layers_-1);
const BriskLayer& l = pyramid_[layer + 1];
if (layer % 2 == 0)
{ // octave
@ -955,7 +955,7 @@ BriskScaleSpace::getScoreAbove(const int layer, const int x_layer, const int y_l
inline int
BriskScaleSpace::getScoreBelow(const int layer, const int x_layer, const int y_layer) const
{
assert(layer);
CV_Assert(layer);
const BriskLayer& l = pyramid_[layer - 1];
int sixth_x;
int quarter_x;
@ -1343,7 +1343,7 @@ BriskScaleSpace::getScoreMaxAbove(const int layer, const int x_layer, const int
float y1;
// the layer above
assert(layer+1<layers_);
CV_Assert(layer + 1 < layers_);
const BriskLayer& layerAbove = pyramid_[layer + 1];
if (layer % 2 == 0)
@ -1539,7 +1539,7 @@ BriskScaleSpace::getScoreMaxBelow(const int layer, const int x_layer, const int
}
// the layer below
assert(layer>0);
CV_Assert(layer > 0);
const BriskLayer& layerBelow = pyramid_[layer - 1];
// check the first row
@ -2109,7 +2109,7 @@ BriskLayer::getAgastScore(float xf, float yf, int threshold_in, float scale_in)
inline int
BriskLayer::value(const cv::Mat& mat, float xf, float yf, float scale_in) const
{
assert(!mat.empty());
CV_Assert(!mat.empty());
// get the position
const int x = cvFloor(xf);
const int y = cvFloor(yf);
@ -2216,8 +2216,8 @@ inline void
BriskLayer::halfsample(const cv::Mat& srcimg, cv::Mat& dstimg)
{
// make sure the destination image is of the right size:
assert(srcimg.cols/2==dstimg.cols);
assert(srcimg.rows/2==dstimg.rows);
CV_Assert(srcimg.cols / 2 == dstimg.cols);
CV_Assert(srcimg.rows / 2 == dstimg.rows);
// handle non-SSE case
resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
@ -2227,8 +2227,8 @@ inline void
BriskLayer::twothirdsample(const cv::Mat& srcimg, cv::Mat& dstimg)
{
// make sure the destination image is of the right size:
assert((srcimg.cols/3)*2==dstimg.cols);
assert((srcimg.rows/3)*2==dstimg.rows);
CV_Assert((srcimg.cols / 3) * 2 == dstimg.cols);
CV_Assert((srcimg.rows / 3) * 2 == dstimg.rows);
resize(srcimg, dstimg, dstimg.size(), 0, 0, INTER_AREA);
}

View File

@ -118,7 +118,7 @@ OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<De
static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, std::vector<Mat>& opponentChannels )
{
if( bgrImage.type() != CV_8UC3 )
CV_Error( CV_StsBadArg, "input image must be a BGR image of type CV_8UC3" );
CV_Error( Error::StsBadArg, "input image must be a BGR image of type CV_8UC3" );
// Prepare opponent color space storage matrices.
opponentChannels.resize( 3 );

View File

@ -128,7 +128,7 @@ GFTTDetector::GFTTDetector( int _nfeatures, double _qualityLevel,
void GFTTDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
std::vector<Point2f> corners;
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, mask,
@ -346,7 +346,7 @@ void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<Ke
src = dst;
if( !mask.empty() )
resize( dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA );
resize( dilated_mask, src_mask, src.size(), 0, 0, INTER_AREA );
}
}

View File

@ -60,7 +60,7 @@ static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& col
int radius = cvRound(p.size/2 * draw_multiplier); // KeyPoint::size is a diameter
// draw the circles around keypoints with the keypoints size
circle( img, center, radius, color, 1, CV_AA, draw_shift_bits );
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
// draw orientation of the keypoint, if it is applicable
if( p.angle != -1 )
@ -69,14 +69,14 @@ static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& col
Point orient( cvRound(cos(srcAngleRad)*radius ),
cvRound(sin(srcAngleRad)*radius )
);
line( img, center, center+orient, color, 1, CV_AA, draw_shift_bits );
line( img, center, center+orient, color, 1, LINE_AA, draw_shift_bits );
}
#if 0
else
{
// draw center with R=1
int radius = 1 * draw_multiplier;
circle( img, center, radius, color, 1, CV_AA, draw_shift_bits );
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
}
#endif
}
@ -84,7 +84,7 @@ static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& col
{
// draw center with R=3
int radius = 3 * draw_multiplier;
circle( img, center, radius, color, 1, CV_AA, draw_shift_bits );
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
}
}
@ -99,11 +99,11 @@ void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, Ma
}
else if( image.type() == CV_8UC1 )
{
cvtColor( image, outImage, CV_GRAY2BGR );
cvtColor( image, outImage, COLOR_GRAY2BGR );
}
else
{
CV_Error( CV_StsBadArg, "Incorrect type of input image.\n" );
CV_Error( Error::StsBadArg, "Incorrect type of input image.\n" );
}
}
@ -129,7 +129,7 @@ static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyP
if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
{
if( size.width > outImg.cols || size.height > outImg.rows )
CV_Error( CV_StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
}
@ -141,12 +141,12 @@ static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyP
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
if( img1.type() == CV_8U )
cvtColor( img1, outImg1, CV_GRAY2BGR );
cvtColor( img1, outImg1, COLOR_GRAY2BGR );
else
img1.copyTo( outImg1 );
if( img2.type() == CV_8U )
cvtColor( img2, outImg2, CV_GRAY2BGR );
cvtColor( img2, outImg2, COLOR_GRAY2BGR );
else
img2.copyTo( outImg2 );
}
@ -179,7 +179,7 @@ static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
line( outImg,
Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
Point(cvRound(dpt2.x*draw_multiplier), cvRound(dpt2.y*draw_multiplier)),
color, 1, CV_AA, draw_shift_bits );
color, 1, LINE_AA, draw_shift_bits );
}
void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
@ -189,7 +189,7 @@ void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const std::vector<char>& matchesMask, int flags )
{
if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
CV_Error( CV_StsBadSize, "matchesMask must have the same size as matches1to2" );
CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
Mat outImg1, outImg2;
_prepareImgAndDrawKeypoints( img1, keypoints1, img2, keypoints2,
@ -218,7 +218,7 @@ void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const std::vector<std::vector<char> >& matchesMask, int flags )
{
if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
CV_Error( CV_StsBadSize, "matchesMask must have the same size as matches1to2" );
CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
Mat outImg1, outImg2;
_prepareImgAndDrawKeypoints( img1, keypoints1, img2, keypoints2,

View File

@ -185,7 +185,7 @@ void EllipticKeyPoint::convert( const std::vector<KeyPoint>& src, std::vector<El
for( size_t i = 0; i < src.size(); i++ )
{
float rad = src[i].size/2;
assert( rad );
CV_Assert( rad );
float fac = 1.f/(rad*rad);
dst[i] = EllipticKeyPoint( src[i].pt, Scalar(fac, 0, fac) );
}
@ -210,7 +210,7 @@ void EllipticKeyPoint::calcProjection( const std::vector<EllipticKeyPoint>& src,
{
if( !src.empty() )
{
assert( !H.empty() && H.cols == 3 && H.rows == 3);
CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
dst.resize(src.size());
std::vector<EllipticKeyPoint>::const_iterator srcIt = src.begin();
std::vector<EllipticKeyPoint>::iterator dstIt = dst.begin();
@ -462,7 +462,7 @@ void cv::evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H
keypoints2 = _keypoints2 != 0 ? _keypoints2 : &buf2;
if( (keypoints1->empty() || keypoints2->empty()) && fdetector.empty() )
CV_Error( CV_StsBadArg, "fdetector must not be empty when keypoints1 or keypoints2 is empty" );
CV_Error( Error::StsBadArg, "fdetector must not be empty when keypoints1 or keypoints2 is empty" );
if( keypoints1->empty() )
fdetector->detect( img1, *keypoints1 );
@ -573,15 +573,15 @@ void cv::evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, con
correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? _correctMatches1to2Mask : &buf2;
if( keypoints1.empty() )
CV_Error( CV_StsBadArg, "keypoints1 must not be empty" );
CV_Error( Error::StsBadArg, "keypoints1 must not be empty" );
if( matches1to2->empty() && dmatcher.empty() )
CV_Error( CV_StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
CV_Error( Error::StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
bool computeKeypoints2ByPrj = keypoints2.empty();
if( computeKeypoints2ByPrj )
{
assert(0);
CV_Error(Error::StsNotImplemented, "");
// TODO: add computing keypoints2 from keypoints1 using H1to2
}

View File

@ -286,7 +286,7 @@ FastFeatureDetector::FastFeatureDetector( int _threshold, bool _nonmaxSuppressio
void FastFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
FAST( grayImage, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}

View File

@ -206,7 +206,7 @@ void FREAK::buildPattern()
descriptionPairs[i] = allPairs[selectedPairs0.at(i)];
}
else {
CV_Error(CV_StsVecLengthErr, "Input vector does not match the required size");
CV_Error(Error::StsVecLengthErr, "Input vector does not match the required size");
}
}
else { // default selected pairs
@ -548,7 +548,7 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
int idxB = pairStat[m].idx;
double corr(0);
// compute correlation between 2 pairs
corr = fabs(compareHist(descriptorsFloat.col(idxA), descriptorsFloat.col(idxB), CV_COMP_CORREL));
corr = fabs(compareHist(descriptorsFloat.col(idxA), descriptorsFloat.col(idxB), HISTCMP_CORREL));
if( corr > corrMax ) {
corrMax = corr;
@ -575,7 +575,7 @@ std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
else {
if( verbose )
std::cout << "correlation threshold too small (restrictive)" << std::endl;
CV_Error(CV_StsError, "correlation threshold too small (restrictive)");
CV_Error(Error::StsError, "correlation threshold too small (restrictive)");
}
extAll = false;
return idxBestPairs;

View File

@ -112,7 +112,7 @@ void DescriptorMatcher::DescriptorCollection::set( const std::vector<Mat>& descr
dim = descriptors[0].cols;
type = descriptors[0].type();
}
assert( dim > 0 );
CV_Assert( dim > 0 );
int count = startIdxs[imageCount-1] + descriptors[imageCount-1].rows;
@ -484,7 +484,7 @@ Ptr<DescriptorMatcher> DescriptorMatcher::create( const String& descriptorMatche
dm = new BFMatcher(NORM_HAMMING2);
}
else
CV_Error( CV_StsBadArg, "Unknown matcher name" );
CV_Error( Error::StsBadArg, "Unknown matcher name" );
return dm;
}
@ -727,7 +727,7 @@ Ptr<DescriptorMatcher> FlannBasedMatcher::clone( bool emptyTrainData ) const
FlannBasedMatcher* matcher = new FlannBasedMatcher(indexParams, searchParams);
if( !emptyTrainData )
{
CV_Error( CV_StsNotImplemented, "deep clone functionality is not implemented, because "
CV_Error( Error::StsNotImplemented, "deep clone functionality is not implemented, because "
"Flann::Index has not copy constructor or clone method ");
//matcher->flannIndex;
matcher->addedDescCount = addedDescCount;

View File

@ -40,6 +40,7 @@
*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
namespace cv
{

View File

@ -240,7 +240,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt,
}
}
else
CV_Error( CV_StsBadSize, "Wrong WTA_K. It can be only 2, 3 or 4." );
CV_Error( Error::StsBadSize, "Wrong WTA_K. It can be only 2, 3 or 4." );
#undef GET_VALUE
}
@ -738,7 +738,7 @@ void ORB::operator()( InputArray _image, InputArray _mask, std::vector<KeyPoint>
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.type() != CV_8UC1 )
cvtColor(_image, image, CV_BGR2GRAY);
cvtColor(_image, image, COLOR_BGR2GRAY);
int levelsNum = this->nlevels;

View File

@ -45,7 +45,6 @@
#include "opencv2/features2d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"

View File

@ -114,7 +114,7 @@ StarDetectorComputeResponses( const Mat& img, Mat& responses, Mat& sizes, int ma
#if CV_SSE2
__m128 invSizes4[MAX_PATTERN][2];
__m128 sizes1_4[MAX_PATTERN];
Cv32suf absmask;
union { int i; float f; } absmask;
absmask.i = 0x7fffffff;
volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
#endif
@ -429,7 +429,7 @@ StarDetector::StarDetector(int _maxSize, int _responseThreshold,
void StarDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
(*this)(grayImage, keypoints);
KeyPointsFilter::runByPixelsMask( keypoints, mask );

View File

@ -69,8 +69,8 @@ void CV_BRISKTest::run( int )
}
Mat gray1, gray2;
cvtColor(image1, gray1, CV_BGR2GRAY);
cvtColor(image2, gray2, CV_BGR2GRAY);
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
Ptr<FeatureDetector> detector = Algorithm::create<FeatureDetector>("Feature2D.BRISK");

View File

@ -80,12 +80,16 @@ static Mat readMatFromBin( const string& filename )
size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);
uchar* data = (uchar*)cvAlloc(dataSize);
size_t elements_read = fread( (void*)data, 1, dataSize, f );
size_t step = dataSize / rows / CV_ELEM_SIZE(type);
CV_Assert(step >= (size_t)cols);
Mat m = Mat( rows, step, type).colRange(0, cols);
size_t elements_read = fread( m.ptr(), 1, dataSize, f );
CV_Assert(elements_read == (size_t)(dataSize));
fclose(f);
return Mat( rows, cols, type, data );
return m;
}
return Mat();
}
@ -205,7 +209,7 @@ protected:
double t = (double)getTickCount();
dextractor->compute( img, keypoints, calcDescriptors );
t = getTickCount() - t;
ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)cvGetTickFrequency()*1000.)/calcDescriptors.rows);
ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)getTickFrequency()*1000.)/calcDescriptors.rows);
if( calcDescriptors.rows != (int)keypoints.size() )
{

View File

@ -71,8 +71,8 @@ void CV_FastTest::run( int )
}
Mat gray1, gray2;
cvtColor(image1, gray1, CV_BGR2GRAY);
cvtColor(image2, gray2, CV_BGR2GRAY);
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
@ -82,13 +82,13 @@ void CV_FastTest::run( int )
for(size_t i = 0; i < keypoints1.size(); ++i)
{
const KeyPoint& kp = keypoints1[i];
cv::circle(image1, kp.pt, cvRound(kp.size/2), CV_RGB(255, 0, 0));
cv::circle(image1, kp.pt, cvRound(kp.size/2), Scalar(255, 0, 0));
}
for(size_t i = 0; i < keypoints2.size(); ++i)
{
const KeyPoint& kp = keypoints2[i];
cv::circle(image2, kp.pt, cvRound(kp.size/2), CV_RGB(255, 0, 0));
cv::circle(image2, kp.pt, cvRound(kp.size/2), Scalar(255, 0, 0));
}
Mat kps1(1, (int)(keypoints1.size() * sizeof(KeyPoint)), CV_8U, &keypoints1[0]);

View File

@ -41,6 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <vector>
#include <string>

View File

@ -11,7 +11,6 @@
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

View File

@ -217,7 +217,7 @@ protected:
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
@ -246,7 +246,7 @@ protected:
float angle0 = keypoints0[matches[m].queryIdx].angle;
float angle1 = keypoints1[matches[m].trainIdx].angle;
if(angle0 == -1 || angle1 == -1)
CV_Error(CV_StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
CV_Error(Error::StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
CV_Assert(angle0 >= 0.f && angle0 < 360.f);
CV_Assert(angle1 >= 0.f && angle1 < 360.f);
@ -330,7 +330,7 @@ protected:
Mat descriptors0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(normType);
@ -413,7 +413,7 @@ protected:
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
@ -424,7 +424,7 @@ protected:
vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
featureDetector->detect(image1, keypoints1);
if(keypoints1.size() < 15)
CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
if(keypoints1.size() > keypoints0.size())
{
@ -532,7 +532,7 @@ protected:
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
if(keypoints0.size() < 15)
CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
Mat descriptors0;
descriptorExtractor->compute(image0, keypoints0, descriptors0);

View File

@ -65,7 +65,6 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/gpu.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d.hpp"
#include "opencv2/video.hpp"

View File

@ -43,12 +43,13 @@
#define __HIGHGUI_H_
#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

View File

@ -51,20 +51,6 @@
namespace cv
{
//! Various border types, image boundaries are denoted with '|'
enum {
BORDER_CONSTANT = 0, // iiiiii|abcdefgh|iiiiiii with some specified 'i'
BORDER_REPLICATE = 1, // aaaaaa|abcdefgh|hhhhhhh
BORDER_REFLECT = 2, // fedcba|abcdefgh|hgfedcb
BORDER_WRAP = 3, // cdefgh|abcdefgh|abcdefg
BORDER_REFLECT_101 = 4, // gfedcb|abcdefgh|gfedcba
BORDER_TRANSPARENT = 5, // uvwxyz|abcdefgh|ijklmno
BORDER_REFLECT101 = BORDER_REFLECT_101,
BORDER_DEFAULT = BORDER_REFLECT_101,
BORDER_ISOLATED = 16 // do not look outside of ROI
};
//! type of the kernel
enum { KERNEL_GENERAL = 0, // the kernel is generic. No particular symmetry or other special properties.
KERNEL_SYMMETRICAL = 1, // kernel[i] == kernel[ksize-i-1] , and the anchor is at the center
@ -753,29 +739,6 @@ public:
};
//! raster image moments
class CV_EXPORTS_W_MAP Moments
{
public:
//! the default constructor
Moments();
//! the full constructor
Moments(double m00, double m10, double m01, double m20, double m11,
double m02, double m30, double m21, double m12, double m03 );
////! the conversion from CvMoments
//Moments( const CvMoments& moments );
////! the conversion to CvMoments
//operator CvMoments() const;
//! spatial moments
CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
//! central moments
CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
//! central normalized moments
CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
};
class CV_EXPORTS_W Subdiv2D
{
public:
@ -959,14 +922,6 @@ CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray
//! returns structuring element of the specified shape and size
CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1));
//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType );
//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst,
int top, int bottom, int left, int right,
int borderType, const Scalar& value = Scalar() );
//! smooths the image using median filter.
CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
@ -1165,9 +1120,6 @@ CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
double alpha, InputArray mask = noArray() );
//! computes PSNR image/video quality metric
CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2,
InputArray window = noArray(), CV_OUT double* response = 0);

View File

@ -45,10 +45,6 @@
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
# include "opencv2/imgproc.hpp"
#endif
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -46,50 +46,6 @@
Base Image Filter
\****************************************************************************************/
/*
Various border types, image boundaries are denoted with '|'
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
*/
int cv::borderInterpolate( int p, int len, int borderType )
{
if( (unsigned)p < (unsigned)len )
;
else if( borderType == BORDER_REPLICATE )
p = p < 0 ? 0 : len - 1;
else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
{
int delta = borderType == BORDER_REFLECT_101;
if( len == 1 )
return 0;
do
{
if( p < 0 )
p = -p - 1 + delta;
else
p = len - 1 - (p - len) - delta;
}
while( (unsigned)p >= (unsigned)len );
}
else if( borderType == BORDER_WRAP )
{
if( p < 0 )
p -= ((p-len+1)/len)*len;
if( p >= len )
p %= len;
}
else if( borderType == BORDER_CONSTANT )
p = -1;
else
CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
return p;
}
namespace cv
{

View File

@ -41,6 +41,7 @@
#include "precomp.hpp"
#include <functional>
#include <limits>
using namespace cv;

View File

@ -43,11 +43,10 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/private.hpp"
#include <math.h>

View File

@ -75,196 +75,6 @@ CV_IMPL CvSeq* cvPointSeqFromMat( int seq_kind, const CvArr* arr,
return (CvSeq*)contour_header;
}
namespace cv
{
static void copyMakeBorder_8u( const uchar* src, size_t srcstep, Size srcroi,
uchar* dst, size_t dststep, Size dstroi,
int top, int left, int cn, int borderType )
{
const int isz = (int)sizeof(int);
int i, j, k, elemSize = 1;
bool intMode = false;
if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
{
cn /= isz;
elemSize = isz;
intMode = true;
}
AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
int* tab = _tab;
int right = dstroi.width - srcroi.width - left;
int bottom = dstroi.height - srcroi.height - top;
for( i = 0; i < left; i++ )
{
j = borderInterpolate(i - left, srcroi.width, borderType)*cn;
for( k = 0; k < cn; k++ )
tab[i*cn + k] = j + k;
}
for( i = 0; i < right; i++ )
{
j = borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
for( k = 0; k < cn; k++ )
tab[(i+left)*cn + k] = j + k;
}
srcroi.width *= cn;
dstroi.width *= cn;
left *= cn;
right *= cn;
uchar* dstInner = dst + dststep*top + left*elemSize;
for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
{
if( dstInner != src )
memcpy(dstInner, src, srcroi.width*elemSize);
if( intMode )
{
const int* isrc = (int*)src;
int* idstInner = (int*)dstInner;
for( j = 0; j < left; j++ )
idstInner[j - left] = isrc[tab[j]];
for( j = 0; j < right; j++ )
idstInner[j + srcroi.width] = isrc[tab[j + left]];
}
else
{
for( j = 0; j < left; j++ )
dstInner[j - left] = src[tab[j]];
for( j = 0; j < right; j++ )
dstInner[j + srcroi.width] = src[tab[j + left]];
}
}
dstroi.width *= elemSize;
dst += dststep*top;
for( i = 0; i < top; i++ )
{
j = borderInterpolate(i - top, srcroi.height, borderType);
memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
}
for( i = 0; i < bottom; i++ )
{
j = borderInterpolate(i + srcroi.height, srcroi.height, borderType);
memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
}
}
static void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, Size srcroi,
uchar* dst, size_t dststep, Size dstroi,
int top, int left, int cn, const uchar* value )
{
int i, j;
AutoBuffer<uchar> _constBuf(dstroi.width*cn);
uchar* constBuf = _constBuf;
int right = dstroi.width - srcroi.width - left;
int bottom = dstroi.height - srcroi.height - top;
for( i = 0; i < dstroi.width; i++ )
{
for( j = 0; j < cn; j++ )
constBuf[i*cn + j] = value[j];
}
srcroi.width *= cn;
dstroi.width *= cn;
left *= cn;
right *= cn;
uchar* dstInner = dst + dststep*top + left;
for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
{
if( dstInner != src )
memcpy( dstInner, src, srcroi.width );
memcpy( dstInner - left, constBuf, left );
memcpy( dstInner + srcroi.width, constBuf, right );
}
dst += dststep*top;
for( i = 0; i < top; i++ )
memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);
for( i = 0; i < bottom; i++ )
memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
}
}
void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
int left, int right, int borderType, const Scalar& value )
{
Mat src = _src.getMat();
CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );
if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
{
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
int dtop = std::min(ofs.y, top);
int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
int dleft = std::min(ofs.x, left);
int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
src.adjustROI(dtop, dbottom, dleft, dright);
top -= dtop;
left -= dleft;
bottom -= dbottom;
right -= dright;
}
_dst.create( src.rows + top + bottom, src.cols + left + right, src.type() );
Mat dst = _dst.getMat();
if(top == 0 && left == 0 && bottom == 0 && right == 0)
{
if(src.data != dst.data || src.step != dst.step)
src.copyTo(dst);
return;
}
borderType &= ~BORDER_ISOLATED;
if( borderType != BORDER_CONSTANT )
copyMakeBorder_8u( src.data, src.step, src.size(),
dst.data, dst.step, dst.size(),
top, left, (int)src.elemSize(), borderType );
else
{
int cn = src.channels(), cn1 = cn;
AutoBuffer<double> buf(cn);
if( cn > 4 )
{
CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
cn1 = 1;
}
scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn);
copyMakeConstBorder_8u( src.data, src.step, src.size(),
dst.data, dst.step, dst.size(),
top, left, (int)src.elemSize(), (uchar*)(double*)buf );
}
}
double cv::PSNR(InputArray _src1, InputArray _src2)
{
Mat src1 = _src1.getMat(), src2 = _src2.getMat();
CV_Assert( src1.depth() == CV_8U );
double diff = std::sqrt(norm(src1, src2, NORM_L2SQR)/(src1.total()*src1.channels()));
return 20*log10(255./(diff+DBL_EPSILON));
}
CV_IMPL void
cvCopyMakeBorder( const CvArr* srcarr, CvArr* dstarr, CvPoint offset,
int borderType, CvScalar value )

View File

@ -2268,4 +2268,35 @@ public class CoreTest extends OpenCVTestCase {
}
public void testCopyMakeBorderMatMatIntIntIntIntInt() {
Mat src = new Mat(2, 2, CvType.CV_32F, new Scalar(1));
int border = 2;
Core.copyMakeBorder(src, dst, border, border, border, border, Core.BORDER_REPLICATE);
truth = new Mat(6, 6, CvType.CV_32F, new Scalar(1));
assertMatEqual(truth, dst, EPS);
}
public void testCopyMakeBorderMatMatIntIntIntIntIntScalar() {
Mat src = new Mat(2, 2, CvType.CV_32F, new Scalar(1));
Scalar value = new Scalar(0);
int border = 2;
Core.copyMakeBorder(src, dst, border, border, border, border, Core.BORDER_REPLICATE, value);
// TODO_: write better test (use Core.BORDER_CONSTANT)
truth = new Mat(6, 6, CvType.CV_32F, new Scalar(1));
assertMatEqual(truth, dst, EPS);
}
public void testBorderInterpolate() {
float val1 = Core.borderInterpolate(100, 150, Core.BORDER_REFLECT_101);
assertEquals(100f, val1);
float val2 = Core.borderInterpolate(-5, 10, Core.BORDER_WRAP);
assertEquals(5f, val2);
}
}

View File

@ -176,7 +176,7 @@ public class ImgprocTest extends OpenCVTestCase {
}
public void testBilateralFilterMatMatIntDoubleDoubleInt() {
Imgproc.bilateralFilter(gray255, dst, 5, 10, 5, Imgproc.BORDER_REFLECT);
Imgproc.bilateralFilter(gray255, dst, 5, 10, 5, Core.BORDER_REFLECT);
assertMatEqual(gray255, dst);
// TODO_: write better test
@ -198,19 +198,11 @@ public class ImgprocTest extends OpenCVTestCase {
}
public void testBlurMatMatSizePointInt() {
Imgproc.blur(gray0, dst, size, anchorPoint, Imgproc.BORDER_REFLECT);
Imgproc.blur(gray0, dst, size, anchorPoint, Core.BORDER_REFLECT);
assertMatEqual(gray0, dst);
// TODO_: write better test
}
public void testBorderInterpolate() {
float val1 = Imgproc.borderInterpolate(100, 150, Imgproc.BORDER_REFLECT_101);
assertEquals(100f, val1);
float val2 = Imgproc.borderInterpolate(-5, 10, Imgproc.BORDER_WRAP);
assertEquals(5f, val2);
}
public void testBoundingRect() {
MatOfPoint points = new MatOfPoint(new Point(0, 0), new Point(0, 4), new Point(4, 0), new Point(4, 4));
Point p1 = new Point(1, 1);
@ -236,7 +228,7 @@ public class ImgprocTest extends OpenCVTestCase {
}
public void testBoxFilterMatMatIntSizePointBooleanInt() {
Imgproc.boxFilter(gray255, dst, 8, size, anchorPoint, false, Imgproc.BORDER_REFLECT);
Imgproc.boxFilter(gray255, dst, 8, size, anchorPoint, false, Core.BORDER_REFLECT);
assertMatEqual(gray255, dst);
// TODO_: write better test
}
@ -479,29 +471,6 @@ public class ImgprocTest extends OpenCVTestCase {
assertMatEqual(new MatOfInt4(3, 0, 5, 3620), convexityDefects);
}
public void testCopyMakeBorderMatMatIntIntIntIntInt() {
Mat src = new Mat(imgprocSz, imgprocSz, CvType.CV_32F, new Scalar(1));
int border = 2;
Imgproc.copyMakeBorder(src, dst, border, border, border, border, Imgproc.BORDER_REPLICATE);
truth = new Mat(6, 6, CvType.CV_32F, new Scalar(1));
assertMatEqual(truth, dst, EPS);
}
public void testCopyMakeBorderMatMatIntIntIntIntIntScalar() {
Mat src = new Mat(imgprocSz, imgprocSz, CvType.CV_32F, new Scalar(1));
Scalar value = new Scalar(0);
int border = 2;
Imgproc.copyMakeBorder(src, dst, border, border, border, border, Imgproc.BORDER_REPLICATE, value);
// TODO_: write better test (use Imgproc.BORDER_CONSTANT)
truth = new Mat(6, 6, CvType.CV_32F, new Scalar(1));
assertMatEqual(truth, dst, EPS);
}
public void testCornerEigenValsAndVecsMatMatIntInt() {
fail("Not yet implemented");
// TODO: write better test
@ -528,7 +497,7 @@ public class ImgprocTest extends OpenCVTestCase {
truth = new Mat(4, 4, CvType.CV_32FC(6), new Scalar(0));
Imgproc.cornerEigenValsAndVecs(src, dst, blockSize, ksize, Imgproc.BORDER_REFLECT);
Imgproc.cornerEigenValsAndVecs(src, dst, blockSize, ksize, Core.BORDER_REFLECT);
assertMatEqual(truth, dst, EPS);
}
@ -552,7 +521,7 @@ public class ImgprocTest extends OpenCVTestCase {
int blockSize = 5;
int ksize = 7;
double k = 0.1;
Imgproc.cornerHarris(gray255, dst, blockSize, ksize, k, Imgproc.BORDER_REFLECT);
Imgproc.cornerHarris(gray255, dst, blockSize, ksize, k, Core.BORDER_REFLECT);
assertMatEqual(truth, dst, EPS);
}
@ -598,7 +567,7 @@ public class ImgprocTest extends OpenCVTestCase {
int blockSize = 3;
int ksize = 5;
Imgproc.cornerMinEigenVal(src, dst, blockSize, ksize, Imgproc.BORDER_REFLECT);
Imgproc.cornerMinEigenVal(src, dst, blockSize, ksize, Core.BORDER_REFLECT);
truth = new Mat(3, 3, CvType.CV_32FC1) {
{
@ -742,7 +711,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat kernel = new Mat();
Scalar sc = new Scalar(3, 3);
Imgproc.erode(src, dst, kernel, anchorPoint, 10, Imgproc.BORDER_REFLECT, sc);
Imgproc.erode(src, dst, kernel, anchorPoint, 10, Core.BORDER_REFLECT, sc);
truth = new Mat(3, 3, CvType.CV_8U, new Scalar(8));
assertMatEqual(truth, dst);
@ -773,7 +742,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat kernel = new Mat(imgprocSz, imgprocSz, CvType.CV_32F, new Scalar(0));
Point point = new Point(0, 0);
Imgproc.filter2D(gray128, dst, -1, kernel, point, 2, Imgproc.BORDER_CONSTANT);
Imgproc.filter2D(gray128, dst, -1, kernel, point, 2, Core.BORDER_CONSTANT);
assertMatEqual(gray2, dst);
}
@ -901,7 +870,7 @@ public class ImgprocTest extends OpenCVTestCase {
}
public void testGaussianBlurMatMatSizeDoubleDoubleInt() {
Imgproc.GaussianBlur(gray2, dst, size, 1, 3, Imgproc.BORDER_REFLECT);
Imgproc.GaussianBlur(gray2, dst, size, 1, 3, Core.BORDER_REFLECT);
assertMatEqual(gray2, dst);
// TODO_: write better test
@ -1384,7 +1353,7 @@ public class ImgprocTest extends OpenCVTestCase {
public void testLaplacianMatMatIntIntDoubleDoubleInt() {
Mat src = new Mat(3, 3, CvType.CV_32F, new Scalar(2));
Imgproc.Laplacian(src, dst, CvType.CV_32F, 1, 2, EPS, Imgproc.BORDER_REFLECT);
Imgproc.Laplacian(src, dst, CvType.CV_32F, 1, 2, EPS, Core.BORDER_REFLECT);
truth = new Mat(3, 3, CvType.CV_32F, new Scalar(0.00099945068));
assertMatEqual(truth, dst, EPS);
@ -1486,7 +1455,7 @@ public class ImgprocTest extends OpenCVTestCase {
Point point = new Point(1, 1);
Scalar sc = new Scalar(3, 3);
Imgproc.morphologyEx(src, dst, Imgproc.MORPH_TOPHAT, kernel, point, 10, Imgproc.BORDER_REFLECT, sc);
Imgproc.morphologyEx(src, dst, Imgproc.MORPH_TOPHAT, kernel, point, 10, Core.BORDER_REFLECT, sc);
truth = new Mat(imgprocSz, imgprocSz, CvType.CV_8U) {
{
put(0, 0, 1, 0);
@ -1520,7 +1489,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat src = new Mat(4, 4, CvType.CV_32F, new Scalar(1));
int ksize = 3;
Imgproc.preCornerDetect(src, dst, ksize, Imgproc.BORDER_REFLECT);
Imgproc.preCornerDetect(src, dst, ksize, Core.BORDER_REFLECT);
truth = new Mat(4, 4, CvType.CV_32F, new Scalar(0));
assertMatEqual(truth, dst, EPS);
@ -1640,7 +1609,7 @@ public class ImgprocTest extends OpenCVTestCase {
truth = new Mat(1, 3, CvType.CV_32F, new Scalar(2));
Imgproc.remap(src, dst, map1, map2, Imgproc.INTER_LINEAR, Imgproc.BORDER_REFLECT, sc);
Imgproc.remap(src, dst, map1, map2, Imgproc.INTER_LINEAR, Core.BORDER_REFLECT, sc);
assertMatEqual(truth, dst, EPS);
}
@ -1683,7 +1652,7 @@ public class ImgprocTest extends OpenCVTestCase {
public void testScharrMatMatIntIntIntDoubleDoubleInt() {
Mat src = Mat.eye(3, 3, CvType.CV_32F);
Imgproc.Scharr(src, dst, CvType.CV_32F, 1, 0, 1.5, 0, Imgproc.BORDER_REFLECT);
Imgproc.Scharr(src, dst, CvType.CV_32F, 1, 0, 1.5, 0, Core.BORDER_REFLECT);
truth = new Mat(3, 3, CvType.CV_32F) {
{
@ -1728,7 +1697,7 @@ public class ImgprocTest extends OpenCVTestCase {
Mat kernelY = new Mat(1, 3, CvType.CV_32FC1);
kernelY.put(0, 0, 1, 1, 1);
Imgproc.sepFilter2D(gray0, dst, CvType.CV_32F, kernelX, kernelY, anchorPoint, weakEPS, Imgproc.BORDER_REFLECT);
Imgproc.sepFilter2D(gray0, dst, CvType.CV_32F, kernelX, kernelY, anchorPoint, weakEPS, Core.BORDER_REFLECT);
truth = new Mat(10, 10, CvType.CV_32F, new Scalar(weakEPS));
assertMatEqual(truth, dst, EPS);
@ -1756,7 +1725,7 @@ public class ImgprocTest extends OpenCVTestCase {
}
};
Imgproc.Sobel(src, dst, CvType.CV_32F, 1, 0, 3, 2, 0, Imgproc.BORDER_REPLICATE);
Imgproc.Sobel(src, dst, CvType.CV_32F, 1, 0, 3, 2, 0, Core.BORDER_REPLICATE);
truth = new Mat(3, 3, CvType.CV_32F) {
{


@ -1126,7 +1126,7 @@ void CascadeClassifier::detectMultiScale( const Mat& image, std::vector<Rect>& o
if( grayImage.channels() > 1 )
{
Mat temp;
cvtColor(grayImage, temp, CV_BGR2GRAY);
cvtColor(grayImage, temp, COLOR_BGR2GRAY);
grayImage = temp;
}
@ -1149,7 +1149,7 @@ void CascadeClassifier::detectMultiScale( const Mat& image, std::vector<Rect>& o
continue;
Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
resize( grayImage, scaledImage, scaledImageSize, 0, 0, CV_INTER_LINEAR );
resize( grayImage, scaledImage, scaledImageSize, 0, 0, INTER_LINEAR );
int yStep;
if( getFeatureType() == cv::FeatureEvaluator::HOG )


@ -1,4 +1,5 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <deque>
#include <algorithm>


@ -1,4 +1,5 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "_latentsvm.h"
#include "_lsvm_resizeimg.h"


@ -42,7 +42,8 @@
/* Haar features calculation */
#include "precomp.hpp"
#include "stdio.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
#if CV_SSE2
# if 1 /*!CV_SSE4_1 && !CV_SSE4_2*/


@ -1,4 +1,5 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "_lsvmparser.h"
#include "_lsvm_matching.h"


@ -483,7 +483,7 @@ void ColorGradientPyramid::pyrDown()
if (!mask.empty())
{
Mat next_mask;
resize(mask, next_mask, size, 0.0, 0.0, CV_INTER_NN);
resize(mask, next_mask, size, 0.0, 0.0, INTER_NEAREST);
mask = next_mask;
}
@ -635,17 +635,11 @@ static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
{
dst = Mat::zeros(src.size(), CV_8U);
IplImage src_ipl = src;
IplImage* ap_depth_data = &src_ipl;
IplImage dst_ipl = dst;
IplImage* dst_ipl_ptr = &dst_ipl;
IplImage** m_dep = &dst_ipl_ptr;
const unsigned short * lp_depth = src.ptr<ushort>();
unsigned char * lp_normals = dst.ptr<uchar>();
unsigned short * lp_depth = (unsigned short *)ap_depth_data->imageData;
unsigned char * lp_normals = (unsigned char *)m_dep[0]->imageData;
const int l_W = ap_depth_data->width;
const int l_H = ap_depth_data->height;
const int l_W = src.cols;
const int l_H = src.rows;
const int l_r = 5; // used to be 7
const int l_offset0 = -l_r - l_r * l_W;
@ -662,7 +656,7 @@ static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
for (int l_y = l_r; l_y < l_H - l_r - 1; ++l_y)
{
unsigned short * lp_line = lp_depth + (l_y * l_W + l_r);
const unsigned short * lp_line = lp_depth + (l_y * l_W + l_r);
unsigned char * lp_norm = lp_normals + (l_y * l_W + l_r);
for (int l_x = l_r; l_x < l_W - l_r - 1; ++l_x)
@ -725,7 +719,7 @@ static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
++lp_norm;
}
}
cvSmooth(m_dep[0], m_dep[0], CV_MEDIAN, 5, 5);
medianBlur(dst, dst, 5);
}
class DepthNormalPyramid : public QuantizedPyramid
@ -772,12 +766,12 @@ void DepthNormalPyramid::pyrDown()
// In this case, NN-downsample the quantized image
Mat next_normal;
Size size(normal.cols / 2, normal.rows / 2);
resize(normal, next_normal, size, 0.0, 0.0, CV_INTER_NN);
resize(normal, next_normal, size, 0.0, 0.0, INTER_NEAREST);
normal = next_normal;
if (!mask.empty())
{
Mat next_mask;
resize(mask, next_mask, size, 0.0, 0.0, CV_INTER_NN);
resize(mask, next_mask, size, 0.0, 0.0, INTER_NEAREST);
mask = next_mask;
}
}
@ -805,7 +799,7 @@ bool DepthNormalPyramid::extractTemplate(Template& templ) const
temp.setTo(1 << i, local_mask);
bitwise_and(temp, normal, temp);
// temp is now non-zero at pixels in the mask with quantized orientation i
distanceTransform(temp, distances[i], CV_DIST_C, 3);
distanceTransform(temp, distances[i], DIST_C, 3);
}
// Count how many features taken for each label


@ -45,8 +45,6 @@
#include "opencv2/objdetect.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/core/utility.hpp"


@ -1,4 +1,5 @@
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "_lsvm_resizeimg.h"
#include <stdio.h>
#include <assert.h>


@ -182,90 +182,90 @@ void cvtColor_caller(const oclMat &src, oclMat &dst, int code, int dcn)
switch (code)
{
/*
case CV_BGR2BGRA: case CV_RGB2BGRA: case CV_BGRA2BGR:
case CV_RGBA2BGR: case CV_RGB2BGR: case CV_BGRA2RGBA:
case CV_BGR2BGR565: case CV_BGR2BGR555: case CV_RGB2BGR565: case CV_RGB2BGR555:
case CV_BGRA2BGR565: case CV_BGRA2BGR555: case CV_RGBA2BGR565: case CV_RGBA2BGR555:
case CV_BGR5652BGR: case CV_BGR5552BGR: case CV_BGR5652RGB: case CV_BGR5552RGB:
case CV_BGR5652BGRA: case CV_BGR5552BGRA: case CV_BGR5652RGBA: case CV_BGR5552RGBA:
case COLOR_BGR2BGRA: case COLOR_RGB2BGRA: case COLOR_BGRA2BGR:
case COLOR_RGBA2BGR: case COLOR_RGB2BGR: case COLOR_BGRA2RGBA:
case COLOR_BGR2BGR565: case COLOR_BGR2BGR555: case COLOR_RGB2BGR565: case COLOR_RGB2BGR555:
case COLOR_BGRA2BGR565: case COLOR_BGRA2BGR555: case COLOR_RGBA2BGR565: case COLOR_RGBA2BGR555:
case COLOR_BGR5652BGR: case COLOR_BGR5552BGR: case COLOR_BGR5652RGB: case COLOR_BGR5552RGB:
case COLOR_BGR5652BGRA: case COLOR_BGR5552BGRA: case COLOR_BGR5652RGBA: case COLOR_BGR5552RGBA:
*/
case CV_BGR2GRAY:
case CV_BGRA2GRAY:
case CV_RGB2GRAY:
case CV_RGBA2GRAY:
case COLOR_BGR2GRAY:
case COLOR_BGRA2GRAY:
case COLOR_RGB2GRAY:
case COLOR_RGBA2GRAY:
{
CV_Assert(scn == 3 || scn == 4);
bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2;
bidx = code == COLOR_BGR2GRAY || code == COLOR_BGRA2GRAY ? 0 : 2;
dst.create(sz, CV_MAKETYPE(depth, 1));
RGB2Gray_caller(src, dst, bidx);
break;
}
case CV_GRAY2BGR:
case CV_GRAY2BGRA:
case COLOR_GRAY2BGR:
case COLOR_GRAY2BGRA:
{
CV_Assert(scn == 1);
dcn = code == CV_GRAY2BGRA ? 4 : 3;
dcn = code == COLOR_GRAY2BGRA ? 4 : 3;
dst.create(sz, CV_MAKETYPE(depth, dcn));
Gray2RGB_caller(src, dst);
break;
}
case CV_BGR2YUV:
case CV_RGB2YUV:
case COLOR_BGR2YUV:
case COLOR_RGB2YUV:
{
CV_Assert(scn == 3 || scn == 4);
bidx = code == CV_BGR2YUV ? 0 : 2;
bidx = code == COLOR_BGR2YUV ? 0 : 2;
dst.create(sz, CV_MAKETYPE(depth, 3));
RGB2YUV_caller(src, dst, bidx);
break;
}
case CV_YUV2BGR:
case CV_YUV2RGB:
case COLOR_YUV2BGR:
case COLOR_YUV2RGB:
{
CV_Assert(scn == 3 || scn == 4);
bidx = code == CV_YUV2BGR ? 0 : 2;
bidx = code == COLOR_YUV2BGR ? 0 : 2;
dst.create(sz, CV_MAKETYPE(depth, 3));
YUV2RGB_caller(src, dst, bidx);
break;
}
case CV_YUV2RGB_NV12:
case CV_YUV2BGR_NV12:
case CV_YUV2RGBA_NV12:
case CV_YUV2BGRA_NV12:
case COLOR_YUV2RGB_NV12:
case COLOR_YUV2BGR_NV12:
case COLOR_YUV2RGBA_NV12:
case COLOR_YUV2BGRA_NV12:
{
CV_Assert(scn == 1);
CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U );
dcn = code == CV_YUV2BGRA_NV12 || code == CV_YUV2RGBA_NV12 ? 4 : 3;
bidx = code == CV_YUV2BGRA_NV12 || code == CV_YUV2BGR_NV12 ? 0 : 2;
dcn = code == COLOR_YUV2BGRA_NV12 || code == COLOR_YUV2RGBA_NV12 ? 4 : 3;
bidx = code == COLOR_YUV2BGRA_NV12 || code == COLOR_YUV2BGR_NV12 ? 0 : 2;
Size dstSz(sz.width, sz.height * 2 / 3);
dst.create(dstSz, CV_MAKETYPE(depth, dcn));
YUV2RGB_NV12_caller(src, dst, bidx);
break;
}
case CV_BGR2YCrCb:
case CV_RGB2YCrCb:
case COLOR_BGR2YCrCb:
case COLOR_RGB2YCrCb:
{
CV_Assert(scn == 3 || scn == 4);
bidx = code == CV_BGR2YCrCb ? 0 : 2;
bidx = code == COLOR_BGR2YCrCb ? 0 : 2;
dst.create(sz, CV_MAKETYPE(depth, 3));
RGB2YCrCb_caller(src, dst, bidx);
break;
}
case CV_YCrCb2BGR:
case CV_YCrCb2RGB:
case COLOR_YCrCb2BGR:
case COLOR_YCrCb2RGB:
{
break;
}
/*
case CV_BGR5652GRAY: case CV_BGR5552GRAY:
case CV_GRAY2BGR565: case CV_GRAY2BGR555:
case CV_BGR2YCrCb: case CV_RGB2YCrCb:
case CV_BGR2XYZ: case CV_RGB2XYZ:
case CV_XYZ2BGR: case CV_XYZ2RGB:
case CV_BGR2HSV: case CV_RGB2HSV: case CV_BGR2HSV_FULL: case CV_RGB2HSV_FULL:
case CV_BGR2HLS: case CV_RGB2HLS: case CV_BGR2HLS_FULL: case CV_RGB2HLS_FULL:
case CV_HSV2BGR: case CV_HSV2RGB: case CV_HSV2BGR_FULL: case CV_HSV2RGB_FULL:
case CV_HLS2BGR: case CV_HLS2RGB: case CV_HLS2BGR_FULL: case CV_HLS2RGB_FULL:
case COLOR_BGR5652GRAY: case COLOR_BGR5552GRAY:
case COLOR_GRAY2BGR565: case COLOR_GRAY2BGR555:
case COLOR_BGR2YCrCb: case COLOR_RGB2YCrCb:
case COLOR_BGR2XYZ: case COLOR_RGB2XYZ:
case COLOR_XYZ2BGR: case COLOR_XYZ2RGB:
case COLOR_BGR2HSV: case COLOR_RGB2HSV: case COLOR_BGR2HSV_FULL: case COLOR_RGB2HSV_FULL:
case COLOR_BGR2HLS: case COLOR_RGB2HLS: case COLOR_BGR2HLS_FULL: case COLOR_RGB2HLS_FULL:
case COLOR_HSV2BGR: case COLOR_HSV2RGB: case COLOR_HSV2BGR_FULL: case COLOR_HSV2RGB_FULL:
case COLOR_HLS2BGR: case COLOR_HLS2RGB: case COLOR_HLS2BGR_FULL: case COLOR_HLS2RGB_FULL:
*/
default:
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );


@ -544,21 +544,21 @@ void cv::ocl::morphologyEx(const oclMat &src, oclMat &dst, int op, const Mat &ke
erode(src, temp, kernel, anchor, iterations, borderType, borderValue);
dilate(temp, dst, kernel, anchor, iterations, borderType, borderValue);
break;
case CV_MOP_CLOSE:
case MORPH_CLOSE:
dilate(src, temp, kernel, anchor, iterations, borderType, borderValue);
erode(temp, dst, kernel, anchor, iterations, borderType, borderValue);
break;
case CV_MOP_GRADIENT:
case MORPH_GRADIENT:
erode(src, temp, kernel, anchor, iterations, borderType, borderValue);
dilate(src, dst, kernel, anchor, iterations, borderType, borderValue);
subtract(dst, temp, dst);
break;
case CV_MOP_TOPHAT:
case MORPH_TOPHAT:
erode(src, dst, kernel, anchor, iterations, borderType, borderValue);
dilate(dst, temp, kernel, anchor, iterations, borderType, borderValue);
subtract(src, temp, dst);
break;
case CV_MOP_BLACKHAT:
case MORPH_BLACKHAT:
dilate(src, dst, kernel, anchor, iterations, borderType, borderValue);
erode(dst, temp, kernel, anchor, iterations, borderType, borderValue);
subtract(temp, src, dst);
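
The branch above builds black-hat from dilate, erode and subtract; for comparison, a minimal sketch of the equivalent single call on the CPU path, with an illustrative input and kernel (assumes the opencv2/imgproc.hpp header):

#include "opencv2/imgproc.hpp"

int main()
{
    cv::Mat src(8, 8, CV_8U, cv::Scalar(100)), dst;
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    // MORPH_BLACKHAT is closing(src) - src, the same composition as in the case above
    cv::morphologyEx(src, dst, cv::MORPH_BLACKHAT, kernel);
    return 0;
}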


@ -906,7 +906,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
if( CV_MAT_CN(gimg.type()) > 1 )
{
cvtColor( gimg, gtemp, CV_BGR2GRAY );
cvtColor( gimg, gtemp, COLOR_BGR2GRAY );
gimg = gtemp;
}


@ -242,7 +242,7 @@ void cv::ocl::HoughCircles(const oclMat& src, oclMat& circles, HoughCirclesBuf&
CV_Assert(src.type() == CV_8UC1);
CV_Assert(src.cols < std::numeric_limits<unsigned short>::max());
CV_Assert(src.rows < std::numeric_limits<unsigned short>::max());
CV_Assert(method == CV_HOUGH_GRADIENT);
CV_Assert(method == HOUGH_GRADIENT);
CV_Assert(dp > 0);
CV_Assert(minRadius > 0 && maxRadius > minRadius);
CV_Assert(cannyThreshold > 0);


@ -125,7 +125,7 @@ namespace cv
const oclMat &image, const oclMat &templ, oclMat &result, MatchTemplateBuf & buf)
{
result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
if (useNaive(CV_TM_SQDIFF, image.depth(), templ.size()))
if (useNaive(TM_SQDIFF, image.depth(), templ.size()))
{
matchTemplateNaive_SQDIFF(image, templ, result, image.oclchannels());
return;
@ -255,7 +255,7 @@ namespace cv
const oclMat &image, const oclMat &templ, oclMat &result, MatchTemplateBuf &buf)
{
result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
if (useNaive(CV_TM_CCORR, image.depth(), templ.size()))
if (useNaive(TM_CCORR, image.depth(), templ.size()))
{
matchTemplateNaive_CCORR(image, templ, result, image.oclchannels());
return;


@ -44,7 +44,10 @@
//
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
namespace cv
{
namespace ocl


@ -62,8 +62,6 @@
#include <stdio.h>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/ocl.hpp"


@ -76,7 +76,7 @@ public:
channels.create(h * N_CHANNELS, w, CV_8UC1);
channels.setTo(0);
cvtColor(frame, gray, CV_BGR2GRAY);
cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
cv::Mat df_dx, df_dy, mag, angle;
cv::Sobel(gray, df_dx, CV_32F, 1, 0);
@ -102,13 +102,13 @@ public:
}
cv::Mat luv, shrunk;
cv::cvtColor(frame, luv, CV_BGR2Luv);
cv::cvtColor(frame, luv, cv::COLOR_BGR2Luv);
std::vector<cv::Mat> splited;
for (int i = 0; i < 3; ++i)
splited.push_back(channels(cv::Rect(0, h * (7 + i), w, h)));
split(luv, splited);
cv::resize(channels, shrunk, cv::Size(integrals.cols - 1, integrals.rows - 1), -1 , -1, CV_INTER_AREA);
cv::resize(channels, shrunk, cv::Size(integrals.cols - 1, integrals.rows - 1), -1 , -1, cv::INTER_AREA);
cv::integral(shrunk, integrals, cv::noArray(), CV_32S);
}
};


@ -45,8 +45,6 @@
#include "opencv2/softcascade.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/ml.hpp"
#include "opencv2/core/private.hpp"


@ -48,7 +48,7 @@
# include "opencv2/imgproc.hpp"
#endif
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/types_c.h"
#ifdef __cplusplus
extern "C" {


@ -41,6 +41,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
// to be moved to legacy


@ -49,6 +49,7 @@
*/
#include "precomp.hpp"
#include <limits>
namespace cv
{
@ -72,7 +73,7 @@ public:
minVal_ = maxVal_ = 0;
name_ = "BackgroundSubtractor.GMG";
}
~BackgroundSubtractorGMGImpl()
{
}


@ -44,9 +44,8 @@
#define __OPENCV_PRECOMP_H__
#include "opencv2/video.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include <list>


@ -73,6 +73,7 @@
*/
#include "precomp.hpp"
#include <limits>
using namespace cv;


@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
using namespace cv;
using namespace std;


@ -12,7 +12,6 @@
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"


@ -8,9 +8,10 @@
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
@ -126,7 +127,7 @@ static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source,
//MgrayImage = MgrayImage.clone();//deep copy
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
findContours(MgrayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));