Merge pull request #975 from SpecLad:merge-2.4

commit 7c4e3715b3
Author: Roman Donchenko, 2013-06-11 17:28:26 +04:00 (committed by OpenCV Buildbot)
31 changed files with 435 additions and 444 deletions

.gitattributes (2 changed lines)

@ -33,7 +33,7 @@
CMakeLists.txt text whitespace=tabwidth=2
*.png binary
*.jepg binary
*.jpeg binary
*.jpg binary
*.exr binary
*.ico binary


@ -287,6 +287,10 @@ endif()
set(OPENCV_CONFIG_FILE_INCLUDE_DIR "${CMAKE_BINARY_DIR}/" CACHE PATH "Where to create the platform-dependant cvconfig.h")
ocv_include_directories(${OPENCV_CONFIG_FILE_INCLUDE_DIR})
# ----------------------------------------------------------------------------
# Path for additional modules
# ----------------------------------------------------------------------------
set(OPENCV_EXTRA_MODULES_PATH "" CACHE PATH "Where to look for additional OpenCV modules")
# ----------------------------------------------------------------------------
# Autodetect if we are in a GIT repository


@ -47,6 +47,9 @@ macro(add_extra_compiler_option option)
endif()
endmacro()
# OpenCV fails some tests when 'char' is 'unsigned' by default
add_extra_compiler_option(-fsigned-char)
if(MINGW)
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=40838
# here we are trying to workaround the problem


@ -33,7 +33,7 @@ if(WITH_QT)
endif()
if(NOT HAVE_QT)
find_package(Qt4)
find_package(Qt4 REQUIRED QtCore QtGui QtTest)
if(QT4_FOUND)
set(HAVE_QT TRUE)
add_definitions(-DHAVE_QT) # We need to define the macro this way, using cvconfig.h does not work


@ -303,7 +303,7 @@ macro(ocv_glob_modules)
# collect modules
set(OPENCV_INITIAL_PASS ON)
foreach(__path ${ARGN})
ocv_get_real_path(__path "${__path}")
get_filename_component(__path "${__path}" ABSOLUTE)
list(FIND __directories_observed "${__path}" __pathIdx)
if(__pathIdx GREATER -1)
@ -315,7 +315,7 @@ macro(ocv_glob_modules)
if(__ocvmodules)
list(SORT __ocvmodules)
foreach(mod ${__ocvmodules})
ocv_get_real_path(__modpath "${__path}/${mod}")
get_filename_component(__modpath "${__path}/${mod}" ABSOLUTE)
if(EXISTS "${__modpath}/CMakeLists.txt")
list(FIND __directories_observed "${__modpath}" __pathIdx)


@ -411,16 +411,6 @@ macro(ocv_regex_escape var regex)
endmacro()
# get absolute path with symlinks resolved
macro(ocv_get_real_path VAR PATHSTR)
if(CMAKE_VERSION VERSION_LESS 2.8)
get_filename_component(${VAR} "${PATHSTR}" ABSOLUTE)
else()
get_filename_component(${VAR} "${PATHSTR}" REALPATH)
endif()
endmacro()
# convert list of paths to full paths
macro(ocv_convert_to_full_paths VAR)
if(${VAR})


@ -4,4 +4,4 @@ if(NOT OPENCV_MODULES_PATH)
set(OPENCV_MODULES_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
ocv_glob_modules(${OPENCV_MODULES_PATH})
ocv_glob_modules(${OPENCV_MODULES_PATH} ${OPENCV_EXTRA_MODULES_PATH})


@ -2136,7 +2136,7 @@ template<typename _Tp> inline void Seq<_Tp>::remove(int idx)
{ seqRemove(seq, idx); }
template<typename _Tp> inline void Seq<_Tp>::remove(const Range& r)
{ seqRemoveSlice(seq, r); }
{ seqRemoveSlice(seq, cvSlice(r.start, r.end)); }
template<typename _Tp> inline void Seq<_Tp>::copyTo(std::vector<_Tp>& vec, const Range& range) const
{
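The explicit cvSlice(r.start, r.end) is needed because the companion hunk below hides CvSlice's C++ constructors (including the cv::Range conversion) from CUDA compilation, so the old implicit Range-to-CvSlice conversion is no longer guaranteed to exist. A small standalone illustration, assuming the C API header opencv2/core/types_c.h is available:

#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>

int main()
{
    cv::Range r(2, 7);
    // CvSlice s = r;                      // relies on the constructor that is now hidden from __CUDACC__
    CvSlice s = cvSlice(r.start, r.end);   // explicit construction works under any compiler
    return (s.start_index == 2 && s.end_index == 7) ? 0 : 1;
}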


@ -1041,7 +1041,7 @@ typedef struct CvSlice
{
int start_index, end_index;
#ifdef __cplusplus
#if defined(__cplusplus) && !defined(__CUDACC__)
CvSlice(int start = 0, int end = 0) : start_index(start), end_index(end) {}
CvSlice(const cv::Range& r) { *this = (r.start != INT_MIN && r.end != INT_MAX) ? CvSlice(r.start, r.end) : CvSlice(0, CV_WHOLE_SEQ_END_INDEX); }
operator cv::Range() const { return (start_index == 0 && end_index == CV_WHOLE_SEQ_END_INDEX ) ? cv::Range::all() : cv::Range(start_index, end_index); }


@ -294,6 +294,9 @@ public:
~AutoLock() { mutex->unlock(); }
protected:
Mutex* mutex;
private:
AutoLock(const AutoLock&);
AutoLock& operator = (const AutoLock&);
};
// The CommandLineParser class is designed for command line arguments parsing
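The two private, unimplemented members added above make cv::AutoLock non-copyable in the usual C++03 way, so a copied guard can never unlock the same Mutex twice. A hypothetical usage sketch; the exact core header name depends on the branch (2.4 vs. the 3.0 development tree):

#include <opencv2/core/core.hpp>   // assumed header; AutoLock/Mutex live in core/utility.hpp on newer branches

static cv::Mutex g_mutex;
static int g_counter = 0;

void incrementSafely()
{
    cv::AutoLock lock(g_mutex);    // locks on construction, unlocks when 'lock' leaves scope
    // cv::AutoLock copy = lock;   // would no longer compile: the copy constructor is private
    ++g_counter;
}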


@ -437,7 +437,7 @@ public:
GpuMat dclassified(1, 1, CV_32S);
cudaSafeCall( cudaMemcpy(dclassified.ptr(), &classified, sizeof(int), cudaMemcpyHostToDevice) );
PyrLavel level(0, 1.0f, image.size(), NxM, minObjectSize);
PyrLavel level(0, scaleFactor, image.size(), NxM, minObjectSize);
while (level.isFeasible(maxObjectSize))
{


@ -101,14 +101,10 @@ elseif(HAVE_QT)
endif()
include(${QT_USE_FILE})
if(QT_INCLUDE_DIR)
ocv_include_directories(${QT_INCLUDE_DIR})
endif()
QT4_ADD_RESOURCES(_RCC_OUTFILES src/window_QT.qrc)
QT4_WRAP_CPP(_MOC_OUTFILES src/window_QT.h)
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES})
list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES})
ocv_check_flag_support(CXX -Wno-missing-declarations _have_flag)
if(${_have_flag})


@ -708,6 +708,8 @@ namespace cv
}
//! applies non-separable 2D linear filter to the image
// Note, at the moment this function only works when anchor point is in the kernel center
// and kernel size supported is either 3x3 or 5x5; otherwise the function will fail to output valid result
CV_EXPORTS void filter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat &kernel,
Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
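Given the constraints spelled out in the new comment (centred anchor, 3x3 or 5x5 kernel only), a minimal usage sketch might look as follows; the input file name and kernel values are illustrative, and the header layout of the 2.4/3.0-dev era is assumed:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ocl/ocl.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // hypothetical input image
    if (src.empty())
        return 1;

    // 3x3 sharpening kernel, one of the two supported sizes.
    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);

    cv::ocl::oclMat d_src(src), d_dst;
    // Default anchor Point(-1, -1) resolves to the kernel centre, matching the documented restriction.
    // (OpenCL device selection via cv::ocl::getDevice()/setDevice() is omitted for brevity.)
    cv::ocl::filter2D(d_src, d_dst, -1, kernel);

    cv::Mat dst;
    d_dst.download(dst);
    return 0;
}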


@ -62,7 +62,6 @@ PERFTEST(lut)
gen(src, size, size, all_type[j], 0, 256);
gen(lut, 1, 256, CV_8UC1, 0, 1);
dst = src;
LUT(src, lut, dst);
@ -233,8 +232,6 @@ PERFTEST(Mul)
gen(src1, size, size, all_type[j], 0, 256);
gen(src2, size, size, all_type[j], 0, 256);
dst = src1;
dst.setTo(0);
multiply(src1, src2, dst);
@ -281,8 +278,6 @@ PERFTEST(Div)
gen(src1, size, size, all_type[j], 0, 256);
gen(src2, size, size, all_type[j], 0, 256);
dst = src1;
dst.setTo(0);
divide(src1, src2, dst);


@ -291,9 +291,7 @@ PERFTEST(GaussianBlur)
{
SUBTEST << size << 'x' << size << "; " << type_name[j] ;
gen(src, size, size, all_type[j], 0, 256);
dst = src;
dst.setTo(0);
gen(src, size, size, all_type[j], 5, 16);
GaussianBlur(src, dst, Size(9, 9), 0);
@ -339,39 +337,38 @@ PERFTEST(filter2D)
{
gen(src, size, size, all_type[j], 0, 256);
for (int ksize = 3; ksize <= 15; ksize = 2*ksize+1)
{
SUBTEST << "ksize = " << ksize << "; " << size << 'x' << size << "; " << type_name[j] ;
const int ksize = 3;
Mat kernel;
gen(kernel, ksize, ksize, CV_32FC1, 0.0, 1.0);
SUBTEST << "ksize = " << ksize << "; " << size << 'x' << size << "; " << type_name[j] ;
Mat dst, ocl_dst;
dst.setTo(0);
cv::filter2D(src, dst, -1, kernel);
Mat kernel;
gen(kernel, ksize, ksize, CV_32SC1, -3.0, 3.0);
CPU_ON;
cv::filter2D(src, dst, -1, kernel);
CPU_OFF;
Mat dst, ocl_dst;
ocl::oclMat d_src(src), d_dst;
cv::filter2D(src, dst, -1, kernel);
WARMUP_ON;
ocl::filter2D(d_src, d_dst, -1, kernel);
WARMUP_OFF;
CPU_ON;
cv::filter2D(src, dst, -1, kernel);
CPU_OFF;
GPU_ON;
ocl::filter2D(d_src, d_dst, -1, kernel);
GPU_OFF;
ocl::oclMat d_src(src), d_dst;
GPU_FULL_ON;
d_src.upload(src);
ocl::filter2D(d_src, d_dst, -1, kernel);
d_dst.download(ocl_dst);
GPU_FULL_OFF;
WARMUP_ON;
ocl::filter2D(d_src, d_dst, -1, kernel);
WARMUP_OFF;
TestSystem::instance().ExpectedMatNear(ocl_dst, dst, 1e-5);
}
GPU_ON;
ocl::filter2D(d_src, d_dst, -1, kernel);
GPU_OFF;
GPU_FULL_ON;
d_src.upload(src);
ocl::filter2D(d_src, d_dst, -1, kernel);
d_dst.download(ocl_dst);
GPU_FULL_OFF;
TestSystem::instance().ExpectedMatNear(ocl_dst, dst, 1e-5);
}


@ -674,8 +674,8 @@ COOR do_meanShift(int x0, int y0, uchar *sptr, uchar *dptr, int sstep, cv::Size
coor.y = static_cast<short>(y0);
return coor;
}
void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::TermCriteria crit);
void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::TermCriteria crit)
static void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::TermCriteria crit)
{
if( src_roi.empty() )
CV_Error( Error::StsBadArg, "The input image is empty" );
@ -683,6 +683,8 @@ void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::T
if( src_roi.depth() != CV_8U || src_roi.channels() != 4 )
CV_Error( Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dst_roi.create(src_roi.size(), src_roi.type());
CV_Assert( (src_roi.cols == dst_roi.cols) && (src_roi.rows == dst_roi.rows) );
CV_Assert( !(dst_roi.step & 0x3) );
@ -725,9 +727,6 @@ PERFTEST(meanShiftFiltering)
SUBTEST << size << 'x' << size << "; 8UC3 vs 8UC4";
gen(src, size, size, CV_8UC4, Scalar::all(0), Scalar::all(256));
//gen(dst, size, size, CV_8UC4, Scalar::all(0), Scalar::all(256));
dst = src;
dst.setTo(0);
cv::TermCriteria crit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 5, 1);
@ -756,201 +755,21 @@ PERFTEST(meanShiftFiltering)
TestSystem::instance().ExpectedMatNear(dst, ocl_dst, 0.0);
}
}
///////////// meanShiftProc////////////////////////
#if 0
COOR do_meanShift(int x0, int y0, uchar *sptr, uchar *dptr, int sstep, cv::Size size, int sp, int sr, int maxIter, float eps, int *tab)
{
int isr2 = sr * sr;
int c0, c1, c2, c3;
int iter;
uchar *ptr = NULL;
uchar *pstart = NULL;
int revx = 0, revy = 0;
c0 = sptr[0];
c1 = sptr[1];
c2 = sptr[2];
c3 = sptr[3];
// iterate meanshift procedure
for (iter = 0; iter < maxIter; iter++)
{
int count = 0;
int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
//mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
int minx = x0 - sp;
int miny = y0 - sp;
int maxx = x0 + sp;
int maxy = y0 + sp;
//deal with the image boundary
if (minx < 0)
{
minx = 0;
}
if (miny < 0)
{
miny = 0;
}
if (maxx >= size.width)
{
maxx = size.width - 1;
}
if (maxy >= size.height)
{
maxy = size.height - 1;
}
if (iter == 0)
{
pstart = sptr;
}
else
{
pstart = pstart + revy * sstep + (revx << 2); //point to the new position
}
ptr = pstart;
ptr = ptr + (miny - y0) * sstep + ((minx - x0) << 2); //point to the start in the row
for (int y = miny; y <= maxy; y++, ptr += sstep - ((maxx - minx + 1) << 2))
{
int rowCount = 0;
int x = minx;
#if CV_ENABLE_UNROLLED
for (; x + 4 <= maxx; x += 4, ptr += 16)
{
int t0, t1, t2;
t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
if (tab[t0 - c0 + 255] + tab[t1 - c1 + 255] + tab[t2 - c2 + 255] <= isr2)
{
s0 += t0;
s1 += t1;
s2 += t2;
sx += x;
rowCount++;
}
t0 = ptr[4], t1 = ptr[5], t2 = ptr[6];
if (tab[t0 - c0 + 255] + tab[t1 - c1 + 255] + tab[t2 - c2 + 255] <= isr2)
{
s0 += t0;
s1 += t1;
s2 += t2;
sx += x + 1;
rowCount++;
}
t0 = ptr[8], t1 = ptr[9], t2 = ptr[10];
if (tab[t0 - c0 + 255] + tab[t1 - c1 + 255] + tab[t2 - c2 + 255] <= isr2)
{
s0 += t0;
s1 += t1;
s2 += t2;
sx += x + 2;
rowCount++;
}
t0 = ptr[12], t1 = ptr[13], t2 = ptr[14];
if (tab[t0 - c0 + 255] + tab[t1 - c1 + 255] + tab[t2 - c2 + 255] <= isr2)
{
s0 += t0;
s1 += t1;
s2 += t2;
sx += x + 3;
rowCount++;
}
}
#endif
for (; x <= maxx; x++, ptr += 4)
{
int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
if (tab[t0 - c0 + 255] + tab[t1 - c1 + 255] + tab[t2 - c2 + 255] <= isr2)
{
s0 += t0;
s1 += t1;
s2 += t2;
sx += x;
rowCount++;
}
}
if (rowCount == 0)
{
continue;
}
count += rowCount;
sy += y * rowCount;
}
if (count == 0)
{
break;
}
int x1 = sx / count;
int y1 = sy / count;
s0 = s0 / count;
s1 = s1 / count;
s2 = s2 / count;
bool stopFlag = (x0 == x1 && y0 == y1) || (abs(x1 - x0) + abs(y1 - y0) +
tab[s0 - c0 + 255] + tab[s1 - c1 + 255] + tab[s2 - c2 + 255] <= eps);
//revise the pointer corresponding to the new (y0,x0)
revx = x1 - x0;
revy = y1 - y0;
x0 = x1;
y0 = y1;
c0 = s0;
c1 = s1;
c2 = s2;
if (stopFlag)
{
break;
}
} //for iter
dptr[0] = (uchar)c0;
dptr[1] = (uchar)c1;
dptr[2] = (uchar)c2;
dptr[3] = (uchar)c3;
COOR coor;
coor.x = static_cast<short>(x0);
coor.y = static_cast<short>(y0);
return coor;
}
#endif
void meanShiftProc_(const Mat &src_roi, Mat &dst_roi, Mat &dstCoor_roi, int sp, int sr, cv::TermCriteria crit)
{
if (src_roi.empty())
{
CV_Error(Error::StsBadArg, "The input image is empty");
}
if (src_roi.depth() != CV_8U || src_roi.channels() != 4)
{
CV_Error(Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported");
}
dst_roi.create(src_roi.size(), src_roi.type());
dstCoor_roi.create(src_roi.size(), CV_16SC2);
CV_Assert((src_roi.cols == dst_roi.cols) && (src_roi.rows == dst_roi.rows) &&
(src_roi.cols == dstCoor_roi.cols) && (src_roi.rows == dstCoor_roi.rows));
CV_Assert(!(dstCoor_roi.step & 0x3));
@ -1008,8 +827,6 @@ PERFTEST(meanShiftProc)
SUBTEST << size << 'x' << size << "; 8UC4 and CV_16SC2 ";
gen(src, size, size, CV_8UC4, Scalar::all(0), Scalar::all(256));
gen(dst[0], size, size, CV_8UC4, Scalar::all(0), Scalar::all(256));
gen(dst[1], size, size, CV_16SC2, Scalar::all(0), Scalar::all(256));
meanShiftProc_(src, dst[0], dst[1], 5, 6, crit);


@ -48,8 +48,8 @@
///////////// PyrLKOpticalFlow ////////////////////////
PERFTEST(PyrLKOpticalFlow)
{
std::string images1[] = {"rubberwhale1.png", "aloeL.jpg"};
std::string images2[] = {"rubberwhale2.png", "aloeR.jpg"};
std::string images1[] = {"rubberwhale1.png", "basketball1.png"};
std::string images2[] = {"rubberwhale2.png", "basketball2.png"};
for (size_t i = 0; i < sizeof(images1) / sizeof(std::string); i++)
{


@ -645,7 +645,11 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel,
args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholecols));
args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholerows));
openCLExecuteKernel(clCxt, &filtering_laplacian, kernelName, globalThreads, localThreads, args, cn, depth);
const int buffer_size = 100;
char opt_buffer [buffer_size] = "";
sprintf(opt_buffer, "-DANCHOR=%d -DANX=%d -DANY=%d", ksize.width, anchor.x, anchor.y);
openCLExecuteKernel(clCxt, &filtering_laplacian, kernelName, globalThreads, localThreads, args, cn, depth, opt_buffer);
}
Ptr<BaseFilter_GPU> cv::ocl::getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize,
Point anchor, int borderType)
@ -656,7 +660,7 @@ Ptr<BaseFilter_GPU> cv::ocl::getLinearFilter_GPU(int srcType, int dstType, const
oclMat gpu_krnl;
int nDivisor;
normalizeKernel(kernel, gpu_krnl, CV_32S, &nDivisor, true);
normalizeKernel(kernel, gpu_krnl, CV_32S, &nDivisor, false);
normalizeAnchor(anchor, ksize);
return Ptr<BaseFilter_GPU>(new LinearFilter_GPU(ksize, anchor, gpu_krnl, GPUFilter2D_callers[CV_MAT_CN(srcType)],
@ -1172,7 +1176,7 @@ void linearRowFilter_gpu(const oclMat &src, const oclMat &dst, oclMat mat_kernel
args.push_back(std::make_pair(sizeof(cl_int), (void *)&ridusy));
args.push_back(std::make_pair(sizeof(cl_mem), (void *)&mat_kernel.data));
openCLExecuteKernel2(clCxt, &filter_sep_row, kernelName, globalThreads, localThreads, args, channels, src.depth(), compile_option, CLFLUSH);
openCLExecuteKernel(clCxt, &filter_sep_row, kernelName, globalThreads, localThreads, args, channels, src.depth(), compile_option);
}
Ptr<BaseRowFilter_GPU> cv::ocl::getLinearRowFilter_GPU(int srcType, int /*bufType*/, const Mat &rowKernel, int anchor, int bordertype)


@ -257,7 +257,8 @@ void cv::ocl::GoodFeaturesToTrackDetector_OCL::operator ()(const oclMat& image,
if (minDistance < 1)
{
corners = tmpCorners_(Rect(0, 0, maxCorners > 0 ? std::min(maxCorners, total) : total, 1));
Rect roi_range(0, 0, maxCorners > 0 ? std::min(maxCorners, total) : total, 1);
tmpCorners_(roi_range).copyTo(corners);
}
else
{
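Replacing the ROI-header assignment with copyTo hands the caller an independent buffer rather than a view into the detector's internal tmpCorners_ storage, which the detector may overwrite on its next invocation. The difference, as a hypothetical standalone fragment:

cv::Mat internal(1, 1000, CV_32FC2);   // stand-in for the detector's internal tmpCorners_ buffer
cv::Rect roi_range(0, 0, 10, 1);

cv::Mat view = internal(roi_range);    // header only: shares memory, changes when 'internal' is reused
cv::Mat copy;
internal(roi_range).copyTo(copy);      // deep copy: remains valid after the detector reuses 'internal'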


@ -337,6 +337,10 @@ namespace cv
oclinfo.push_back(ocltmpinfo);
}
}
if(devcienums > 0)
{
setDevice(oclinfo[0]);
}
return devcienums;
}


@ -82,9 +82,9 @@
//////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////Macro for define elements number per thread/////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
#define ANCHOR 3
#define ANX 1
#define ANY 1
//#define ANCHOR 3
//#define ANX 1
//#define ANY 1
#define ROWS_PER_GROUP 4
#define ROWS_PER_GROUP_BITS 2
@ -185,7 +185,7 @@ __kernel void filter2D_C1_D0(__global uchar *src, int src_step, int src_offset_x
for(int i = 0; i < ANCHOR; i++)
{
#pragma unroll 3
#pragma unroll
for(int j = 0; j < ANCHOR; j++)
{
if(dst_rows_index < dst_rows_end)
@ -295,7 +295,7 @@ __kernel void filter2D_C1_D5(__global float *src, int src_step, int src_offset_x
for(int i = 0; i < ANCHOR; i++)
{
#pragma unroll 3
#pragma unroll
for(int j = 0; j < ANCHOR; j++)
{
if(dst_rows_index < dst_rows_end)
@ -410,7 +410,7 @@ __kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_
for(int i = 0; i < ANCHOR; i++)
{
#pragma unroll 3
#pragma unroll
for(int j = 0; j < ANCHOR; j++)
{
if(dst_rows_index < dst_rows_end)


@ -130,28 +130,29 @@ __kernel void calcHarris(__global const float *Dx,__global const float *Dy, __gl
data[2][i] = dy_data[i] * dy_data[i];
}
#else
for(int i=0; i < ksY+1; i++)
{
int clamped_col = min(dst_cols, col);
for(int i=0; i < ksY+1; i++)
{
int dx_selected_row;
int dx_selected_col;
dx_selected_row = ADDR_H(dx_startY+i, 0, dx_whole_rows);
dx_selected_row = ADDR_B(dx_startY+i, dx_whole_rows, dx_selected_row);
dx_selected_col = ADDR_L(dx_startX+col, 0, dx_whole_cols);
dx_selected_col = ADDR_R(dx_startX+col, dx_whole_cols, dx_selected_col);
dx_selected_col = ADDR_L(dx_startX+clamped_col, 0, dx_whole_cols);
dx_selected_col = ADDR_R(dx_startX+clamped_col, dx_whole_cols, dx_selected_col);
dx_data[i] = Dx[dx_selected_row * (dx_step>>2) + dx_selected_col];
int dy_selected_row;
int dy_selected_col;
dy_selected_row = ADDR_H(dy_startY+i, 0, dy_whole_rows);
dy_selected_row = ADDR_B(dy_startY+i, dy_whole_rows, dy_selected_row);
dy_selected_col = ADDR_L(dy_startX+col, 0, dy_whole_cols);
dy_selected_col = ADDR_R(dy_startX+col, dy_whole_cols, dy_selected_col);
dy_selected_col = ADDR_L(dy_startX+clamped_col, 0, dy_whole_cols);
dy_selected_col = ADDR_R(dy_startX+clamped_col, dy_whole_cols, dy_selected_col);
dy_data[i] = Dy[dy_selected_row * (dy_step>>2) + dy_selected_col];
data[0][i] = dx_data[i] * dx_data[i];
data[1][i] = dx_data[i] * dy_data[i];
data[2][i] = dy_data[i] * dy_data[i];
}
}
#endif
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0;
for(int i=1; i < ksY; i++)


@ -130,28 +130,30 @@ __kernel void calcMinEigenVal(__global const float *Dx,__global const float *Dy,
data[2][i] = dy_data[i] * dy_data[i];
}
#else
for(int i=0; i < ksY+1; i++)
{
int clamped_col = min(dst_cols, col);
for(int i=0; i < ksY+1; i++)
{
int dx_selected_row;
int dx_selected_col;
dx_selected_row = ADDR_H(dx_startY+i, 0, dx_whole_rows);
dx_selected_row = ADDR_B(dx_startY+i, dx_whole_rows, dx_selected_row);
dx_selected_col = ADDR_L(dx_startX+col, 0, dx_whole_cols);
dx_selected_col = ADDR_R(dx_startX+col, dx_whole_cols, dx_selected_col);
dx_selected_col = ADDR_L(dx_startX+clamped_col, 0, dx_whole_cols);
dx_selected_col = ADDR_R(dx_startX+clamped_col, dx_whole_cols, dx_selected_col);
dx_data[i] = Dx[dx_selected_row * (dx_step>>2) + dx_selected_col];
int dy_selected_row;
int dy_selected_col;
dy_selected_row = ADDR_H(dy_startY+i, 0, dy_whole_rows);
dy_selected_row = ADDR_B(dy_startY+i, dy_whole_rows, dy_selected_row);
dy_selected_col = ADDR_L(dy_startX+col, 0, dy_whole_cols);
dy_selected_col = ADDR_R(dy_startX+col, dy_whole_cols, dy_selected_col);
dy_selected_col = ADDR_L(dy_startX+clamped_col, 0, dy_whole_cols);
dy_selected_col = ADDR_R(dy_startX+clamped_col, dy_whole_cols, dy_selected_col);
dy_data[i] = Dy[dy_selected_row * (dy_step>>2) + dy_selected_col];
data[0][i] = dx_data[i] * dx_data[i];
data[1][i] = dx_data[i] * dy_data[i];
data[2][i] = dy_data[i] * dy_data[i];
}
}
#endif
float sum0 = 0.0, sum1 = 0.0, sum2 = 0.0;
for(int i=1; i < ksY; i++)


@ -389,8 +389,8 @@ __kernel void pyrUp_C4_D0(__global uchar4* src,__global uchar4* dst,
float4 sum = (float4)(0,0,0,0);
const int evenFlag = (int)((tidx & 1) == 0);
const int oddFlag = (int)((tidx & 1) != 0);
const float4 evenFlag = (float4)((tidx & 1) == 0);
const float4 oddFlag = (float4)((tidx & 1) != 0);
const bool eveny = ((tidy & 1) == 0);
float4 co1 = (float4)(0.375f, 0.375f, 0.375f, 0.375f);
@ -455,6 +455,7 @@ __kernel void pyrUp_C4_D0(__global uchar4* src,__global uchar4* dst,
dst[x + y * dstStep] = convert_uchar4_sat_rte(4.0f * sum);
}
}
///////////////////////////////////////////////////////////////////////
////////////////////////// CV_16UC4 //////////////////////////////////
///////////////////////////////////////////////////////////////////////
@ -492,8 +493,8 @@ __kernel void pyrUp_C4_D2(__global ushort4* src,__global ushort4* dst,
float4 sum = (float4)(0,0,0,0);
const int evenFlag = (int)((get_local_id(0) & 1) == 0);
const int oddFlag = (int)((get_local_id(0) & 1) != 0);
const float4 evenFlag = (float4)((get_local_id(0) & 1) == 0);
const float4 oddFlag = (float4)((get_local_id(0) & 1) != 0);
const bool eveny = ((get_local_id(1) & 1) == 0);
const int tidx = get_local_id(0);
@ -604,8 +605,8 @@ __kernel void pyrUp_C4_D5(__global float4* src,__global float4* dst,
float4 sum = (float4)(0,0,0,0);
const int evenFlag = (int)((tidx & 1) == 0);
const int oddFlag = (int)((tidx & 1) != 0);
const float4 evenFlag = (float4)((tidx & 1) == 0);
const float4 oddFlag = (float4)((tidx & 1) != 0);
const bool eveny = ((tidy & 1) == 0);
float4 co1 = (float4)(0.375f, 0.375f, 0.375f, 0.375f);
@ -669,4 +670,4 @@ __kernel void pyrUp_C4_D5(__global float4* src,__global float4* dst,
{
dst[x + y * dstStep] = 4.0f * sum;
}
}
}


@ -508,7 +508,7 @@ static void lkSparse_run(oclMat &I, oclMat &J,
int wave_size = queryDeviceInfo<WAVEFRONT_SIZE, int>(kernel);
openCLSafeCall(clReleaseKernel(kernel));
static char opt[16] = {0};
static char opt[32] = {0};
sprintf(opt, " -D WAVE_SIZE=%d", wave_size);
openCLExecuteKernel2(clCxt, &pyrlk, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), opt, CLFLUSH);
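The buffer grows from 16 to 32 bytes because " -D WAVE_SIZE=" alone is 14 characters, so a two-digit wave size plus the terminating NUL already overflows the old size. A defensive variant (a sketch only, not what the patch does) bounds the write against the buffer itself:

#include <stdio.h>

char opt[32] = {0};
// wave_size comes from queryDeviceInfo(), as in the hunk above; snprintf never
// writes more than sizeof(opt) bytes even if the value is unexpectedly large.
snprintf(opt, sizeof(opt), " -D WAVE_SIZE=%d", wave_size);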


@ -215,9 +215,9 @@ public class FdActivity extends Activity implements CvCameraViewListener2 {
else if (item == mItemFace20)
setMinFaceSize(0.2f);
else if (item == mItemType) {
mDetectorType = (mDetectorType + 1) % mDetectorName.length;
item.setTitle(mDetectorName[mDetectorType]);
setDetectorType(mDetectorType);
int tmpDetectorType = (mDetectorType + 1) % mDetectorName.length;
item.setTitle(mDetectorName[tmpDetectorType]);
setDetectorType(tmpDetectorType);
}
return true;
}


@ -19,12 +19,21 @@ using namespace std;
using namespace cv;
#if !defined(HAVE_CUDA)
#if !defined(HAVE_CUDA) || defined(__arm__)
int main( int, const char** )
{
cout << "Please compile the library with CUDA support" << endl;
return -1;
#if !defined(HAVE_CUDA)
std::cout << "CUDA support is required (CMake key 'WITH_CUDA' must be true)." << std::endl;
#endif
#if defined(__arm__)
std::cout << "Unsupported for ARM CUDA library." << std::endl;
#endif
return 0;
}
#else


@ -23,7 +23,7 @@
# endif
#endif
#if !defined(HAVE_CUDA) || !defined(HAVE_TBB)
#if !defined(HAVE_CUDA) || !defined(HAVE_TBB) || defined(__arm__)
int main()
{
@ -35,6 +35,10 @@ int main()
std::cout << "TBB support is required (CMake key 'WITH_TBB' must be true).\n";
#endif
#if defined(__arm__)
std::cout << "Unsupported for ARM CUDA library." << std::endl;
#endif
return 0;
}


@ -25,7 +25,7 @@
# endif
#endif
#if !defined(HAVE_CUDA) || !defined(HAVE_TBB)
#if !defined(HAVE_CUDA) || !defined(HAVE_TBB) || defined(__arm__)
int main()
{
@ -37,6 +37,10 @@ int main()
std::cout << "TBB support is required (CMake key 'WITH_TBB' must be true).\n";
#endif
#if defined(__arm__)
std::cout << "Unsupported for ARM CUDA library." << std::endl;
#endif
return 0;
}


@ -30,6 +30,7 @@ static double getTime(){
static void download(const oclMat& d_mat, vector<Point2f>& vec)
{
vec.clear();
vec.resize(d_mat.cols);
Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
d_mat.download(mat);
@ -37,6 +38,7 @@ static void download(const oclMat& d_mat, vector<Point2f>& vec)
static void download(const oclMat& d_mat, vector<uchar>& vec)
{
vec.clear();
vec.resize(d_mat.cols);
Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
d_mat.download(mat);
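The added resize matters because the Mat built on the following line is only a header over the vector's storage: with an empty vector, &vec[0] is not a valid pointer and download() would write past the end of the allocation. A hypothetical mirror of the fixed helper (name downloadPoints is illustrative):

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/ocl/ocl.hpp>

static void downloadPoints(const cv::ocl::oclMat& d_mat, std::vector<cv::Point2f>& vec)
{
    vec.resize(d_mat.cols);                                    // allocate one element per column first
    cv::Mat header(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);   // header over vec's memory, no copy made
    d_mat.download(header);                                    // device-to-host copy lands directly in vec
}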
@ -118,14 +120,15 @@ int main(int argc, const char* argv[])
bool useCPU = cmd.has("s");
bool useCamera = cmd.has("c");
int inputName = cmd.get<int>("c");
oclMat d_nextPts, d_status;
oclMat d_nextPts, d_status;
GoodFeaturesToTrackDetector_OCL d_features(points);
Mat frame0 = imread(fname0, cv::IMREAD_GRAYSCALE);
Mat frame1 = imread(fname1, cv::IMREAD_GRAYSCALE);
PyrLKOpticalFlow d_pyrLK;
vector<cv::Point2f> pts;
vector<cv::Point2f> nextPts;
vector<unsigned char> status;
vector<cv::Point2f> pts(points);
vector<cv::Point2f> nextPts(points);
vector<unsigned char> status(points);
vector<float> err;
if (frame0.empty() || frame1.empty())
@ -196,29 +199,24 @@ int main(int argc, const char* argv[])
ptr1 = frame0Gray;
}
pts.clear();
cv::goodFeaturesToTrack(ptr0, pts, points, 0.01, 0.0);
if (pts.size() == 0)
{
continue;
}
if (useCPU)
{
cv::calcOpticalFlowPyrLK(ptr0, ptr1, pts, nextPts, status, err);
pts.clear();
goodFeaturesToTrack(ptr0, pts, points, 0.01, 0.0);
if(pts.size() == 0)
continue;
calcOpticalFlowPyrLK(ptr0, ptr1, pts, nextPts, status, err);
}
else
{
oclMat d_prevPts(1, points, CV_32FC2, (void*)&pts[0]);
d_pyrLK.sparse(oclMat(ptr0), oclMat(ptr1), d_prevPts, d_nextPts, d_status);
download(d_prevPts, pts);
oclMat d_img(ptr0), d_prevPts;
d_features(d_img, d_prevPts);
if(!d_prevPts.rows || !d_prevPts.cols)
continue;
d_pyrLK.sparse(d_img, oclMat(ptr1), d_prevPts, d_nextPts, d_status);
d_features.downloadPoints(d_prevPts,pts);
download(d_nextPts, nextPts);
download(d_status, status);
}
if (i%2 == 1)
frame1.copyTo(frameCopy);
@ -243,21 +241,19 @@ nocamera:
for(int i = 0; i <= LOOP_NUM;i ++)
{
cout << "loop" << i << endl;
if (i > 0) workBegin();
cv::goodFeaturesToTrack(frame0, pts, points, 0.01, minDist);
if (i > 0) workBegin();
if (useCPU)
{
cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
goodFeaturesToTrack(frame0, pts, points, 0.01, minDist);
calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
}
else
{
oclMat d_prevPts(1, points, CV_32FC2, (void*)&pts[0]);
d_pyrLK.sparse(oclMat(frame0), oclMat(frame1), d_prevPts, d_nextPts, d_status);
download(d_prevPts, pts);
oclMat d_img(frame0), d_prevPts;
d_features(d_img, d_prevPts);
d_pyrLK.sparse(d_img, oclMat(frame1), d_prevPts, d_nextPts, d_status);
d_features.downloadPoints(d_prevPts, pts);
download(d_nextPts, nextPts);
download(d_status, status);
}


@ -46,156 +46,102 @@
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/nonfree/ocl.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace std;
using namespace cv;
using namespace cv::ocl;
//#define USE_CPU_DESCRIPTOR // use cpu descriptor extractor until ocl descriptor extractor is fixed
//#define USE_CPU_BFMATCHER
const int LOOP_NUM = 10;
const int GOOD_PTS_MAX = 50;
const float GOOD_PORTION = 0.15f;
namespace
{
void help();
void help()
{
cout << "\nThis program demonstrates using SURF_OCL features detector and descriptor extractor" << endl;
cout << "\nUsage:\n\tsurf_matcher --left <image1> --right <image2>" << endl;
std::cout << "\nThis program demonstrates using SURF_OCL features detector and descriptor extractor" << std::endl;
std::cout << "\nUsage:\n\tsurf_matcher --left <image1> --right <image2> [-c]" << std::endl;
std::cout << "\nExample:\n\tsurf_matcher --left box.png --right box_in_scene.png" << std::endl;
}
int64 work_begin = 0;
int64 work_end = 0;
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// use cpu findHomography interface to calculate the transformation matrix
int main(int argc, char* argv[])
void workBegin()
{
if (argc != 5 && argc != 1)
work_begin = getTickCount();
}
void workEnd()
{
work_end = getTickCount() - work_begin;
}
double getTime(){
return work_end /((double)getTickFrequency() * 1000.);
}
template<class KPDetector>
struct SURFDetector
{
KPDetector surf;
SURFDetector(double hessian = 800.0)
:surf(hessian)
{
help();
return -1;
}
vector<cv::ocl::Info> info;
if(!cv::ocl::getDevice(info))
template<class T>
void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
{
cout << "Error: Did not find a valid OpenCL device!" << endl;
return -1;
surf(in, mask, pts, descriptors, useProvided);
}
Mat cpu_img1, cpu_img2, cpu_img1_grey, cpu_img2_grey;
oclMat img1, img2;
if(argc != 5)
};
template<class KPMatcher>
struct SURFMatcher
{
KPMatcher matcher;
template<class T>
void match(const T& in1, const T& in2, std::vector<cv::DMatch>& matches)
{
cpu_img1 = imread("o.png");
cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY);
img1 = cpu_img1_grey;
CV_Assert(!img1.empty());
cpu_img2 = imread("r2.png");
cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY);
img2 = cpu_img2_grey;
}
else
{
for (int i = 1; i < argc; ++i)
{
if (string(argv[i]) == "--left")
{
cpu_img1 = imread(argv[++i]);
cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY);
img1 = cpu_img1_grey;
CV_Assert(!img1.empty());
}
else if (string(argv[i]) == "--right")
{
cpu_img2 = imread(argv[++i]);
cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY);
img2 = cpu_img2_grey;
}
else if (string(argv[i]) == "--help")
{
help();
return -1;
}
}
matcher.match(in1, in2, matches);
}
};
SURF_OCL surf;
//surf.hessianThreshold = 400.f;
//surf.extended = false;
// detecting keypoints & computing descriptors
oclMat keypoints1GPU, keypoints2GPU;
oclMat descriptors1GPU, descriptors2GPU;
// downloading results
vector<KeyPoint> keypoints1, keypoints2;
vector<DMatch> matches;
#ifndef USE_CPU_DESCRIPTOR
surf(img1, oclMat(), keypoints1GPU, descriptors1GPU);
surf(img2, oclMat(), keypoints2GPU, descriptors2GPU);
surf.downloadKeypoints(keypoints1GPU, keypoints1);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
#ifdef USE_CPU_BFMATCHER
//BFMatcher
BFMatcher matcher(cv::NORM_L2);
matcher.match(Mat(descriptors1GPU), Mat(descriptors2GPU), matches);
#else
BruteForceMatcher_OCL_base matcher(BruteForceMatcher_OCL_base::L2Dist);
matcher.match(descriptors1GPU, descriptors2GPU, matches);
#endif
#else
surf(img1, oclMat(), keypoints1GPU);
surf(img2, oclMat(), keypoints2GPU);
surf.downloadKeypoints(keypoints1GPU, keypoints1);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
// use SURF_OCL to detect keypoints and use SURF to extract descriptors
SURF surf_cpu;
Mat descriptors1, descriptors2;
surf_cpu(cpu_img1, Mat(), keypoints1, descriptors1, true);
surf_cpu(cpu_img2, Mat(), keypoints2, descriptors2, true);
matcher.match(descriptors1, descriptors2, matches);
#endif
cout << "OCL: FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;
cout << "OCL: FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( size_t i = 0; i < keypoints1.size(); i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2.5*min_dist )
Mat drawGoodMatches(
const Mat& cpu_img1,
const Mat& cpu_img2,
const std::vector<KeyPoint>& keypoints1,
const std::vector<KeyPoint>& keypoints2,
std::vector<DMatch>& matches,
std::vector<Point2f>& scene_corners_
)
{
//-- Sort matches and preserve top 10% matches
std::sort(matches.begin(), matches.end());
std::vector< DMatch > good_matches;
double minDist = matches.front().distance,
maxDist = matches.back().distance;
for( size_t i = 0; i < keypoints1.size(); i++ )
const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION));
for( int i = 0; i < ptsPairs; i++ )
{
if( matches[i].distance < 3*min_dist )
{
good_matches.push_back( matches[i]);
}
good_matches.push_back( matches[i] );
}
std::cout << "\nMax distance: " << maxDist << std::endl;
std::cout << "Min distance: " << minDist << std::endl;
std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl;
// drawing the results
Mat img_matches;
drawMatches( cpu_img1, keypoints1, cpu_img2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
@ -207,26 +153,238 @@ int main(int argc, char* argv[])
obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0); obj_corners[1] = Point( cpu_img1.cols, 0 );
obj_corners[2] = Point( cpu_img1.cols, cpu_img1.rows ); obj_corners[3] = Point( 0, cpu_img1.rows );
std::vector<Point2f> scene_corners(4);
Mat H = findHomography( obj, scene, RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
scene_corners_ = scene_corners;
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( (float)cpu_img1.cols, 0), scene_corners[1] + Point2f( (float)cpu_img1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( (float)cpu_img1.cols, 0), scene_corners[2] + Point2f( (float)cpu_img1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( (float)cpu_img1.cols, 0), scene_corners[3] + Point2f( (float)cpu_img1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( (float)cpu_img1.cols, 0), scene_corners[0] + Point2f( (float)cpu_img1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches,
scene_corners[0] + Point2f( (float)cpu_img1.cols, 0), scene_corners[1] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[1] + Point2f( (float)cpu_img1.cols, 0), scene_corners[2] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[2] + Point2f( (float)cpu_img1.cols, 0), scene_corners[3] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
line( img_matches,
scene_corners[3] + Point2f( (float)cpu_img1.cols, 0), scene_corners[0] + Point2f( (float)cpu_img1.cols, 0),
Scalar( 0, 255, 0), 2, LINE_AA );
return img_matches;
}
}
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// use cpu findHomography interface to calculate the transformation matrix
int main(int argc, char* argv[])
{
std::vector<cv::ocl::Info> info;
if(cv::ocl::getDevice(info) == 0)
{
std::cout << "Error: Did not find a valid OpenCL device!" << std::endl;
return -1;
}
ocl::setDevice(info[0]);
Mat cpu_img1, cpu_img2, cpu_img1_grey, cpu_img2_grey;
oclMat img1, img2;
bool useCPU = false;
bool useGPU = false;
bool useALL = false;
for (int i = 1; i < argc; ++i)
{
if (String(argv[i]) == "--left")
{
cpu_img1 = imread(argv[++i]);
CV_Assert(!cpu_img1.empty());
cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY);
img1 = cpu_img1_grey;
}
else if (String(argv[i]) == "--right")
{
cpu_img2 = imread(argv[++i]);
CV_Assert(!cpu_img2.empty());
cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY);
img2 = cpu_img2_grey;
}
else if (String(argv[i]) == "-c")
{
useCPU = true;
useGPU = false;
useALL = false;
}else if(String(argv[i]) == "-g")
{
useGPU = true;
useCPU = false;
useALL = false;
}else if(String(argv[i]) == "-a")
{
useALL = true;
useCPU = false;
useGPU = false;
}
else if (String(argv[i]) == "--help")
{
help();
return -1;
}
}
if(!useCPU)
{
std::cout
<< "Device name:"
<< info[0].DeviceName[0]
<< std::endl;
}
double surf_time = 0.;
//declare input/output
std::vector<KeyPoint> keypoints1, keypoints2;
std::vector<DMatch> matches;
std::vector<KeyPoint> gpu_keypoints1;
std::vector<KeyPoint> gpu_keypoints2;
std::vector<DMatch> gpu_matches;
Mat descriptors1CPU, descriptors2CPU;
oclMat keypoints1GPU, keypoints2GPU;
oclMat descriptors1GPU, descriptors2GPU;
//instantiate detectors/matchers
SURFDetector<SURF> cpp_surf;
SURFDetector<SURF_OCL> ocl_surf;
SURFMatcher<BFMatcher> cpp_matcher;
SURFMatcher<BFMatcher_OCL> ocl_matcher;
//-- start of timing section
if (useCPU)
{
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
}
workEnd();
std::cout << "CPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}
else if(useGPU)
{
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
ocl_surf(img1, oclMat(), keypoints1, descriptors1GPU);
ocl_surf(img2, oclMat(), keypoints2, descriptors2GPU);
ocl_matcher.match(descriptors1GPU, descriptors2GPU, matches);
}
workEnd();
std::cout << "OCL: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "OCL: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}else
{
//cpu runs
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
cpp_surf(cpu_img1_grey, Mat(), keypoints1, descriptors1CPU);
cpp_surf(cpu_img2_grey, Mat(), keypoints2, descriptors2CPU);
cpp_matcher.match(descriptors1CPU, descriptors2CPU, matches);
}
workEnd();
std::cout << "\nCPP: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "CPP: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "(CPP)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl;
//gpu runs
for (int i = 0; i <= LOOP_NUM; i++)
{
if(i == 1) workBegin();
ocl_surf(img1, oclMat(), gpu_keypoints1, descriptors1GPU);
ocl_surf(img2, oclMat(), gpu_keypoints2, descriptors2GPU);
ocl_matcher.match(descriptors1GPU, descriptors2GPU, gpu_matches);
}
workEnd();
std::cout << "\nOCL: FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
std::cout << "OCL: FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
surf_time = getTime();
std::cout << "(OCL)SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}
//--------------------------------------------------------------------------
std::vector<Point2f> cpu_corner;
Mat img_matches = drawGoodMatches(cpu_img1, cpu_img2, keypoints1, keypoints2, matches, cpu_corner);
std::vector<Point2f> gpu_corner;
Mat ocl_img_matches;
if(useALL || (!useCPU&&!useGPU))
{
ocl_img_matches = drawGoodMatches(cpu_img1, cpu_img2, gpu_keypoints1, gpu_keypoints2, gpu_matches, gpu_corner);
//check accuracy
std::cout<<"\nCheck accuracy:\n";
if(cpu_corner.size()!=gpu_corner.size())
std::cout<<"Failed\n";
else
{
bool result = false;
for(size_t i = 0; i < cpu_corner.size(); i++)
{
if((std::abs(cpu_corner[i].x - gpu_corner[i].x) > 10)
||(std::abs(cpu_corner[i].y - gpu_corner[i].y) > 10))
{
std::cout<<"Failed\n";
result = false;
break;
}
result = true;
}
if(result)
std::cout<<"Passed\n";
}
}
//-- Show detected matches
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", img_matches);
waitKey(0);
if (useCPU)
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
}
else if(useGPU)
{
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", img_matches);
}else
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", ocl_img_matches);
}
waitKey(0);
return 0;
}