finished ocl::HoughCircles

Suenghoon Park 2012-12-14 03:25:46 -05:00
parent 0656f13107
commit 13c44dd318
4 changed files with 529 additions and 503 deletions

View File

@@ -833,7 +833,8 @@ namespace cv
         {
             oclMat edges;
             oclMat accum;
-            oclMat list;
+            oclMat srcPoints;
+            oclMat centers;
             CannyBuf cannyBuf;
         };
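For orientation, here is a minimal, hedged sketch of how the buffer-backed API introduced here is meant to be driven. Parameter values are illustrative and not taken from the patch; CV_HOUGH_GRADIENT is the standard OpenCV method constant, and note that HoughCirclesDownload is still flagged with a FIX ME further down in this commit.

#include "opencv2/ocl/ocl.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::Mat gray = cv::imread("coins.png", 0);   // 8-bit single-channel input (file name is illustrative)
    cv::ocl::oclMat d_src(gray);                 // upload to the device

    cv::ocl::oclMat d_circles;
    cv::ocl::HoughCircles(d_src, d_circles,
                          CV_HOUGH_GRADIENT,     // method
                          1.0f,                  // dp
                          20.0f,                 // minDist
                          100,                   // cannyThreshold
                          30,                    // votesThreshold
                          10, 50,                // minRadius, maxRadius
                          1000);                 // maxCircles

    // Download path is still guarded in this commit (see the FIX ME below);
    // shown here only to illustrate the intended round trip.
    cv::Mat h_circles;
    cv::ocl::HoughCirclesDownload(d_circles, h_circles);
    return 0;
}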

View File

@@ -50,31 +50,29 @@ using namespace cv::ocl;
 #if !defined (HAVE_OPENCL)
-// void cv::ocl::HoughLines(const oclMat&, oclMat&, float, float, int, bool, int) { throw_nogpu(); }
-// void cv::ocl::HoughLines(const oclMat&, oclMat&, HoughLinesBuf&, float, float, int, bool, int) { throw_nogpu(); }
-// void cv::ocl::HoughLinesDownload(const oclMat&, OutputArray, OutputArray) { throw_nogpu(); }
 void cv::ocl::HoughCircles(const oclMat&, oclMat&, int, float, float, int, int, int, int, int) { throw_nogpu(); }
 void cv::ocl::HoughCircles(const oclMat&, oclMat&, HoughCirclesBuf&, int, float, float, int, int, int, int, int) { throw_nogpu(); }
 void cv::ocl::HoughCirclesDownload(const oclMat&, OutputArray) { throw_nogpu(); }
-// Ptr<GeneralizedHough_GPU> cv::ocl::GeneralizedHough_GPU::create(int) { throw_nogpu(); return Ptr<GeneralizedHough_GPU>(); }
-// cv::ocl::GeneralizedHough_GPU::~GeneralizedHough_GPU() {}
-// void cv::ocl::GeneralizedHough_GPU::setTemplate(const oclMat&, int, Point) { throw_nogpu(); }
-// void cv::ocl::GeneralizedHough_GPU::setTemplate(const oclMat&, const oclMat&, const oclMat&, Point) { throw_nogpu(); }
-// void cv::ocl::GeneralizedHough_GPU::detect(const oclMat&, oclMat&, int) { throw_nogpu(); }
-// void cv::ocl::GeneralizedHough_GPU::detect(const oclMat&, const oclMat&, const oclMat&, oclMat&) { throw_nogpu(); }
-// void cv::ocl::GeneralizedHough_GPU::download(const oclMat&, OutputArray, OutputArray) { throw_nogpu(); }
-// void cv::ocl::GeneralizedHough_GPU::release() {}

 #else /* !defined (HAVE_OPENCL) */

-namespace cv { namespace ocl
-{
-    int buildPointList_gpu(const oclMat& src, unsigned int* list);
+#define MUL_UP(a, b) ((a)/(b)+1)*(b)
+
+namespace cv { namespace ocl {
     ///////////////////////////OpenCL kernel strings///////////////////////////
-    extern const char *hough;
+    extern const char *imgproc_hough;
+
+    namespace hough
+    {
+        int buildPointList_gpu(const oclMat& src, oclMat& list);
+        void circlesAccumCenters_gpu(const unsigned int* list, int count, const oclMat& dx, const oclMat& dy, oclMat& accum, int minRadius, int maxRadius, float idp);
+        int buildCentersList_gpu(const oclMat& accum, oclMat& centers, int threshold);
+        int circlesAccumRadius_gpu(const oclMat& centers, int centersCount,
+                                   const oclMat& list, int count,
+                                   oclMat& circles, int maxCircles,
+                                   float dp, int minRadius, int maxRadius, int threshold);
+    }
 }}
@@ -82,9 +80,9 @@ namespace cv { namespace ocl
 //////////////////////////////////////////////////////////
 // common functions

-namespace cv { namespace ocl
+namespace cv { namespace ocl { namespace hough
 {
-    int buildPointList_gpu(const oclMat& src, unsigned int* list)
+    int buildPointList_gpu(const oclMat& src, oclMat& list)
     {
         const int PIXELS_PER_THREAD = 16;
@@ -102,8 +100,8 @@ namespace cv { namespace ocl
         size_t localThreads[3] = { blkSizeX, blkSizeY, 1 };

         const int PIXELS_PER_BLOCK = blkSizeX * PIXELS_PER_THREAD;
-        const size_t glbSizeX = src.cols % (PIXELS_PER_BLOCK) == 0 ? src.cols : (src.cols / PIXELS_PER_BLOCK + 1) * PIXELS_PER_BLOCK;
-        const size_t glbSizeY = src.rows % blkSizeY == 0 ? src.rows : (src.rows / blkSizeY + 1) * blkSizeY;
+        const size_t glbSizeX = src.cols % (PIXELS_PER_BLOCK) == 0 ? src.cols : MUL_UP(src.cols, PIXELS_PER_BLOCK);
+        const size_t glbSizeY = src.rows % blkSizeY == 0 ? src.rows : MUL_UP(src.rows, blkSizeY);
         size_t globalThreads[3] = { glbSizeX, glbSizeY, 1 };

         vector<pair<size_t , const void *> > args;
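A quick illustration (not part of the patch) of what the new MUL_UP macro does and why every call site guards it with a modulo test: the macro rounds a global work size up to the next multiple of the block size, but it overshoots by one whole block when the size is already an exact multiple.

#include <cassert>

#define MUL_UP(a, b) ((a)/(b)+1)*(b)

int main()
{
    // 100 work-items rounded up to a 32-wide block -> 128
    assert(MUL_UP(100, 32) == 128);
    // An exact multiple overshoots by one block (128 -> 160),
    // hence the callers' pattern: a % b == 0 ? a : MUL_UP(a, b)
    assert(MUL_UP(128, 32) == 160);
    return 0;
}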
@@ -111,110 +109,141 @@ namespace cv { namespace ocl
         args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
         args.push_back( make_pair( sizeof(cl_int) , (void *)&src.rows ));
         args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
-        args.push_back( make_pair( sizeof(cl_mem) , (void *)&list ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&list.data ));
         args.push_back( make_pair( sizeof(cl_mem) , (void *)&counter ));

-        openCLExecuteKernel(src.clCxt, &hough, "buildPointList", globalThreads, localThreads, args, -1, -1);
+        openCLExecuteKernel(src.clCxt, &imgproc_hough, "buildPointList", globalThreads, localThreads, args, -1, -1);

         openCLSafeCall(clEnqueueReadBuffer(src.clCxt->impl->clCmdQueue, counter, CL_TRUE, 0, sizeof(int), &totalCount, 0, NULL, NULL));

         openCLSafeCall(clReleaseMemObject(counter));

         return totalCount;
     }
-}}
+}}}

-//////////////////////////////////////////////////////////
-// HoughLines
-
-// namespace cv { namespace ocl { namespace device
-// {
-//     namespace hough
-//     {
-//         void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20);
-//         int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort);
-//     }
-// }}}
-
-// void cv::ocl::HoughLines(const oclMat& src, oclMat& lines, float rho, float theta, int threshold, bool doSort, int maxLines)
-// {
-//     HoughLinesBuf buf;
-//     HoughLines(src, lines, buf, rho, theta, threshold, doSort, maxLines);
-// }
-
-// void cv::ocl::HoughLines(const oclMat& src, oclMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort, int maxLines)
-// {
-//     using namespace cv::ocl::device::hough;
-
-//     CV_Assert(src.type() == CV_8UC1);
-//     CV_Assert(src.cols < std::numeric_limits<unsigned short>::max());
-//     CV_Assert(src.rows < std::numeric_limits<unsigned short>::max());
-
-//     ensureSizeIsEnough(1, src.size().area(), CV_32SC1, buf.list);
-//     unsigned int* srcPoints = buf.list.ptr<unsigned int>();
-//     const int pointsCount = buildPointList_gpu(src, srcPoints);
-//     if (pointsCount == 0)
-//     {
-//         lines.release();
-//         return;
-//     }
-
-//     const int numangle = cvRound(CV_PI / theta);
-//     const int numrho = cvRound(((src.cols + src.rows) * 2 + 1) / rho);
-//     CV_Assert(numangle > 0 && numrho > 0);
-
-//     ensureSizeIsEnough(numangle + 2, numrho + 2, CV_32SC1, buf.accum);
-//     buf.accum.setTo(Scalar::all(0));
-
-//     DeviceInfo devInfo;
-//     linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, devInfo.sharedMemPerBlock(), devInfo.supports(FEATURE_SET_COMPUTE_20));
-
-//     ensureSizeIsEnough(2, maxLines, CV_32FC2, lines);
-
-//     int linesCount = linesGetResult_gpu(buf.accum, lines.ptr<float2>(0), lines.ptr<int>(1), maxLines, rho, theta, threshold, doSort);
-
-//     if (linesCount > 0)
-//         lines.cols = linesCount;
-//     else
-//         lines.release();
-// }
-
-// void cv::ocl::HoughLinesDownload(const oclMat& d_lines, OutputArray h_lines_, OutputArray h_votes_)
-// {
-//     if (d_lines.empty())
-//     {
-//         h_lines_.release();
-//         if (h_votes_.needed())
-//             h_votes_.release();
-//         return;
-//     }
-
-//     CV_Assert(d_lines.rows == 2 && d_lines.type() == CV_32FC2);
-
-//     h_lines_.create(1, d_lines.cols, CV_32FC2);
-//     Mat h_lines = h_lines_.getMat();
-//     d_lines.row(0).download(h_lines);
-
-//     if (h_votes_.needed())
-//     {
-//         h_votes_.create(1, d_lines.cols, CV_32SC1);
-//         Mat h_votes = h_votes_.getMat();
-//         oclMat d_votes(1, d_lines.cols, CV_32SC1, const_cast<int*>(d_lines.ptr<int>(1)));
-//         d_votes.download(h_votes);
-//     }
-// }
-
 //////////////////////////////////////////////////////////
 // HoughCircles

-// namespace cv { namespace ocl
-// {
-//     namespace hough
-//     {
-//         void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp);
-//         int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold);
-//         int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count,
-//                                    float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20);
-//     }
-// }}
+namespace cv { namespace ocl { namespace hough
+{
+    void circlesAccumCenters_gpu(const oclMat& list, int count, const oclMat& dx, const oclMat& dy, oclMat& accum, int minRadius, int maxRadius, float idp)
+    {
+        const size_t blkSizeX = 256;
+        size_t localThreads[3] = { 256, 1, 1 };
+
+        const size_t glbSizeX = count % blkSizeX == 0 ? count : MUL_UP(count, blkSizeX);
+        size_t globalThreads[3] = { glbSizeX, 1, 1 };
+
+        const int width = accum.cols - 2;
+        const int height = accum.rows - 2;
+
+        vector<pair<size_t , const void *> > args;
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&list.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&count ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&dx.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&dx.step ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&dy.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&dy.step ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&accum.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&accum.step ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&width ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&height ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&minRadius));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&maxRadius));
+        args.push_back( make_pair( sizeof(cl_float), (void *)&idp));
+
+        openCLExecuteKernel(accum.clCxt, &imgproc_hough, "circlesAccumCenters", globalThreads, localThreads, args, -1, -1);
+    }
+
+    int buildCentersList_gpu(const oclMat& accum, oclMat& centers, int threshold)
+    {
+        int totalCount = 0;
+        int err = CL_SUCCESS;
+        cl_mem counter = clCreateBuffer(accum.clCxt->impl->clContext,
+                                        CL_MEM_COPY_HOST_PTR,
+                                        sizeof(int),
+                                        &totalCount,
+                                        &err);
+        openCLSafeCall(err);
+
+        const size_t blkSizeX = 32;
+        const size_t blkSizeY = 8;
+        size_t localThreads[3] = { blkSizeX, blkSizeY, 1 };
+
+        const size_t glbSizeX = (accum.cols - 2) % blkSizeX == 0 ? accum.cols - 2 : MUL_UP(accum.cols - 2, blkSizeX);
+        const size_t glbSizeY = (accum.rows - 2) % blkSizeY == 0 ? accum.rows - 2 : MUL_UP(accum.rows - 2, blkSizeY);
+        size_t globalThreads[3] = { glbSizeX, glbSizeY, 1 };
+
+        vector<pair<size_t , const void *> > args;
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&accum.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&accum.cols ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&accum.rows ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&accum.step ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&centers.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&threshold ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&counter ));
+
+        openCLExecuteKernel(accum.clCxt, &imgproc_hough, "buildCentersList", globalThreads, localThreads, args, -1, -1);
+
+        openCLSafeCall(clEnqueueReadBuffer(accum.clCxt->impl->clCmdQueue, counter, CL_TRUE, 0, sizeof(int), &totalCount, 0, NULL, NULL));
+
+        openCLSafeCall(clReleaseMemObject(counter));
+
+        return totalCount;
+    }
+
+    int circlesAccumRadius_gpu(const oclMat& centers, int centersCount,
+                               const oclMat& list, int count,
+                               oclMat& circles, int maxCircles,
+                               float dp, int minRadius, int maxRadius, int threshold)
+    {
+        int totalCount = 0;
+        int err = CL_SUCCESS;
+        cl_mem counter = clCreateBuffer(circles.clCxt->impl->clContext,
+                                        CL_MEM_COPY_HOST_PTR,
+                                        sizeof(int),
+                                        &totalCount,
+                                        &err);
+        openCLSafeCall(err);
+
+        const size_t blkSizeX = circles.clCxt->impl->maxWorkGroupSize;
+        size_t localThreads[3] = { blkSizeX, 1, 1 };
+
+        const size_t glbSizeX = centersCount * blkSizeX;
+        size_t globalThreads[3] = { glbSizeX, 1, 1 };
+
+        const int histSize = maxRadius - minRadius + 1;
+        size_t smemSize = (histSize + 2) * sizeof(int);
+
+        vector<pair<size_t , const void *> > args;
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&centers.data ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&list.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&count ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&circles.data ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&maxCircles ));
+        args.push_back( make_pair( sizeof(cl_float), (void *)&dp ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&minRadius ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&maxRadius ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&histSize ));
+        args.push_back( make_pair( sizeof(cl_int) , (void *)&threshold ));
+        args.push_back( make_pair( smemSize       , (void *)NULL ));
+        args.push_back( make_pair( sizeof(cl_mem) , (void *)&counter ));
+
+        CV_Assert(circles.offset == 0);
+
+        openCLExecuteKernel(circles.clCxt, &imgproc_hough, "circlesAccumRadius", globalThreads, localThreads, args, -1, -1);
+
+        openCLSafeCall(clEnqueueReadBuffer(circles.clCxt->impl->clCmdQueue, counter, CL_TRUE, 0, sizeof(int), &totalCount, 0, NULL, NULL));
+
+        openCLSafeCall(clReleaseMemObject(counter));
+
+        totalCount = ::min(totalCount, maxCircles);
+
+        return totalCount;
+    }
+}}} // namespace cv { namespace ocl { namespace hough

 void cv::ocl::HoughCircles(const oclMat& src, oclMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles)
 {
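The three wrappers above all share one idiom for getting a count back from the device: a single-int cl_mem is created with CL_MEM_COPY_HOST_PTR (initialised to zero), the kernel bumps it with atomic_add, and a blocking clEnqueueReadBuffer fetches the final value before the buffer is released. A condensed, stand-alone sketch of that idiom follows; the helper name and the bare context/queue parameters are illustrative, not part of the patch.

#include <CL/cl.h>

// Illustrative helper: create a zero-initialised device counter, let a kernel
// atomically increment it, then read the total back on the host. On the ocl
// module's in-order queue, the blocking read also serialises after the kernel.
static int readbackCounterDemo(cl_context ctx, cl_command_queue queue)
{
    int count = 0;
    cl_int err = CL_SUCCESS;
    cl_mem counter = clCreateBuffer(ctx, CL_MEM_COPY_HOST_PTR, sizeof(int), &count, &err);
    if (err != CL_SUCCESS)
        return -1;

    // ... enqueue a kernel here that takes `counter` and calls atomic_add(counter, n) ...

    err = clEnqueueReadBuffer(queue, counter, CL_TRUE, 0, sizeof(int), &count, 0, NULL, NULL);
    clReleaseMemObject(counter);
    return err == CL_SUCCESS ? count : -1;
}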
@@ -239,119 +268,140 @@ void cv::ocl::HoughCircles(const oclMat& src, oclMat& circles, HoughCirclesBuf&
     cv::ocl::Canny(src, buf.cannyBuf, buf.edges, std::max(cannyThreshold / 2, 1), cannyThreshold);

-    ensureSizeIsEnough(2, src.size().area(), CV_32SC1, buf.list);
-    // unsigned int* srcPoints = buf.list.ptr<unsigned int>(0);
-    unsigned int* srcPoints = (unsigned int*)buf.list.data;
-    // unsigned int* centers = buf.list.ptr<unsigned int>(1);
-    unsigned int* centers = (unsigned int*)buf.list.data + buf.list.step;
-
-    const int pointsCount = buildPointList_gpu(buf.edges, srcPoints);
-    //std::cout << "pointsCount: " << pointsCount << std::endl;
+    ensureSizeIsEnough(1, src.size().area(), CV_32SC1, buf.srcPoints);
+    const int pointsCount = hough::buildPointList_gpu(buf.edges, buf.srcPoints);
     if (pointsCount == 0)
     {
         circles.release();
         return;
     }

-    // ensureSizeIsEnough(cvCeil(src.rows * idp) + 2, cvCeil(src.cols * idp) + 2, CV_32SC1, buf.accum);
-    // buf.accum.setTo(Scalar::all(0));
-
-    // circlesAccumCenters_gpu(srcPoints, pointsCount, buf.cannyBuf.dx, buf.cannyBuf.dy, buf.accum, minRadius, maxRadius, idp);
-
-    // int centersCount = buildCentersList_gpu(buf.accum, centers, votesThreshold);
-    // if (centersCount == 0)
-    // {
-    //     circles.release();
-    //     return;
-    // }
+    ensureSizeIsEnough(cvCeil(src.rows * idp) + 2, cvCeil(src.cols * idp) + 2, CV_32SC1, buf.accum);
+    buf.accum.setTo(Scalar::all(0));
+
+    hough::circlesAccumCenters_gpu(buf.srcPoints, pointsCount, buf.cannyBuf.dx, buf.cannyBuf.dy, buf.accum, minRadius, maxRadius, idp);
+
+    ensureSizeIsEnough(1, src.size().area(), CV_32SC1, buf.centers);
+    int centersCount = hough::buildCentersList_gpu(buf.accum, buf.centers, votesThreshold);
+    if (centersCount == 0)
+    {
+        circles.release();
+        return;
+    }

-    // if (minDist > 1)
-    // {
-    //     cv::AutoBuffer<ushort2> oldBuf_(centersCount);
-    //     cv::AutoBuffer<ushort2> newBuf_(centersCount);
-    //     int newCount = 0;
-
-    //     ushort2* oldBuf = oldBuf_;
-    //     ushort2* newBuf = newBuf_;
-
-    //     cudaSafeCall( cudaMemcpy(oldBuf, centers, centersCount * sizeof(ushort2), cudaMemcpyDeviceToHost) );
-
-    //     const int cellSize = cvRound(minDist);
-    //     const int gridWidth = (src.cols + cellSize - 1) / cellSize;
-    //     const int gridHeight = (src.rows + cellSize - 1) / cellSize;
-
-    //     std::vector< std::vector<ushort2> > grid(gridWidth * gridHeight);
-
-    //     const float minDist2 = minDist * minDist;
-
-    //     for (int i = 0; i < centersCount; ++i)
-    //     {
-    //         ushort2 p = oldBuf[i];
-
-    //         bool good = true;
-
-    //         int xCell = static_cast<int>(p.x / cellSize);
-    //         int yCell = static_cast<int>(p.y / cellSize);
-
-    //         int x1 = xCell - 1;
-    //         int y1 = yCell - 1;
-    //         int x2 = xCell + 1;
-    //         int y2 = yCell + 1;
-
-    //         // boundary check
-    //         x1 = std::max(0, x1);
-    //         y1 = std::max(0, y1);
-    //         x2 = std::min(gridWidth - 1, x2);
-    //         y2 = std::min(gridHeight - 1, y2);
-
-    //         for (int yy = y1; yy <= y2; ++yy)
-    //         {
-    //             for (int xx = x1; xx <= x2; ++xx)
-    //             {
-    //                 vector<ushort2>& m = grid[yy * gridWidth + xx];
-
-    //                 for(size_t j = 0; j < m.size(); ++j)
-    //                 {
-    //                     float dx = (float)(p.x - m[j].x);
-    //                     float dy = (float)(p.y - m[j].y);
-
-    //                     if (dx * dx + dy * dy < minDist2)
-    //                     {
-    //                         good = false;
-    //                         goto break_out;
-    //                     }
-    //                 }
-    //             }
-    //         }
-
-    //         break_out:
-
-    //         if(good)
-    //         {
-    //             grid[yCell * gridWidth + xCell].push_back(p);
-
-    //             newBuf[newCount++] = p;
-    //         }
-    //     }
-
-    //     cudaSafeCall( cudaMemcpy(centers, newBuf, newCount * sizeof(unsigned int), cudaMemcpyHostToDevice) );
-    //     centersCount = newCount;
-    // }
-
-    // ensureSizeIsEnough(1, maxCircles, CV_32FC3, circles);
-
-    // DeviceInfo devInfo;
-    // const int circlesCount = circlesAccumRadius_gpu(centers, centersCount, srcPoints, pointsCount, circles.ptr<float3>(), maxCircles,
-    //                                                 dp, minRadius, maxRadius, votesThreshold, devInfo.supports(FEATURE_SET_COMPUTE_20));
-
-    // if (circlesCount > 0)
-    //     circles.cols = circlesCount;
-    // else
-    //     circles.release();
+    if (minDist > 1)
+    {
+        cv::AutoBuffer<unsigned int> oldBuf_(centersCount);
+        cv::AutoBuffer<unsigned int> newBuf_(centersCount);
+        int newCount = 0;
+
+        unsigned int* oldBuf = oldBuf_;
+        unsigned int* newBuf = newBuf_;
+
+        openCLSafeCall(clEnqueueReadBuffer(buf.centers.clCxt->impl->clCmdQueue,
+                                           (cl_mem)buf.centers.data,
+                                           CL_TRUE,
+                                           0,
+                                           centersCount * sizeof(unsigned int),
+                                           oldBuf,
+                                           0,
+                                           NULL,
+                                           NULL));
+
+        const int cellSize = cvRound(minDist);
+        const int gridWidth = (src.cols + cellSize - 1) / cellSize;
+        const int gridHeight = (src.rows + cellSize - 1) / cellSize;
+
+        std::vector< std::vector<unsigned int> > grid(gridWidth * gridHeight);
+
+        const float minDist2 = minDist * minDist;
+
+        for (int i = 0; i < centersCount; ++i)
+        {
+            unsigned int p = oldBuf[i];
+            const int px = p & 0xFFFF;
+            const int py = (p >> 16) & 0xFFFF;
+
+            bool good = true;
+
+            int xCell = static_cast<int>(px / cellSize);
+            int yCell = static_cast<int>(py / cellSize);
+
+            int x1 = xCell - 1;
+            int y1 = yCell - 1;
+            int x2 = xCell + 1;
+            int y2 = yCell + 1;
+
+            // boundary check
+            x1 = std::max(0, x1);
+            y1 = std::max(0, y1);
+            x2 = std::min(gridWidth - 1, x2);
+            y2 = std::min(gridHeight - 1, y2);
+
+            for (int yy = y1; yy <= y2; ++yy)
+            {
+                for (int xx = x1; xx <= x2; ++xx)
+                {
+                    vector<unsigned int>& m = grid[yy * gridWidth + xx];
+
+                    for(size_t j = 0; j < m.size(); ++j)
+                    {
+                        const int val = m[j];
+                        const int jx = val & 0xFFFF;
+                        const int jy = (val >> 16) & 0xFFFF;
+
+                        float dx = (float)(px - jx);
+                        float dy = (float)(py - jy);
+
+                        if (dx * dx + dy * dy < minDist2)
+                        {
+                            good = false;
+                            goto break_out;
+                        }
+                    }
+                }
+            }
+
+            break_out:
+
+            if(good)
+            {
+                grid[yCell * gridWidth + xCell].push_back(p);
+
+                newBuf[newCount++] = p;
+            }
+        }
+
+        openCLSafeCall(clEnqueueWriteBuffer(buf.centers.clCxt->impl->clCmdQueue,
+                                            (cl_mem)buf.centers.data,
+                                            CL_TRUE,
+                                            0,
+                                            newCount * sizeof(unsigned int),
+                                            newBuf,
+                                            0,
+                                            0,
+                                            0));
+        centersCount = newCount;
+    }
+
+    ensureSizeIsEnough(1, maxCircles, CV_32FC3, circles);
+
+    const int circlesCount = hough::circlesAccumRadius_gpu(buf.centers, centersCount,
+                                                           buf.srcPoints, pointsCount,
+                                                           circles, maxCircles,
+                                                           dp, minRadius, maxRadius, votesThreshold);
+
+    if (circlesCount > 0)
+        circles.cols = circlesCount;
+    else
+        circles.release();
 }

 void cv::ocl::HoughCirclesDownload(const oclMat& d_circles, cv::OutputArray h_circles_)
 {
+    // FIX ME: garbage values are copied!
+    CV_Error(CV_StsNotImplemented, "HoughCirclesDownload is not implemented");
     if (d_circles.empty())
     {
         h_circles_.release();
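Both the kernels and the host-side minDist filtering above rely on packing a pixel coordinate into one 32-bit word as (y << 16) | x, which caps either coordinate at 65535. A tiny stand-alone illustration (the helper names are mine, not part of the patch):

#include <cassert>

// Pack/unpack an (x, y) pixel coordinate the way the Hough kernels do.
static inline unsigned int packPoint(int x, int y) { return ((unsigned int)y << 16) | (unsigned int)x; }
static inline int unpackX(unsigned int v)          { return v & 0xFFFF; }
static inline int unpackY(unsigned int v)          { return (v >> 16) & 0xFFFF; }

int main()
{
    unsigned int v = packPoint(640, 480);
    assert(unpackX(v) == 640 && unpackY(v) == 480);
    return 0;
}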

View File

@@ -1,307 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable
#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable
////////////////////////////////////////////////////////////////////////
// buildPointList
#define PIXELS_PER_THREAD 16
__kernel void buildPointList(__global const uchar* src,
int cols,
int rows,
int step,
__global unsigned int* list,
__global int* counter)
{
__local unsigned int s_queues[4][32 * PIXELS_PER_THREAD];
__local int s_qsize[4];
__local int s_globStart[4];
const int x = get_group_id(0) * get_local_size(0) * PIXELS_PER_THREAD + get_local_id(0);
const int y = get_global_id(1);
if (get_local_id(0) == 0)
s_qsize[get_local_id(1)] = 0;
barrier(CLK_LOCAL_MEM_FENCE);
if (y < rows)
{
// fill the queue
__global const uchar* srcRow = &src[y * step];
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < cols; ++i, xx += get_local_size(0))
{
if (srcRow[xx])
{
const unsigned int val = (y << 16) | xx;
const int qidx = atomic_add(&s_qsize[get_local_id(1)], 1);
s_queues[get_local_id(1)][qidx] = val;
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
// let one work-item reserve the space required in the global list
if (get_local_id(0) == 0 && get_local_id(1) == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < get_local_size(1); ++i)
{
s_globStart[i] = totalSize;
totalSize += s_qsize[i];
}
// calculate the offset in the global list
const int globalOffset = atomic_add(counter, totalSize);
for (int i = 0; i < get_local_size(1); ++i)
s_globStart[i] += globalOffset;
}
barrier(CLK_GLOBAL_MEM_FENCE);
// copy local queues to global queue
const int qsize = s_qsize[get_local_id(1)];
int gidx = s_globStart[get_local_id(1)] + get_local_id(0);
for(int i = get_local_id(0); i < qsize; i += get_local_size(0), gidx += get_local_size(0))
list[gidx] = s_queues[get_local_id(1)][i];
}
////////////////////////////////////////////////////////////////////////
// circlesAccumCenters
// __global__ void circlesAccumCenters(const unsigned int* list, const int count, const PtrStepi dx, const PtrStepi dy,
// PtrStepi accum, const int width, const int height, const int minRadius, const int maxRadius, const float idp)
// {
// const int SHIFT = 10;
// const int ONE = 1 << SHIFT;
// const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// if (tid >= count)
// return;
// const unsigned int val = list[tid];
// const int x = (val & 0xFFFF);
// const int y = (val >> 16) & 0xFFFF;
// const int vx = dx(y, x);
// const int vy = dy(y, x);
// if (vx == 0 && vy == 0)
// return;
// const float mag = ::sqrtf(vx * vx + vy * vy);
// const int x0 = __float2int_rn((x * idp) * ONE);
// const int y0 = __float2int_rn((y * idp) * ONE);
// int sx = __float2int_rn((vx * idp) * ONE / mag);
// int sy = __float2int_rn((vy * idp) * ONE / mag);
// // Step from minRadius to maxRadius in both directions of the gradient
// for (int k1 = 0; k1 < 2; ++k1)
// {
// int x1 = x0 + minRadius * sx;
// int y1 = y0 + minRadius * sy;
// for (int r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r)
// {
// const int x2 = x1 >> SHIFT;
// const int y2 = y1 >> SHIFT;
// if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height)
// break;
// ::atomicAdd(accum.ptr(y2 + 1) + x2 + 1, 1);
// }
// sx = -sx;
// sy = -sy;
// }
// }
// void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp)
// {
// const dim3 block(256);
// const dim3 grid(divUp(count, block.x));
// cudaSafeCall( cudaFuncSetCacheConfig(circlesAccumCenters, cudaFuncCachePreferL1) );
// circlesAccumCenters<<<grid, block>>>(list, count, dx, dy, accum, accum.cols - 2, accum.rows - 2, minRadius, maxRadius, idp);
// cudaSafeCall( cudaGetLastError() );
// cudaSafeCall( cudaDeviceSynchronize() );
// }
// ////////////////////////////////////////////////////////////////////////
// // buildCentersList
// __global__ void buildCentersList(const PtrStepSzi accum, unsigned int* centers, const int threshold)
// {
// const int x = blockIdx.x * blockDim.x + threadIdx.x;
// const int y = blockIdx.y * blockDim.y + threadIdx.y;
// if (x < accum.cols - 2 && y < accum.rows - 2)
// {
// const int top = accum(y, x + 1);
// const int left = accum(y + 1, x);
// const int cur = accum(y + 1, x + 1);
// const int right = accum(y + 1, x + 2);
// const int bottom = accum(y + 2, x + 1);
// if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right)
// {
// const unsigned int val = (y << 16) | x;
// const int idx = ::atomicAdd(&g_counter, 1);
// centers[idx] = val;
// }
// }
// }
// int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold)
// {
// void* counterPtr;
// cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
// cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
// const dim3 block(32, 8);
// const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));
// cudaSafeCall( cudaFuncSetCacheConfig(buildCentersList, cudaFuncCachePreferL1) );
// buildCentersList<<<grid, block>>>(accum, centers, threshold);
// cudaSafeCall( cudaGetLastError() );
// cudaSafeCall( cudaDeviceSynchronize() );
// int totalCount;
// cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
// return totalCount;
// }
// ////////////////////////////////////////////////////////////////////////
// // circlesAccumRadius
// __global__ void circlesAccumRadius(const unsigned int* centers, const unsigned int* list, const int count,
// float3* circles, const int maxCircles, const float dp,
// const int minRadius, const int maxRadius, const int histSize, const int threshold)
// {
// int* smem = DynamicSharedMem<int>();
// for (int i = threadIdx.x; i < histSize + 2; i += blockDim.x)
// smem[i] = 0;
// __syncthreads();
// unsigned int val = centers[blockIdx.x];
// float cx = (val & 0xFFFF);
// float cy = (val >> 16) & 0xFFFF;
// cx = (cx + 0.5f) * dp;
// cy = (cy + 0.5f) * dp;
// for (int i = threadIdx.x; i < count; i += blockDim.x)
// {
// val = list[i];
// const int x = (val & 0xFFFF);
// const int y = (val >> 16) & 0xFFFF;
// const float rad = ::sqrtf((cx - x) * (cx - x) + (cy - y) * (cy - y));
// if (rad >= minRadius && rad <= maxRadius)
// {
// const int r = __float2int_rn(rad - minRadius);
// Emulation::smem::atomicAdd(&smem[r + 1], 1);
// }
// }
// __syncthreads();
// for (int i = threadIdx.x; i < histSize; i += blockDim.x)
// {
// const int curVotes = smem[i + 1];
// if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2])
// {
// const int ind = ::atomicAdd(&g_counter, 1);
// if (ind < maxCircles)
// circles[ind] = make_float3(cx, cy, i + minRadius);
// }
// }
// }
// int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count,
// float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20)
// {
// void* counterPtr;
// cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
// cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
// const dim3 block(has20 ? 1024 : 512);
// const dim3 grid(centersCount);
// const int histSize = maxRadius - minRadius + 1;
// size_t smemSize = (histSize + 2) * sizeof(int);
// circlesAccumRadius<<<grid, block, smemSize>>>(centers, list, count, circles, maxCircles, dp, minRadius, maxRadius, histSize, threshold);
// cudaSafeCall( cudaGetLastError() );
// cudaSafeCall( cudaDeviceSynchronize() );
// int totalCount;
// cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
// totalCount = ::min(totalCount, maxCircles);
// return totalCount;
// }
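For reference, the work the buildPointList kernel performs (carried over essentially unchanged into the new imgproc_hough.cl below) is equivalent to this serial loop over the edge map; the parallel version only adds the local-queue and atomic-offset machinery so many work-items can append concurrently. A sketch under that reading, using the same packed encoding shown earlier:

#include <vector>
#include "opencv2/core/core.hpp"

// Serial equivalent of buildPointList: collect every non-zero pixel of an
// 8-bit edge image as a packed (y << 16) | x word.
static std::vector<unsigned int> buildPointListCpu(const cv::Mat& edges)
{
    std::vector<unsigned int> list;
    for (int y = 0; y < edges.rows; ++y)
    {
        const uchar* row = edges.ptr<uchar>(y);
        for (int x = 0; x < edges.cols; ++x)
            if (row[x])
                list.push_back(((unsigned int)y << 16) | (unsigned int)x);
    }
    return list;
}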

View File

@@ -0,0 +1,282 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Modified by Seunghoon Park(pclove1@gmail.com)
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable
#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable
////////////////////////////////////////////////////////////////////////
// buildPointList
#define PIXELS_PER_THREAD 16
// TODO: add offset to support ROI
__kernel void buildPointList(__global const uchar* src,
int cols,
int rows,
int step,
__global unsigned int* list,
__global int* counter)
{
__local unsigned int s_queues[4][32 * PIXELS_PER_THREAD];
__local int s_qsize[4];
__local int s_globStart[4];
const int x = get_group_id(0) * get_local_size(0) * PIXELS_PER_THREAD + get_local_id(0);
const int y = get_global_id(1);
if (get_local_id(0) == 0)
s_qsize[get_local_id(1)] = 0;
barrier(CLK_LOCAL_MEM_FENCE);
if (y < rows)
{
// fill the queue
__global const uchar* srcRow = &src[y * step];
for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < cols; ++i, xx += get_local_size(0))
{
if (srcRow[xx])
{
const unsigned int val = (y << 16) | xx;
const int qidx = atomic_add(&s_qsize[get_local_id(1)], 1);
s_queues[get_local_id(1)][qidx] = val;
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
// let one work-item reserve the space required in the global list
if (get_local_id(0) == 0 && get_local_id(1) == 0)
{
// find how many items are stored in each list
int totalSize = 0;
for (int i = 0; i < get_local_size(1); ++i)
{
s_globStart[i] = totalSize;
totalSize += s_qsize[i];
}
// calculate the offset in the global list
const int globalOffset = atomic_add(counter, totalSize);
for (int i = 0; i < get_local_size(1); ++i)
s_globStart[i] += globalOffset;
}
barrier(CLK_GLOBAL_MEM_FENCE);
// copy local queues to global queue
const int qsize = s_qsize[get_local_id(1)];
int gidx = s_globStart[get_local_id(1)] + get_local_id(0);
for(int i = get_local_id(0); i < qsize; i += get_local_size(0), gidx += get_local_size(0))
list[gidx] = s_queues[get_local_id(1)][i];
}
////////////////////////////////////////////////////////////////////////
// circlesAccumCenters
// TODO: add offset to support ROI
__kernel void circlesAccumCenters(__global const unsigned int* list,
const int count,
__global const int* dx,
const int dxStep,
__global const int* dy,
const int dyStep,
__global int* accum,
const int accumStep,
const int width,
const int height,
const int minRadius,
const int maxRadius,
const float idp)
{
const int dxStepInPixel = dxStep / sizeof(int);
const int dyStepInPixel = dyStep / sizeof(int);
const int accumStepInPixel = accumStep / sizeof(int);
const int SHIFT = 10;
const int ONE = 1 << SHIFT;
// const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int wid = get_global_id(0);
if (wid >= count)
return;
const unsigned int val = list[wid];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
const int vx = dx[mad24(y, dxStepInPixel, x)];
const int vy = dy[mad24(y, dyStepInPixel, x)];
if (vx == 0 && vy == 0)
return;
const float mag = sqrt(convert_float(vx * vx + vy * vy));
const int x0 = convert_int_rte((x * idp) * ONE);
const int y0 = convert_int_rte((y * idp) * ONE);
int sx = convert_int_rte((vx * idp) * ONE / mag);
int sy = convert_int_rte((vy * idp) * ONE / mag);
// Step from minRadius to maxRadius in both directions of the gradient
for (int k1 = 0; k1 < 2; ++k1)
{
int x1 = x0 + minRadius * sx;
int y1 = y0 + minRadius * sy;
for (int r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r)
{
const int x2 = x1 >> SHIFT;
const int y2 = y1 >> SHIFT;
if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height)
break;
atomic_add(&accum[mad24(y2+1, accumStepInPixel, x2+1)], 1);
}
sx = -sx;
sy = -sy;
}
}
// ////////////////////////////////////////////////////////////////////////
// // buildCentersList
// TODO: add offset to support ROI
__kernel void buildCentersList(__global const int* accum,
const int accumCols,
const int accumRows,
const int accumStep,
__global unsigned int* centers,
const int threshold,
__global int* counter)
{
const int accumStepInPixel = accumStep/sizeof(int);
const int x = get_global_id(0);
const int y = get_global_id(1);
if (x < accumCols - 2 && y < accumRows - 2)
{
const int top = accum[mad24(y, accumStepInPixel, x + 1)];
const int left = accum[mad24(y + 1, accumStepInPixel, x)];
const int cur = accum[mad24(y + 1, accumStepInPixel, x + 1)];
const int right = accum[mad24(y + 1, accumStepInPixel, x + 2)];
const int bottom = accum[mad24(y + 2, accumStepInPixel, x + 1)];
if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right)
{
const unsigned int val = (y << 16) | x;
const int idx = atomic_add(counter, 1);
centers[idx] = val;
}
}
}
// ////////////////////////////////////////////////////////////////////////
// // circlesAccumRadius
// TODO: add offset to support ROI
__kernel void circlesAccumRadius(__global const unsigned int* centers,
__global const unsigned int* list, const int count,
__global float4* circles, const int maxCircles,
const float dp,
const int minRadius, const int maxRadius,
const int histSize,
const int threshold,
__local int* smem,
__global int* counter)
{
for (int i = get_local_id(0); i < histSize + 2; i += get_local_size(0))
smem[i] = 0;
barrier(CLK_LOCAL_MEM_FENCE);
unsigned int val = centers[get_group_id(0)];
float cx = convert_float(val & 0xFFFF);
float cy = convert_float((val >> 16) & 0xFFFF);
cx = (cx + 0.5f) * dp;
cy = (cy + 0.5f) * dp;
for (int i = get_local_id(0); i < count; i += get_local_size(0))
{
val = list[i];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
const float rad = sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y));
if (rad >= minRadius && rad <= maxRadius)
{
const int r = convert_int_rte(rad - minRadius);
atomic_add(&smem[r + 1], 1);
}
}
barrier(CLK_LOCAL_MEM_FENCE);
for (int i = get_local_id(0); i < histSize; i += get_local_size(0))
{
const int curVotes = smem[i + 1];
if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2])
{
const int ind = atomic_add(counter, 1);
if (ind < maxCircles)
{
circles[ind] = (float4)(cx, cy, convert_float(i + minRadius), 0.0f);
}
}
}
}
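To make the circlesAccumRadius logic above easier to follow, here is a hedged CPU-side rendering of what one work-group does for a single candidate center: histogram the distances from the center to every edge point, then keep radii whose vote count clears the threshold and is a local maximum. Rounding and the dp scaling of the center are simplified relative to the kernel; this is a reference sketch, not part of the patch.

#include <cmath>
#include <utility>
#include <vector>

// For one candidate center (cx, cy), histogram point distances and report
// radii whose vote count is a local maximum above `threshold`.
static std::vector<std::pair<float, int> > votesForCenter(
        float cx, float cy,
        const std::vector<unsigned int>& points,   // packed (y << 16) | x edge points
        int minRadius, int maxRadius, int threshold)
{
    const int histSize = maxRadius - minRadius + 1;
    std::vector<int> hist(histSize + 2, 0);        // +2 guard bins, as in the kernel

    for (size_t i = 0; i < points.size(); ++i)
    {
        const int x = points[i] & 0xFFFF;
        const int y = (points[i] >> 16) & 0xFFFF;
        const float rad = std::sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y));
        if (rad >= minRadius && rad <= maxRadius)
            ++hist[(int)(rad - minRadius + 0.5f) + 1];
    }

    std::vector<std::pair<float, int> > circles;   // (radius, votes)
    for (int i = 0; i < histSize; ++i)
    {
        const int curVotes = hist[i + 1];
        if (curVotes >= threshold && curVotes > hist[i] && curVotes >= hist[i + 2])
            circles.push_back(std::make_pair((float)(i + minRadius), curVotes));
    }
    return circles;
}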