opencv/modules/gpu/src/cuda/hough.cu

/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace hough
    {
        __device__ unsigned int g_counter;

        const int PIXELS_PER_THREAD = 16;
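
        // Builds a compact list of all non-zero pixels of src. Each of the
        // four thread rows of a block keeps its own queue in shared memory;
        // one thread then reserves room for all four queues in the global
        // list with a single atomicAdd on g_counter.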
        __global__ void buildPointList(const DevMem2Db src, unsigned int* list)
        {
            const int x = blockIdx.x * 32 * PIXELS_PER_THREAD + threadIdx.x;
            const int y = blockIdx.y * 4 + threadIdx.y;

            if (y >= src.rows)
                return;

            volatile int qindex = -1;
            __shared__ volatile int s_qindex[4];
            __shared__ volatile int s_qstart[4];
            s_qindex[threadIdx.y] = -1;

            __shared__ volatile unsigned int s_queue[4][32 * PIXELS_PER_THREAD];

            // fill the queue
            for (int i = 0; i < PIXELS_PER_THREAD; ++i)
            {
                const int xx = i * blockDim.x + x;

                if (xx >= src.cols)
                    break;

                if (src(y, xx))
                {
                    // pack (y, x) into one 32-bit value
                    const unsigned int queue_val = (y << 16) | xx;

                    // emulated queue push: threads race for the next slot and
                    // retry until their own value is the one actually stored
                    do {
                        qindex++;
                        s_qindex[threadIdx.y] = qindex;
                        s_queue[threadIdx.y][qindex] = queue_val;
                    } while (s_queue[threadIdx.y][qindex] != queue_val);
                }

                // reload index from smem (last thread to write to smem will have updated it)
                qindex = s_qindex[threadIdx.y];
            }

            __syncthreads();

            // let one thread reserve the space required in the global list
            if (threadIdx.x == 0 && threadIdx.y == 0)
            {
                // find how many items are stored in each list
                int total_index = 0;
                #pragma unroll
                for (int i = 0; i < 4; ++i)
                {
                    s_qstart[i] = total_index;
                    total_index += (s_qindex[i] + 1u);
                }

                // calculate the offset in the global list
                const unsigned int global_offset = atomicAdd(&g_counter, total_index);
                #pragma unroll
                for (int i = 0; i < 4; ++i)
                    s_qstart[i] += global_offset;
            }

            __syncthreads();

            // copy local queues to global queue
            for (int i = 0; i <= qindex; i += 32)
            {
                if (i + threadIdx.x > qindex)
                    break;

                unsigned int qvalue = s_queue[threadIdx.y][i + threadIdx.x];
                list[s_qstart[threadIdx.y] + i + threadIdx.x] = qvalue;
            }
        }
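
        // Host wrapper: zeroes g_counter, launches buildPointList over the
        // whole image and returns the number of points appended to list.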
        unsigned int buildPointList_gpu(DevMem2Db src, unsigned int* list)
        {
            void* counter_ptr;
            cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
            cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) );

            const dim3 block(32, 4);
            const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));

            cudaSafeCall( cudaFuncSetCacheConfig(buildPointList, cudaFuncCachePreferShared) );

            buildPointList<<<grid, block>>>(src, list);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            unsigned int total_count;
            cudaSafeCall( cudaMemcpy(&total_count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );

            return total_count;
        }
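
        // Hough vote accumulation: one block per theta bin. The rho histogram
        // for that angle lives in dynamically sized shared memory and is
        // written back to row n + 1 of the accumulator, leaving a one-cell
        // border for the peak search below.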
        __global__ void linesAccum(const unsigned int* list, const unsigned int count, PtrStep_<unsigned int> accum,
                                   const float irho, const float theta, const int numrho)
        {
            extern __shared__ unsigned int smem[];

            for (int i = threadIdx.x; i < numrho; i += blockDim.x)
                smem[i] = 0;
            __syncthreads();

            const int n = blockIdx.x;
            const float ang = n * theta;

            float sin_ang;
            float cos_ang;
            sincosf(ang, &sin_ang, &cos_ang);

            const float tabSin = sin_ang * irho;
            const float tabCos = cos_ang * irho;

            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                // read one element from global memory
                const unsigned int qvalue = list[i];

                // unpack the (y, x) coordinates stored by buildPointList
                const unsigned int x = (qvalue & 0x0000FFFF);
                const unsigned int y = (qvalue >> 16) & 0x0000FFFF;

                // rho index of the line through (x, y) at angle ang
                int r = __float2int_rn(x * tabCos + y * tabSin);
                r += (numrho - 1) / 2;

                Emulation::smem::atomicInc(&smem[r], (unsigned int)(-1));
            }
            __syncthreads();

            for (int i = threadIdx.x; i < numrho; i += blockDim.x)
                accum(n + 1, i + 1) = smem[i];
        }
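
        // Host wrapper for linesAccum: grid and shared memory are sized from
        // the accumulator, whose extra two rows/columns form the border.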
        void linesAccum_gpu(const unsigned int* list, unsigned int count, DevMem2D_<unsigned int> accum, float rho, float theta)
        {
            const dim3 block(1024);
            const dim3 grid(accum.rows - 2);

            cudaSafeCall( cudaFuncSetCacheConfig(linesAccum, cudaFuncCachePreferShared) );

            size_t smem_size = (accum.cols - 2) * sizeof(unsigned int);

            linesAccum<<<grid, block, smem_size>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );
        }
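
        // Peak extraction: each block caches a 32x8 tile of the accumulator in
        // shared memory. Interior cells above the threshold that are local
        // maxima of their four-neighbourhood are appended to out/voices.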
        __global__ void linesGetResult(const DevMem2D_<unsigned int> accum, float2* out, int* voices, const int maxSize,
                                       const float threshold, const float theta, const float rho, const int numrho)
        {
            __shared__ unsigned int smem[8][32];

            // blocks overlap by two cells so every interior cell sees its neighbours
            int r = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
            int n = blockIdx.y * (blockDim.y - 2) + threadIdx.y;

            if (r >= accum.cols || n >= accum.rows)
                return;

            smem[threadIdx.y][threadIdx.x] = accum(n, r);
            __syncthreads();

            r -= 1;
            n -= 1;

            // border threads only load the apron; they produce no output
            if (threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1 || r >= accum.cols - 2 || n >= accum.rows - 2)
                return;

            if (smem[threadIdx.y][threadIdx.x] > threshold &&
                smem[threadIdx.y][threadIdx.x] >  smem[threadIdx.y - 1][threadIdx.x] &&
                smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y + 1][threadIdx.x] &&
                smem[threadIdx.y][threadIdx.x] >  smem[threadIdx.y][threadIdx.x - 1] &&
                smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y][threadIdx.x + 1])
            {
                float radius = (r - (numrho - 1) * 0.5f) * rho;
                float angle = n * theta;

                const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
                if (ind < maxSize)
                {
                    out[ind] = make_float2(radius, angle);
                    voices[ind] = smem[threadIdx.y][threadIdx.x];
                }
            }
        }
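
        // Host wrapper: runs the peak search and, if requested, sorts the
        // detected lines by vote count in descending order using Thrust.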
        unsigned int linesGetResult_gpu(DevMem2D_<unsigned int> accum, float2* out, int* voices, unsigned int maxSize,
                                        float rho, float theta, float threshold, bool doSort)
        {
            void* counter_ptr;
            cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
            cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) );

            const dim3 block(32, 8);
            const dim3 grid(divUp(accum.cols, block.x - 2), divUp(accum.rows, block.y - 2));

            linesGetResult<<<grid, block>>>(accum, out, voices, maxSize, threshold, theta, rho, accum.cols - 2);
            cudaSafeCall( cudaGetLastError() );
            cudaSafeCall( cudaDeviceSynchronize() );

            unsigned int total_count;
            cudaSafeCall( cudaMemcpy(&total_count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );

            // the kernel may have tried to append more than maxSize results
            total_count = ::min(total_count, maxSize);

            if (doSort && total_count > 0)
            {
                thrust::device_ptr<float2> out_ptr(out);
                thrust::device_ptr<int> voices_ptr(voices);
                thrust::sort_by_key(voices_ptr, voices_ptr + total_count, out_ptr, thrust::greater<int>());
            }

            return total_count;
        }
    }
}}}
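
// Usage sketch (an assumption, not part of this file): the host-side
// cv::gpu::HoughLines implementation is expected to chain these helpers
// roughly as follows, with list/accum/out/voices as preallocated GPU buffers:
//
//     unsigned int count = buildPointList_gpu(src, list);   // compact the edge pixels
//     linesAccum_gpu(list, count, accum, rho, theta);       // vote in (rho, theta) space
//     unsigned int lines = linesGetResult_gpu(accum, out, voices, maxLines,
//                                             rho, theta, threshold, doSort);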