Mirror of https://github.com/opencv/opencv.git
minor refactoring:
moved lbp.hpp to src/cuda folder; added missing cv::gpu::device namespace; deleted whitespaces
parent a703df5456
commit 3ebec7448d
@@ -42,9 +42,9 @@
 #if !defined CUDA_DISABLER

-#include <opencv2/gpu/device/lbp.hpp>
-#include <opencv2/gpu/device/vec_traits.hpp>
-#include <opencv2/gpu/device/saturate_cast.hpp>
+#include "lbp.hpp"
+#include "opencv2/gpu/device/vec_traits.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"

 namespace cv { namespace gpu { namespace device
 {
@@ -1535,6 +1535,8 @@ namespace cv { namespace gpu { namespace device
            return functor_type(); \
        } \
    };
+
+    #undef CV_DESCALE
 }}} // namespace cv { namespace gpu { namespace device

 #endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
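For context, CV_DESCALE is the fixed-point rounding helper used by the colour-conversion kernels in this header, and the added #undef keeps the macro from leaking into every file that includes the detail header. A small illustrative sketch of the pattern follows; the macro body and the BT.601 coefficients are the usual OpenCV values, but treat the snippet as an assumption-laden example rather than a quote of this file:

// Illustrative: round and shift a fixed-point product back to integer range.
#define CV_DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n))

// Example: RGB to gray with coefficients scaled by 2^14 (4899 + 9617 + 1868 = 16384).
__device__ __forceinline__ unsigned char rgbToGray(int r, int g, int b)
{
    return (unsigned char)CV_DESCALE(r * 4899 + g * 9617 + b * 1868, 14);
}

#undef CV_DESCALE   // same idea as the hunk above: keep the macro file-local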
@@ -40,15 +40,15 @@
 //
 //M*/

-#ifndef __OPENCV_GPU_UTILITY_DETAIL_HPP__
-#define __OPENCV_GPU_UTILITY_DETAIL_HPP__
+#ifndef __OPENCV_GPU_REDUCTION_DETAIL_HPP__
+#define __OPENCV_GPU_REDUCTION_DETAIL_HPP__

 namespace cv { namespace gpu { namespace device
 {
     namespace utility_detail
     {
         ///////////////////////////////////////////////////////////////////////////////
-        // Reduction
+        // Reductor

         template <int n> struct WarpReductor
         {
@@ -838,4 +838,4 @@ namespace cv { namespace gpu { namespace device
     } // namespace utility_detail
 }}} // namespace cv { namespace gpu { namespace device

-#endif // __OPENCV_GPU_UTILITY_DETAIL_HPP__
+#endif // __OPENCV_GPU_REDUCTION_DETAIL_HPP__
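For readers unfamiliar with the renamed header: utility_detail::WarpReductor unrolls a tree reduction over a warp-sized chunk of shared memory, which is why the file is now called reduction_detail.hpp. A stand-alone sketch of that pattern, assuming a 32-lane warp, a caller-provided volatile shared buffer, and the pre-Volta lockstep execution the original code relied on (the function and parameter names here are illustrative, not the OpenCV API):

// Warp-level tree reduction over 32 consecutive shared-memory slots.
// After the call, smem[0] holds op applied across smem[0..31].
template <typename T, typename Op>
__device__ __forceinline__ void warpReduce(volatile T* smem, unsigned int lane, Op op)
{
    // Readers and writers are disjoint at every step, and lanes of one warp
    // ran in lockstep on the GPUs this code targeted, so no __syncthreads().
    if (lane < 16) smem[lane] = op(smem[lane], smem[lane + 16]);
    if (lane <  8) smem[lane] = op(smem[lane], smem[lane +  8]);
    if (lane <  4) smem[lane] = op(smem[lane], smem[lane +  4]);
    if (lane <  2) smem[lane] = op(smem[lane], smem[lane +  2]);
    if (lane <  1) smem[lane] = op(smem[lane], smem[lane +  1]);
}

A kernel would load one element per lane into the warp's 32-slot buffer and call warpReduce(buf, threadIdx.x & 31, op) to get the warp total in buf[0].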
@@ -203,7 +203,7 @@ namespace cv { namespace gpu { namespace device
        };

        template <typename T, typename D, typename UnOp, typename Mask>
-        __global__ static void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
+        static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
        {
            typedef TransformFunctorTraits<UnOp> ft;
            typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
@@ -239,10 +239,10 @@ namespace cv { namespace gpu { namespace device
        }

        template <typename T, typename D, typename UnOp, typename Mask>
-        static __global__ void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
+        __global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
        {
            const int x = blockDim.x * blockIdx.x + threadIdx.x;
            const int y = blockDim.y * blockIdx.y + threadIdx.y;

            if (x < src.cols && y < src.rows && mask(y, x))
            {
@@ -251,7 +251,7 @@ namespace cv { namespace gpu { namespace device
        }

        template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-        __global__ static void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
+        static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
            const Mask mask, const BinOp op)
        {
            typedef TransformFunctorTraits<BinOp> ft;
@@ -294,8 +294,8 @@ namespace cv { namespace gpu { namespace device
        static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
            const Mask mask, const BinOp op)
        {
            const int x = blockDim.x * blockIdx.x + threadIdx.x;
            const int y = blockDim.y * blockIdx.y + threadIdx.y;

            if (x < src1.cols && y < src1.rows && mask(y, x))
            {
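The transform_detail.hpp hunks above only reorder the declaration specifiers on the kernel definitions and, per the commit message, drop trailing whitespace (which is why the last hunk shows no visible textual change). Both specifier orders are accepted by nvcc, so the edit is stylistic. A toy, self-contained illustration (not OpenCV code, names are made up for the example):

#include <cuda_runtime.h>

// Both declarations are equivalent for nvcc; static gives the kernels internal linkage.
static __global__ void kernelA(int* out) { out[threadIdx.x] = 1; }
__global__ static void kernelB(int* out) { out[threadIdx.x] = 2; }

int main()
{
    int* d = 0;
    cudaMalloc(&d, 32 * sizeof(int));
    kernelA<<<1, 32>>>(d);
    kernelB<<<1, 32>>>(d);
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}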
@@ -43,124 +43,129 @@
 #ifndef __OPENCV_GPU_SCAN_HPP__
 #define __OPENCV_GPU_SCAN_HPP__

+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
     enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };

     template <ScanKind Kind, typename T, typename F> struct WarpScan
     {
         __device__ __forceinline__ WarpScan() {}
         __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }

         __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
         {
             const unsigned int lane = idx & 31;
             F op;

             if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
             if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
             if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
             if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
             if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

             if( Kind == INCLUSIVE )
                 return ptr [idx];
             else
                 return (lane > 0) ? ptr [idx - 1] : 0;
         }

         __device__ __forceinline__ unsigned int index(const unsigned int tid)
         {
             return tid;
         }

         __device__ __forceinline__ void init(volatile T *ptr){}

         static const int warp_offset = 0;

         typedef WarpScan<INCLUSIVE, T, F> merge;
     };

     template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
     {
         __device__ __forceinline__ WarpScanNoComp() {}
         __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }

         __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
         {
             const unsigned int lane = threadIdx.x & 31;
             F op;

             ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
             ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

             if( Kind == INCLUSIVE )
                 return ptr [idx];
             else
                 return (lane > 0) ? ptr [idx - 1] : 0;
         }

         __device__ __forceinline__ unsigned int index(const unsigned int tid)
         {
             return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
         }

         __device__ __forceinline__ void init(volatile T *ptr)
         {
             ptr[threadIdx.x] = 0;
         }

         static const int warp_smem_stride = 32 + 16 + 1;
         static const int warp_offset = 16;
         static const int warp_log = 5;
         static const int warp_mask = 31;

         typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
     };

     template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
     {
         __device__ __forceinline__ BlockScan() {}
         __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }

         __device__ __forceinline__ T operator()(volatile T *ptr)
         {
             const unsigned int tid = threadIdx.x;
             const unsigned int lane = tid & warp_mask;
             const unsigned int warp = tid >> warp_log;

             Sc scan;
             typename Sc::merge merge_scan;
             const unsigned int idx = scan.index(tid);

             T val = scan(ptr, idx);
             __syncthreads ();

             if( warp == 0)
                 scan.init(ptr);
             __syncthreads ();

             if( lane == 31 )
                 ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
             __syncthreads ();

             if( warp == 0 )
                 merge_scan(ptr, idx);
             __syncthreads();

             if ( warp > 0)
                 val = ptr [scan.warp_offset + warp - 1] + val;
             __syncthreads ();

             ptr[idx] = val;
             __syncthreads ();

             return val ;
         }

         static const int warp_log = 5;
         static const int warp_mask = 31;
     };
+}}}

-#endif
+#endif // __OPENCV_GPU_SCAN_HPP__
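The scan.hpp hunk above adds #include "common.hpp" and wraps the existing WarpScan, WarpScanNoComp, and BlockScan templates in the cv::gpu::device namespace; apart from the re-indentation that implies, the scan logic is untouched. As a hint at how these templates are meant to be driven, here is a hedged sketch of a kernel that uses BlockScan for a per-block inclusive prefix sum; the kernel, the functor, and the launch shape are assumptions for illustration, not code from this commit:

#include "scan.hpp"   // assumes compilation next to the header shown above

namespace cv { namespace gpu { namespace device
{
    // BlockScan's F parameter is a default-constructible binary functor.
    struct PlusInt
    {
        __device__ __forceinline__ int operator()(int a, int b) const { return a + b; }
    };

    // One 256-thread block computes an inclusive prefix sum over 256 ints:
    // WarpScan handles each 32-lane warp, BlockScan stitches the warps together.
    __global__ void inclusiveScan256(const int* in, int* out)
    {
        __shared__ int smem[256];

        const unsigned int tid = threadIdx.x;
        smem[tid] = in[blockIdx.x * 256 + tid];
        __syncthreads();

        BlockScan<INCLUSIVE, int, WarpScan<INCLUSIVE, int, PlusInt>, PlusInt> scan;
        out[blockIdx.x * 256 + tid] = scan(smem);
    }
}}}

A real caller would also handle partial blocks and carry each block's last element into a second pass to turn this into a device-wide scan.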
@@ -60,8 +60,6 @@ namespace cv { namespace gpu
            __OPENCV_GPU_HOST_DEVICE__ static void check() {};
        };
    }
-
-    using ::cv::gpu::device::Static;
 }}

 #undef __OPENCV_GPU_HOST_DEVICE__
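The removed using-declaration re-exported the device-side Static<> helper into the cv::gpu namespace; the helper itself stays in cv::gpu::device. Static<> is the usual "compile-time assert via template specialization" trick: check() only exists for the true specialization, so instantiating Static<false>::check() fails to compile. A minimal stand-alone sketch of that pattern (the shape mirrors the header, but this is an illustration rather than the exact OpenCV definition):

// Only the true specialization provides check(); Static<false>::check() is a
// compile-time error, which is the whole point of the helper.
template <bool expr> struct Static {};

template <> struct Static<true>
{
    static void check() {}
};

// Example: reject element types wider than 4 bytes at compile time.
template <typename T> void requireSmallType()
{
    Static<(sizeof(T) <= 4)>::check();
}

int main()
{
    requireSmallType<int>();       // compiles
    // requireSmallType<double>(); // would fail: Static<false> has no check()
    return 0;
}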
@@ -45,7 +45,7 @@

 #include "saturate_cast.hpp"
 #include "datamov_utils.hpp"
-#include "detail/utility_detail.hpp"
+#include "detail/reduction_detail.hpp"

 namespace cv { namespace gpu { namespace device
 {