refactor cudaarithm reductions:

* removed overloads with an explicit buffer; BufferPool is now used instead
* added async versions of all reduce functions
Vladislav Vinogradov 2014-12-24 13:40:33 +03:00
parent a4e598f474
commit cd0e95de16
16 changed files with 1075 additions and 519 deletions
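For orientation, here is a minimal before/after sketch of the API change (an editor's illustration based on the signatures in this commit, not code from the commit itself; d_src stands for an existing GpuMat):

// Before: each reduction took a caller-managed scratch buffer.
cv::cuda::GpuMat buf;
cv::Scalar s_old = cv::cuda::sum(d_src, cv::cuda::GpuMat(), buf);

// After: temporaries come from the stream's BufferPool, so the buffer
// argument is gone, and every reduction gains an async variant that
// writes its result into an OutputArray instead of returning it.
cv::Scalar s_new = cv::cuda::sum(d_src);

cv::cuda::Stream stream;
cv::cuda::HostMem result;
cv::cuda::calcSum(d_src, result, cv::noArray(), stream);
stream.waitForCompletion(); // result now holds a 1x1 CV_64FC(cn) sum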

View File

@@ -454,11 +454,10 @@ public:
// create suitable matrix headers
GpuMat src = resuzeBuffer(cv::Rect(0, 0, level.sFrame.width, level.sFrame.height));
GpuMat sint = integral(cv::Rect(prev, 0, level.sFrame.width + 1, level.sFrame.height + 1));
GpuMat buff = integralBuffer;
// generate integral for scale
cuda::resize(image, src, level.sFrame, 0, 0, cv::INTER_LINEAR);
cuda::integral(src, sint, buff);
cuda::integral(src, sint);
// calculate job
int totalWidth = level.workArea.width / step;

View File

@@ -524,116 +524,53 @@ CV_EXPORTS void copyMakeBorder(InputArray src, OutputArray dst, int top, int bot
@param src1 Source matrix. Any matrices except CV_64F are supported.
@param normType Norm type. NORM_L1, NORM_L2, and NORM_INF are supported for now.
@param mask Optional operation mask; it must have the same size as src1 and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa norm
*/
CV_EXPORTS double norm(InputArray src1, int normType, InputArray mask, GpuMat& buf);
/** @overload
uses new buffer, no mask
*/
static inline double norm(InputArray src, int normType)
{
GpuMat buf;
return norm(src, normType, GpuMat(), buf);
}
/** @overload
no mask
*/
static inline double norm(InputArray src, int normType, GpuMat& buf)
{
return norm(src, normType, GpuMat(), buf);
}
CV_EXPORTS double norm(InputArray src1, int normType, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcNorm(InputArray src, OutputArray dst, int normType, InputArray mask = noArray(), Stream& stream = Stream::Null());
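A usage sketch of the new asynchronous norm API (editor's illustration, mirroring the synchronous wrapper later in this commit; the helper name asyncNormL2 is hypothetical):

double asyncNormL2(const cv::cuda::GpuMat& src)
{
    cv::cuda::Stream stream;
    cv::cuda::HostMem result;
    cv::cuda::calcNorm(src, result, cv::NORM_L2, cv::noArray(), stream);
    stream.waitForCompletion(); // the 1x1 result is valid only after this
    double val = 0.0;
    result.createMatHeader().convertTo(cv::Mat(1, 1, CV_64FC1, &val), CV_64F);
    return val;
}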
/** @brief Returns the norm of the difference of two matrices.
@param src1 Source matrix. Any matrices except CV_64F are supported.
@param src2 Second source matrix with the same size and type as src1.
@param normType Norm type. NORM_L1, NORM_L2, and NORM_INF are supported for now.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa norm
*/
CV_EXPORTS double norm(InputArray src1, InputArray src2, GpuMat& buf, int normType=NORM_L2);
/** @overload
uses new buffer
*/
static inline double norm(InputArray src1, InputArray src2, int normType=NORM_L2)
{
GpuMat buf;
return norm(src1, src2, buf, normType);
}
CV_EXPORTS double norm(InputArray src1, InputArray src2, int normType=NORM_L2);
/** @overload */
CV_EXPORTS void calcNormDiff(InputArray src1, InputArray src2, OutputArray dst, int normType=NORM_L2, Stream& stream = Stream::Null());
/** @brief Returns the sum of matrix elements.
@param src Source image of any depth except for CV_64F .
@param mask Optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa sum
*/
CV_EXPORTS Scalar sum(InputArray src, InputArray mask, GpuMat& buf);
/** @overload
uses new buffer, no mask
*/
static inline Scalar sum(InputArray src)
{
GpuMat buf;
return sum(src, GpuMat(), buf);
}
/** @overload
no mask
*/
static inline Scalar sum(InputArray src, GpuMat& buf)
{
return sum(src, GpuMat(), buf);
}
CV_EXPORTS Scalar sum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
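The async variant writes the sum into a 1x1 CV_64FC(cn) array; a sketch of reading it back as a Scalar (editor's illustration, taken from the sum wrapper later in this commit; asyncSum is a hypothetical helper name):

cv::Scalar asyncSum(const cv::cuda::GpuMat& src, cv::cuda::Stream& stream)
{
    cv::cuda::HostMem dst;
    cv::cuda::calcSum(src, dst, cv::noArray(), stream);
    stream.waitForCompletion();
    cv::Scalar val;
    dst.createMatHeader().convertTo(
        cv::Mat(dst.size(), CV_64FC(dst.channels()), val.val), CV_64F);
    return val;
}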
/** @brief Returns the sum of absolute values for matrix elements.
@param src Source image of any depth except for CV_64F .
@param mask Optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
*/
CV_EXPORTS Scalar absSum(InputArray src, InputArray mask, GpuMat& buf);
/** @overload
uses new buffer, no mask
*/
static inline Scalar absSum(InputArray src)
{
GpuMat buf;
return absSum(src, GpuMat(), buf);
}
/** @overload
no mask
*/
static inline Scalar absSum(InputArray src, GpuMat& buf)
{
return absSum(src, GpuMat(), buf);
}
CV_EXPORTS Scalar absSum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcAbsSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Returns the squared sum of matrix elements.
@param src Source image of any depth except for CV_64F .
@param mask Optional operation mask; it must have the same size as src and CV_8UC1 type.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
*/
CV_EXPORTS Scalar sqrSum(InputArray src, InputArray mask, GpuMat& buf);
/** @overload
uses new buffer, no mask
*/
static inline Scalar sqrSum(InputArray src)
{
GpuMat buf;
return sqrSum(src, GpuMat(), buf);
}
/** @overload
no mask
*/
static inline Scalar sqrSum(InputArray src, GpuMat& buf)
{
return sqrSum(src, GpuMat(), buf);
}
CV_EXPORTS Scalar sqrSum(InputArray src, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void calcSqrSum(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
/** @brief Finds global minimum and maximum matrix elements and returns their values.
@@ -641,21 +578,14 @@ static inline Scalar sqrSum(InputArray src, GpuMat& buf)
@param minVal Pointer to the returned minimum value. Use NULL if not required.
@param maxVal Pointer to the returned maximum value. Use NULL if not required.
@param mask Optional mask to select a sub-matrix.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
@sa minMaxLoc
*/
CV_EXPORTS void minMax(InputArray src, double* minVal, double* maxVal, InputArray mask, GpuMat& buf);
/** @overload
uses new buffer
*/
static inline void minMax(InputArray src, double* minVal, double* maxVal=0, InputArray mask=noArray())
{
GpuMat buf;
minMax(src, minVal, maxVal, mask, buf);
}
CV_EXPORTS void minMax(InputArray src, double* minVal, double* maxVal, InputArray mask = noArray());
/** @overload */
CV_EXPORTS void findMinMax(InputArray src, OutputArray dst, InputArray mask = noArray(), Stream& stream = Stream::Null());
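findMinMax packs both results into a 1x2 array (CV_32S for integer inputs, or the source depth for 32F/64F); a sketch of the async round trip (editor's illustration based on the minMax wrapper in this commit; asyncMinMax is a hypothetical name):

void asyncMinMax(const cv::cuda::GpuMat& src, double& minVal, double& maxVal)
{
    cv::cuda::Stream stream;
    cv::cuda::HostMem dst;
    cv::cuda::findMinMax(src, dst, cv::noArray(), stream);
    stream.waitForCompletion();
    double vals[2];
    dst.createMatHeader().convertTo(cv::Mat(1, 2, CV_64FC1, vals), CV_64F);
    minVal = vals[0];
    maxVal = vals[1];
}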
/** @brief Finds global minimum and maximum matrix elements and returns their values with locations.
@@ -665,44 +595,28 @@ static inline void minMax(InputArray src, double* minVal, double* maxVal=0, Inpu
@param minLoc Pointer to the returned minimum location. Use NULL if not required.
@param maxLoc Pointer to the returned maximum location. Use NULL if not required.
@param mask Optional mask to select a sub-matrix.
@param valbuf Optional values buffer to avoid extra memory allocations. It is resized
automatically.
@param locbuf Optional locations buffer to avoid extra memory allocations. It is resized
automatically.
The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
@sa minMaxLoc
*/
CV_EXPORTS void minMaxLoc(InputArray src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
InputArray mask, GpuMat& valbuf, GpuMat& locbuf);
/** @overload
uses new buffer
*/
static inline void minMaxLoc(InputArray src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
InputArray mask=noArray())
{
GpuMat valBuf, locBuf;
minMaxLoc(src, minVal, maxVal, minLoc, maxLoc, mask, valBuf, locBuf);
}
InputArray mask = noArray());
/** @overload */
CV_EXPORTS void findMinMaxLoc(InputArray src, OutputArray minMaxVals, OutputArray loc,
InputArray mask = noArray(), Stream& stream = Stream::Null());
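findMinMaxLoc returns the two values plus two flattened indices; the indices decode to points as (idx % cols, idx / cols), exactly as the synchronous wrapper in this commit does. A sketch (editor's illustration; d_src stands for an existing single-channel GpuMat):

cv::cuda::Stream stream;
cv::cuda::HostMem vals, locs;
cv::cuda::findMinMaxLoc(d_src, vals, locs, cv::noArray(), stream);
stream.waitForCompletion();
int idx[2];
locs.createMatHeader().copyTo(cv::Mat(locs.size(), CV_32SC1, idx));
const cv::Point minLoc(idx[0] % d_src.cols, idx[0] / d_src.cols);
const cv::Point maxLoc(idx[1] % d_src.cols, idx[1] / d_src.cols);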
/** @brief Counts non-zero matrix elements.
@param src Single-channel source image.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
The function does not work with CV_64F images on GPUs with the compute capability \< 1.3.
@sa countNonZero
*/
CV_EXPORTS int countNonZero(InputArray src, GpuMat& buf);
/** @overload
uses new buffer
*/
static inline int countNonZero(const GpuMat& src)
{
GpuMat buf;
return countNonZero(src, buf);
}
CV_EXPORTS int countNonZero(InputArray src);
/** @overload */
CV_EXPORTS void countNonZero(InputArray src, OutputArray dst, Stream& stream = Stream::Null());
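The async overload stores the count in a 1x1 CV_32SC1 array; a minimal sketch of reading it back (editor's illustration; asyncCountNonZero is a hypothetical helper):

int asyncCountNonZero(const cv::cuda::GpuMat& src, cv::cuda::Stream& stream)
{
    cv::cuda::HostMem dst;
    cv::cuda::countNonZero(src, dst, stream);
    stream.waitForCompletion();
    return dst.createMatHeader().at<int>(0, 0);
}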
/** @brief Reduces a matrix to a vector.
@@ -737,19 +651,12 @@ CV_EXPORTS void reduce(InputArray mtx, OutputArray vec, int dim, int reduceOp, i
@param mtx Source matrix. CV_8UC1 matrices are supported for now.
@param mean Mean value.
@param stddev Standard deviation value.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@sa meanStdDev
*/
CV_EXPORTS void meanStdDev(InputArray mtx, Scalar& mean, Scalar& stddev, GpuMat& buf);
/** @overload
uses new buffer
*/
static inline void meanStdDev(InputArray src, Scalar& mean, Scalar& stddev)
{
GpuMat buf;
meanStdDev(src, mean, stddev, buf);
}
CV_EXPORTS void meanStdDev(InputArray mtx, Scalar& mean, Scalar& stddev);
/** @overload */
CV_EXPORTS void meanStdDev(InputArray mtx, OutputArray dst, Stream& stream = Stream::Null());
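The async meanStdDev produces a 1x2 CV_64FC1 array holding (mean, stddev); a sketch of the round trip (editor's illustration, following the synchronous wrapper in this commit; d_src stands for an existing CV_8UC1 GpuMat):

cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::meanStdDev(d_src, dst, stream);
stream.waitForCompletion();
double vals[2];
dst.createMatHeader().copyTo(cv::Mat(1, 2, CV_64FC1, vals));
// vals[0] is the mean, vals[1] the standard deviation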
/** @brief Computes a standard deviation of integral images.
@@ -773,64 +680,32 @@ normalization.
@param dtype When negative, the output array has the same type as src; otherwise, it has the same
number of channels as src and the depth = CV_MAT_DEPTH(dtype).
@param mask Optional operation mask.
@param norm_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param cvt_buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param stream Stream for the asynchronous version.
@sa normalize
*/
CV_EXPORTS void normalize(InputArray src, OutputArray dst, double alpha, double beta,
int norm_type, int dtype, InputArray mask, GpuMat& norm_buf, GpuMat& cvt_buf);
/** @overload
uses new buffers
*/
static inline void normalize(InputArray src, OutputArray dst, double alpha = 1, double beta = 0,
int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray())
{
GpuMat norm_buf;
GpuMat cvt_buf;
normalize(src, dst, alpha, beta, norm_type, dtype, mask, norm_buf, cvt_buf);
}
int norm_type, int dtype, InputArray mask = noArray(),
Stream& stream = Stream::Null());
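A sketch of the stream-based normalize (editor's illustration; d_src is assumed to be an existing single-channel GpuMat):

cv::cuda::GpuMat d_dst;
cv::cuda::Stream stream;
cv::cuda::normalize(d_src, d_dst, 0.0, 255.0, cv::NORM_MINMAX, CV_8U,
                    cv::noArray(), stream);
stream.waitForCompletion(); // d_dst holds the range-stretched CV_8U image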
/** @brief Computes an integral image.
@param src Source image. Only CV_8UC1 images are supported for now.
@param sum Integral image containing 32-bit unsigned integer values packed into CV_32SC1.
@param buffer Optional buffer to avoid extra memory allocations. It is resized automatically.
@param stream Stream for the asynchronous version.
@sa integral
*/
CV_EXPORTS void integral(InputArray src, OutputArray sum, GpuMat& buffer, Stream& stream = Stream::Null());
static inline void integralBuffered(InputArray src, OutputArray sum, GpuMat& buffer, Stream& stream = Stream::Null())
{
integral(src, sum, buffer, stream);
}
/** @overload
uses new buffer
*/
static inline void integral(InputArray src, OutputArray sum, Stream& stream = Stream::Null())
{
GpuMat buffer;
integral(src, sum, buffer, stream);
}
CV_EXPORTS void integral(InputArray src, OutputArray sum, Stream& stream = Stream::Null());
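With the buffer overload gone, temporaries come from the stream's BufferPool; a sketch of asynchronous use (editor's illustration; d_src is assumed to be an existing CV_8UC1 GpuMat):

cv::cuda::GpuMat d_sum;
cv::cuda::Stream stream;
cv::cuda::integral(d_src, d_sum, stream); // (rows+1) x (cols+1), CV_32SC1
stream.waitForCompletion();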
/** @brief Computes a squared integral image.
@param src Source image. Only CV_8UC1 images are supported for now.
@param sqsum Squared integral image containing 64-bit unsigned integer values packed into
CV_64FC1.
@param buf Optional buffer to avoid extra memory allocations. It is resized automatically.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void sqrIntegral(InputArray src, OutputArray sqsum, GpuMat& buf, Stream& stream = Stream::Null());
/** @overload
uses new buffer
*/
static inline void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream = Stream::Null())
{
GpuMat buffer;
sqrIntegral(src, sqsum, buffer, stream);
}
CV_EXPORTS void sqrIntegral(InputArray src, OutputArray sqsum, Stream& stream = Stream::Null());
//! @} cudaarithm_reduce

View File

@@ -108,10 +108,9 @@ PERF_TEST_P(Sz_Norm, NormDiff,
{
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_buf;
double gpu_dst;
TEST_CYCLE() gpu_dst = cv::cuda::norm(d_src1, d_src2, d_buf, normType);
TEST_CYCLE() gpu_dst = cv::cuda::norm(d_src1, d_src2, normType);
SANITY_CHECK(gpu_dst);
@@ -146,10 +145,9 @@ PERF_TEST_P(Sz_Depth_Cn, Sum,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::cuda::sum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::sum(d_src);
SANITY_CHECK(gpu_dst, 1e-5, ERROR_RELATIVE);
}
@@ -183,10 +181,9 @@ PERF_TEST_P(Sz_Depth_Cn, SumAbs,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::cuda::absSum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::absSum(d_src);
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
}
@@ -216,10 +213,9 @@ PERF_TEST_P(Sz_Depth_Cn, SumSqr,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::cuda::sqrSum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::sqrSum(d_src);
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
}
@@ -248,10 +244,9 @@ PERF_TEST_P(Sz_Depth, MinMax,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
double gpu_minVal, gpu_maxVal;
TEST_CYCLE() cv::cuda::minMax(d_src, &gpu_minVal, &gpu_maxVal, cv::cuda::GpuMat(), d_buf);
TEST_CYCLE() cv::cuda::minMax(d_src, &gpu_minVal, &gpu_maxVal, cv::cuda::GpuMat());
SANITY_CHECK(gpu_minVal, 1e-10);
SANITY_CHECK(gpu_maxVal, 1e-10);
@@ -286,11 +281,10 @@ PERF_TEST_P(Sz_Depth, MinMaxLoc,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_valbuf, d_locbuf;
double gpu_minVal, gpu_maxVal;
cv::Point gpu_minLoc, gpu_maxLoc;
TEST_CYCLE() cv::cuda::minMaxLoc(d_src, &gpu_minVal, &gpu_maxVal, &gpu_minLoc, &gpu_maxLoc, cv::cuda::GpuMat(), d_valbuf, d_locbuf);
TEST_CYCLE() cv::cuda::minMaxLoc(d_src, &gpu_minVal, &gpu_maxVal, &gpu_minLoc, &gpu_maxLoc);
SANITY_CHECK(gpu_minVal, 1e-10);
SANITY_CHECK(gpu_maxVal, 1e-10);
@@ -323,10 +317,9 @@ PERF_TEST_P(Sz_Depth, CountNonZero,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
int gpu_dst = 0;
TEST_CYCLE() gpu_dst = cv::cuda::countNonZero(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::countNonZero(d_src);
SANITY_CHECK(gpu_dst);
}
@@ -414,9 +407,8 @@ PERF_TEST_P(Sz_Depth_NormType, Normalize,
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
cv::cuda::GpuMat d_norm_buf, d_cvt_buf;
TEST_CYCLE() cv::cuda::normalize(d_src, dst, alpha, beta, norm_type, type, cv::cuda::GpuMat(), d_norm_buf, d_cvt_buf);
TEST_CYCLE() cv::cuda::normalize(d_src, dst, alpha, beta, norm_type, type, cv::cuda::GpuMat());
CUDA_SANITY_CHECK(dst, 1e-6);
}
@@ -445,11 +437,10 @@ PERF_TEST_P(Sz, MeanStdDev,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_mean;
cv::Scalar gpu_stddev;
TEST_CYCLE() cv::cuda::meanStdDev(d_src, gpu_mean, gpu_stddev, d_buf);
TEST_CYCLE() cv::cuda::meanStdDev(d_src, gpu_mean, gpu_stddev);
SANITY_CHECK(gpu_mean);
SANITY_CHECK(gpu_stddev);
@@ -481,9 +472,8 @@ PERF_TEST_P(Sz, Integral,
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
cv::cuda::GpuMat d_buf;
TEST_CYCLE() cv::cuda::integral(d_src, dst, d_buf);
TEST_CYCLE() cv::cuda::integral(d_src, dst);
CUDA_SANITY_CHECK(dst);
}
@@ -511,9 +501,9 @@ PERF_TEST_P(Sz, IntegralSqr,
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst, buf;
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::cuda::sqrIntegral(d_src, dst, buf);
TEST_CYCLE() cv::cuda::sqrIntegral(d_src, dst);
CUDA_SANITY_CHECK(dst);
}

View File

@@ -50,47 +50,64 @@
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
template <typename T>
int countNonZeroImpl(const GpuMat& _src, GpuMat& _buf)
template <typename T, typename D>
void countNonZeroImpl(const GpuMat& _src, GpuMat& _dst, Stream& stream)
{
const GpuMat_<T>& src = (const GpuMat_<T>&) _src;
GpuMat_<int>& buf = (GpuMat_<int>&) _buf;
GpuMat_<D>& dst = (GpuMat_<D>&) _dst;
gridCountNonZero(src, buf);
int data;
buf.download(cv::Mat(1, 1, buf.type(), &data));
return data;
gridCountNonZero(src, dst, stream);
}
}
int cv::cuda::countNonZero(InputArray _src, GpuMat& buf)
void cv::cuda::countNonZero(InputArray _src, OutputArray _dst, Stream& stream)
{
typedef int (*func_t)(const GpuMat& _src, GpuMat& _buf);
typedef void (*func_t)(const GpuMat& src, GpuMat& dst, Stream& stream);
static const func_t funcs[] =
{
countNonZeroImpl<uchar>,
countNonZeroImpl<schar>,
countNonZeroImpl<ushort>,
countNonZeroImpl<short>,
countNonZeroImpl<int>,
countNonZeroImpl<float>,
countNonZeroImpl<double>
countNonZeroImpl<uchar, int>,
countNonZeroImpl<schar, int>,
countNonZeroImpl<ushort, int>,
countNonZeroImpl<short, int>,
countNonZeroImpl<int, int>,
countNonZeroImpl<float, int>,
countNonZeroImpl<double, int>,
};
GpuMat src = _src.getGpuMat();
GpuMat src = getInputMat(_src, stream);
CV_Assert( src.depth() <= CV_64F );
CV_Assert( src.channels() == 1 );
const func_t func = funcs[src.depth()];
GpuMat dst = getOutputMat(_dst, 1, 1, CV_32SC1, stream);
return func(src, buf);
const func_t func = funcs[src.depth()];
func(src, dst, stream);
syncOutput(dst, _dst, stream);
}
int cv::cuda::countNonZero(InputArray _src)
{
Stream& stream = Stream::Null();
BufferPool pool(stream);
GpuMat buf = pool.getBuffer(1, 1, CV_32SC1);
countNonZero(_src, buf, stream);
int data;
buf.download(Mat(1, 1, CV_32SC1, &data));
return data;
}
#endif

View File

@@ -50,62 +50,140 @@
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
template <typename T>
void minMaxImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf, double* minVal, double* maxVal)
template <typename T, typename R>
void minMaxImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream)
{
typedef typename SelectIf<
TypesEquals<T, double>::value,
double,
typename SelectIf<TypesEquals<T, float>::value, float, int>::type
>::type work_type;
const GpuMat_<T>& src = (const GpuMat_<T>&) _src;
GpuMat_<work_type>& buf = (GpuMat_<work_type>&) _buf;
GpuMat_<R>& dst = (GpuMat_<R>&) _dst;
if (mask.empty())
gridFindMinMaxVal(src, buf);
gridFindMinMaxVal(src, dst, stream);
else
gridFindMinMaxVal(src, buf, globPtr<uchar>(mask));
gridFindMinMaxVal(src, dst, globPtr<uchar>(mask), stream);
}
work_type data[2];
buf.download(cv::Mat(1, 2, buf.type(), data));
template <typename T, typename R>
void minMaxImpl(const GpuMat& src, const GpuMat& mask, double* minVal, double* maxVal)
{
BufferPool pool(Stream::Null());
GpuMat buf(pool.getBuffer(1, 2, DataType<R>::type));
if (minVal)
*minVal = data[0];
minMaxImpl<T, R>(src, mask, buf, Stream::Null());
R data[2];
buf.download(Mat(1, 2, buf.type(), data));
if (maxVal)
*maxVal = data[1];
}
}
void cv::cuda::minMax(InputArray _src, double* minVal, double* maxVal, InputArray _mask, GpuMat& buf)
void cv::cuda::findMinMax(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf, double* minVal, double* maxVal);
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
{
minMaxImpl<uchar>,
minMaxImpl<schar>,
minMaxImpl<ushort>,
minMaxImpl<short>,
minMaxImpl<int>,
minMaxImpl<float>,
minMaxImpl<double>
minMaxImpl<uchar, int>,
minMaxImpl<schar, int>,
minMaxImpl<ushort, int>,
minMaxImpl<short, int>,
minMaxImpl<int, int>,
minMaxImpl<float, float>,
minMaxImpl<double, double>
};
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_Assert( src.channels() == 1 );
CV_DbgAssert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
const int src_depth = src.depth();
const int dst_depth = src_depth < CV_32F ? CV_32S : src_depth;
GpuMat dst = getOutputMat(_dst, 1, 2, dst_depth, stream);
const func_t func = funcs[src.depth()];
func(src, mask, dst, stream);
func(src, mask, buf, minVal, maxVal);
syncOutput(dst, _dst, stream);
}
void cv::cuda::minMax(InputArray _src, double* minVal, double* maxVal, InputArray _mask)
{
Stream& stream = Stream::Null();
HostMem dst;
findMinMax(_src, dst, _mask, stream);
stream.waitForCompletion();
double vals[2];
dst.createMatHeader().convertTo(Mat(1, 2, CV_64FC1, &vals[0]), CV_64F);
if (minVal)
*minVal = vals[0];
if (maxVal)
*maxVal = vals[1];
}
namespace cv { namespace cuda { namespace internal {
void findMaxAbs(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream);
}}}
namespace
{
template <typename T, typename R>
void findMaxAbsImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream)
{
const GpuMat_<T>& src = (const GpuMat_<T>&) _src;
GpuMat_<R>& dst = (GpuMat_<R>&) _dst;
if (mask.empty())
gridFindMaxVal(abs_(src), dst, stream);
else
gridFindMaxVal(abs_(src), dst, globPtr<uchar>(mask), stream);
}
}
void cv::cuda::internal::findMaxAbs(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
{
findMaxAbsImpl<uchar, int>,
findMaxAbsImpl<schar, int>,
findMaxAbsImpl<ushort, int>,
findMaxAbsImpl<short, int>,
findMaxAbsImpl<int, int>,
findMaxAbsImpl<float, float>,
findMaxAbsImpl<double, double>
};
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_Assert( src.channels() == 1 );
CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
const int src_depth = src.depth();
const int dst_depth = src_depth < CV_32F ? CV_32S : src_depth;
GpuMat dst = getOutputMat(_dst, 1, 1, dst_depth, stream);
const func_t func = funcs[src.depth()];
func(src, mask, dst, stream);
syncOutput(dst, _dst, stream);
}
#endif

View File

@@ -50,78 +50,110 @@
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
template <typename T>
void minMaxLocImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _valBuf, GpuMat& _locBuf, double* minVal, double* maxVal, cv::Point* minLoc, cv::Point* maxLoc)
template <typename T, typename R>
void minMaxLocImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _valBuf, GpuMat& _locBuf, Stream& stream)
{
typedef typename SelectIf<
TypesEquals<T, double>::value,
double,
typename SelectIf<TypesEquals<T, float>::value, float, int>::type
>::type work_type;
const GpuMat_<T>& src = (const GpuMat_<T>&) _src;
GpuMat_<work_type>& valBuf = (GpuMat_<work_type>&) _valBuf;
GpuMat_<R>& valBuf = (GpuMat_<R>&) _valBuf;
GpuMat_<int>& locBuf = (GpuMat_<int>&) _locBuf;
if (mask.empty())
gridMinMaxLoc(src, valBuf, locBuf);
gridMinMaxLoc(src, valBuf, locBuf, stream);
else
gridMinMaxLoc(src, valBuf, locBuf, globPtr<uchar>(mask));
cv::Mat_<work_type> h_valBuf;
cv::Mat_<int> h_locBuf;
valBuf.download(h_valBuf);
locBuf.download(h_locBuf);
if (minVal)
*minVal = h_valBuf(0, 0);
if (maxVal)
*maxVal = h_valBuf(1, 0);
if (minLoc)
{
const int idx = h_locBuf(0, 0);
*minLoc = cv::Point(idx % src.cols, idx / src.cols);
}
if (maxLoc)
{
const int idx = h_locBuf(1, 0);
*maxLoc = cv::Point(idx % src.cols, idx / src.cols);
}
gridMinMaxLoc(src, valBuf, locBuf, globPtr<uchar>(mask), stream);
}
}
void cv::cuda::minMaxLoc(InputArray _src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc, InputArray _mask, GpuMat& valBuf, GpuMat& locBuf)
void cv::cuda::findMinMaxLoc(InputArray _src, OutputArray _minMaxVals, OutputArray _loc, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _valBuf, GpuMat& _locBuf, double* minVal, double* maxVal, cv::Point* minLoc, cv::Point* maxLoc);
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _valBuf, GpuMat& _locBuf, Stream& stream);
static const func_t funcs[] =
{
minMaxLocImpl<uchar>,
minMaxLocImpl<schar>,
minMaxLocImpl<ushort>,
minMaxLocImpl<short>,
minMaxLocImpl<int>,
minMaxLocImpl<float>,
minMaxLocImpl<double>
minMaxLocImpl<uchar, int>,
minMaxLocImpl<schar, int>,
minMaxLocImpl<ushort, int>,
minMaxLocImpl<short, int>,
minMaxLocImpl<int, int>,
minMaxLocImpl<float, float>,
minMaxLocImpl<double, double>
};
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_Assert( src.channels() == 1 );
CV_DbgAssert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
const func_t func = funcs[src.depth()];
const int src_depth = src.depth();
func(src, mask, valBuf, locBuf, minVal, maxVal, minLoc, maxLoc);
BufferPool pool(stream);
GpuMat valBuf(pool.getAllocator());
GpuMat locBuf(pool.getAllocator());
const func_t func = funcs[src_depth];
func(src, mask, valBuf, locBuf, stream);
GpuMat minMaxVals = valBuf.colRange(0, 1);
GpuMat loc = locBuf.colRange(0, 1);
if (_minMaxVals.kind() == _InputArray::CUDA_GPU_MAT)
{
minMaxVals.copyTo(_minMaxVals, stream);
}
else
{
minMaxVals.download(_minMaxVals, stream);
}
if (_loc.kind() == _InputArray::CUDA_GPU_MAT)
{
loc.copyTo(_loc, stream);
}
else
{
loc.download(_loc, stream);
}
}
void cv::cuda::minMaxLoc(InputArray _src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc, InputArray _mask)
{
Stream& stream = Stream::Null();
HostMem minMaxVals, locVals;
findMinMaxLoc(_src, minMaxVals, locVals, _mask, stream);
stream.waitForCompletion();
double vals[2];
minMaxVals.createMatHeader().convertTo(Mat(minMaxVals.size(), CV_64FC1, &vals[0]), CV_64F);
int locs[2];
locVals.createMatHeader().copyTo(Mat(locVals.size(), CV_32SC1, &locs[0]));
Size size = _src.size();
cv::Point locs2D[] = {
cv::Point(locs[0] % size.width, locs[0] / size.width),
cv::Point(locs[1] % size.width, locs[1] / size.width),
};
if (minVal)
*minVal = vals[0];
if (maxVal)
*maxVal = vals[1];
if (minLoc)
*minLoc = locs2D[0];
if (maxLoc)
*maxLoc = locs2D[1];
}
#endif

View File

@@ -50,70 +50,140 @@
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
double normDiffInf(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _buf)
void normDiffInf(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _dst, Stream& stream)
{
const GpuMat_<uchar>& src1 = (const GpuMat_<uchar>&) _src1;
const GpuMat_<uchar>& src2 = (const GpuMat_<uchar>&) _src2;
GpuMat_<int>& buf = (GpuMat_<int>&) _buf;
GpuMat_<int>& dst = (GpuMat_<int>&) _dst;
gridFindMinMaxVal(abs_(cvt_<int>(src1) - cvt_<int>(src2)), buf);
int data[2];
buf.download(cv::Mat(1, 2, buf.type(), data));
return data[1];
gridFindMaxVal(abs_(cvt_<int>(src1) - cvt_<int>(src2)), dst, stream);
}
double normDiffL1(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _buf)
void normDiffL1(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _dst, Stream& stream)
{
const GpuMat_<uchar>& src1 = (const GpuMat_<uchar>&) _src1;
const GpuMat_<uchar>& src2 = (const GpuMat_<uchar>&) _src2;
GpuMat_<int>& buf = (GpuMat_<int>&) _buf;
GpuMat_<int>& dst = (GpuMat_<int>&) _dst;
gridCalcSum(abs_(cvt_<int>(src1) - cvt_<int>(src2)), buf);
int data;
buf.download(cv::Mat(1, 1, buf.type(), &data));
return data;
gridCalcSum(abs_(cvt_<int>(src1) - cvt_<int>(src2)), dst, stream);
}
double normDiffL2(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _buf)
void normDiffL2(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _dst, Stream& stream)
{
const GpuMat_<uchar>& src1 = (const GpuMat_<uchar>&) _src1;
const GpuMat_<uchar>& src2 = (const GpuMat_<uchar>&) _src2;
GpuMat_<double>& buf = (GpuMat_<double>&) _buf;
GpuMat_<double>& dst = (GpuMat_<double>&) _dst;
gridCalcSum(sqr_(cvt_<double>(src1) - cvt_<double>(src2)), buf);
BufferPool pool(stream);
GpuMat_<double> buf(1, 1, pool.getAllocator());
double data;
buf.download(cv::Mat(1, 1, buf.type(), &data));
return std::sqrt(data);
gridCalcSum(sqr_(cvt_<double>(src1) - cvt_<double>(src2)), buf, stream);
gridTransformUnary(buf, dst, sqrt_func<double>(), stream);
}
}
double cv::cuda::norm(InputArray _src1, InputArray _src2, GpuMat& buf, int normType)
void cv::cuda::calcNormDiff(InputArray _src1, InputArray _src2, OutputArray _dst, int normType, Stream& stream)
{
typedef double (*func_t)(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _buf);
typedef void (*func_t)(const GpuMat& _src1, const GpuMat& _src2, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
{
0, normDiffInf, normDiffL1, 0, normDiffL2
};
GpuMat src1 = _src1.getGpuMat();
GpuMat src2 = _src2.getGpuMat();
GpuMat src1 = getInputMat(_src1, stream);
GpuMat src2 = getInputMat(_src2, stream);
CV_Assert( src1.type() == CV_8UC1 );
CV_Assert( src1.size() == src2.size() && src1.type() == src2.type() );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
return funcs[normType](src1, src2, buf);
GpuMat dst = getOutputMat(_dst, 1, 1, normType == NORM_L2 ? CV_64FC1 : CV_32SC1, stream);
const func_t func = funcs[normType];
func(src1, src2, dst, stream);
syncOutput(dst, _dst, stream);
}
double cv::cuda::norm(InputArray _src1, InputArray _src2, int normType)
{
Stream& stream = Stream::Null();
HostMem dst;
calcNormDiff(_src1, _src2, dst, normType, stream);
stream.waitForCompletion();
double val;
dst.createMatHeader().convertTo(Mat(1, 1, CV_64FC1, &val), CV_64F);
return val;
}
namespace cv { namespace cuda { namespace internal {
void normL2(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask, Stream& stream);
}}}
namespace
{
template <typename T, typename R>
void normL2Impl(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream)
{
const GpuMat_<T>& src = (const GpuMat_<T>&) _src;
GpuMat_<R>& dst = (GpuMat_<R>&) _dst;
BufferPool pool(stream);
GpuMat_<double> buf(1, 1, pool.getAllocator());
if (mask.empty())
{
gridCalcSum(sqr_(cvt_<double>(src)), buf, stream);
}
else
{
gridCalcSum(sqr_(cvt_<double>(src)), buf, globPtr<uchar>(mask), stream);
}
gridTransformUnary(buf, dst, sqrt_func<double>(), stream);
}
}
void cv::cuda::internal::normL2(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
{
normL2Impl<uchar, double>,
normL2Impl<schar, double>,
normL2Impl<ushort, double>,
normL2Impl<short, double>,
normL2Impl<int, double>,
normL2Impl<float, double>,
normL2Impl<double, double>
};
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_Assert( src.channels() == 1 );
CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
GpuMat dst = getOutputMat(_dst, 1, 1, CV_64FC1, stream);
const func_t func = funcs[src.depth()];
func(src, mask, dst, stream);
syncOutput(dst, _dst, stream);
}
#endif

View File

@@ -0,0 +1,290 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace {
template <typename T, typename R, typename I>
struct ConvertorMinMax : unary_function<T, R>
{
typedef typename LargerType<T, R>::type larger_type1;
typedef typename LargerType<larger_type1, I>::type larger_type2;
typedef typename LargerType<larger_type2, float>::type scalar_type;
scalar_type dmin, dmax;
const I* minMaxVals;
__device__ R operator ()(typename TypeTraits<T>::parameter_type src) const
{
const scalar_type smin = minMaxVals[0];
const scalar_type smax = minMaxVals[1];
const scalar_type scale = (dmax - dmin) * (smax - smin > numeric_limits<scalar_type>::epsilon() ? 1.0 / (smax - smin) : 0.0);
const scalar_type shift = dmin - smin * scale;
return cudev::saturate_cast<R>(scale * src + shift);
}
};
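// Editor's note: the functor above maps the observed range [smin, smax] onto
// the requested [dmin, dmax] via dst = scale * src + shift, with
//   scale = (dmax - dmin) / (smax - smin)  (forced to 0 for a degenerate range)
//   shift = dmin - smin * scale
// so constant images normalize to dmin rather than dividing by zero.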
template <typename T, typename R, typename I>
void normalizeMinMax(const GpuMat& _src, GpuMat& _dst, double a, double b, const GpuMat& mask, Stream& stream)
{
const GpuMat_<T>& src = (const GpuMat_<T>&)_src;
GpuMat_<R>& dst = (GpuMat_<R>&)_dst;
BufferPool pool(stream);
GpuMat_<I> minMaxVals(1, 2, pool.getAllocator());
if (mask.empty())
{
gridFindMinMaxVal(src, minMaxVals, stream);
}
else
{
gridFindMinMaxVal(src, minMaxVals, globPtr<uchar>(mask), stream);
}
ConvertorMinMax<T, R, I> cvt;
cvt.dmin = std::min(a, b);
cvt.dmax = std::max(a, b);
cvt.minMaxVals = minMaxVals[0];
if (mask.empty())
{
gridTransformUnary(src, dst, cvt, stream);
}
else
{
dst.setTo(Scalar::all(0), stream);
gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
}
}
template <typename T, typename R, typename I, bool normL2>
struct ConvertorNorm : unary_function<T, R>
{
typedef typename LargerType<T, R>::type larger_type1;
typedef typename LargerType<larger_type1, I>::type larger_type2;
typedef typename LargerType<larger_type2, float>::type scalar_type;
scalar_type a;
const I* normVal;
__device__ R operator ()(typename TypeTraits<T>::parameter_type src) const
{
sqrt_func<scalar_type> sqrt;
scalar_type scale = normL2 ? sqrt(*normVal) : *normVal;
scale = scale > numeric_limits<scalar_type>::epsilon() ? a / scale : 0.0;
return cudev::saturate_cast<R>(scale * src);
}
};
template <typename T, typename R, typename I>
void normalizeNorm(const GpuMat& _src, GpuMat& _dst, double a, int normType, const GpuMat& mask, Stream& stream)
{
const GpuMat_<T>& src = (const GpuMat_<T>&)_src;
GpuMat_<R>& dst = (GpuMat_<R>&)_dst;
BufferPool pool(stream);
GpuMat_<I> normVal(1, 1, pool.getAllocator());
if (normType == NORM_L1)
{
if (mask.empty())
{
gridCalcSum(abs_(cvt_<I>(src)), normVal, stream);
}
else
{
gridCalcSum(abs_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
}
}
else if (normType == NORM_L2)
{
if (mask.empty())
{
gridCalcSum(sqr_(cvt_<I>(src)), normVal, stream);
}
else
{
gridCalcSum(sqr_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
}
}
else // NORM_INF
{
if (mask.empty())
{
gridFindMaxVal(abs_(cvt_<I>(src)), normVal, stream);
}
else
{
gridFindMaxVal(abs_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
}
}
if (normType == NORM_L2)
{
ConvertorNorm<T, R, I, true> cvt;
cvt.a = a;
cvt.normVal = normVal[0];
if (mask.empty())
{
gridTransformUnary(src, dst, cvt, stream);
}
else
{
dst.setTo(Scalar::all(0), stream);
gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
}
}
else
{
ConvertorNorm<T, R, I, false> cvt;
cvt.a = a;
cvt.normVal = normVal[0];
if (mask.empty())
{
gridTransformUnary(src, dst, cvt, stream);
}
else
{
dst.setTo(Scalar::all(0), stream);
gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
}
}
}
} // namespace
void cv::cuda::normalize(InputArray _src, OutputArray _dst, double a, double b, int normType, int dtype, InputArray _mask, Stream& stream)
{
typedef void (*func_minmax_t)(const GpuMat& _src, GpuMat& _dst, double a, double b, const GpuMat& mask, Stream& stream);
typedef void (*func_norm_t)(const GpuMat& _src, GpuMat& _dst, double a, int normType, const GpuMat& mask, Stream& stream);
static const func_minmax_t funcs_minmax[] =
{
normalizeMinMax<uchar, float, float>,
normalizeMinMax<schar, float, float>,
normalizeMinMax<ushort, float, float>,
normalizeMinMax<short, float, float>,
normalizeMinMax<int, float, float>,
normalizeMinMax<float, float, float>,
normalizeMinMax<double, double, double>
};
static const func_norm_t funcs_norm[] =
{
normalizeNorm<uchar, float, float>,
normalizeNorm<schar, float, float>,
normalizeNorm<ushort, float, float>,
normalizeNorm<short, float, float>,
normalizeNorm<int, float, float>,
normalizeNorm<float, float, float>,
normalizeNorm<double, double, double>
};
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_MINMAX );
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_Assert( src.channels() == 1 );
CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );
dtype = CV_MAT_DEPTH(dtype);
const int src_depth = src.depth();
const int tmp_depth = src_depth <= CV_32F ? CV_32F : src_depth;
GpuMat dst;
if (dtype == tmp_depth)
{
_dst.create(src.size(), tmp_depth);
dst = getOutputMat(_dst, src.size(), tmp_depth, stream);
}
else
{
BufferPool pool(stream);
dst = pool.getBuffer(src.size(), tmp_depth);
}
if (normType == NORM_MINMAX)
{
const func_minmax_t func = funcs_minmax[src_depth];
func(src, dst, a, b, mask, stream);
}
else
{
const func_norm_t func = funcs_norm[src_depth];
func(src, dst, a, normType, mask, stream);
}
if (dtype == tmp_depth)
{
syncOutput(dst, _dst, stream);
}
else
{
dst.convertTo(_dst, dtype, stream);
}
}
#endif

View File

@@ -50,126 +50,153 @@
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;
namespace
{
template <typename T, typename R, int cn>
cv::Scalar sumImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf)
void sumImpl(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream)
{
typedef typename MakeVec<T, cn>::type src_type;
typedef typename MakeVec<R, cn>::type res_type;
const GpuMat_<src_type>& src = (const GpuMat_<src_type>&) _src;
GpuMat_<res_type>& buf = (GpuMat_<res_type>&) _buf;
GpuMat_<res_type>& dst = (GpuMat_<res_type>&) _dst;
if (mask.empty())
gridCalcSum(src, buf);
gridCalcSum(src, dst, stream);
else
gridCalcSum(src, buf, globPtr<uchar>(mask));
cv::Scalar_<R> res;
cv::Mat res_mat(buf.size(), buf.type(), res.val);
buf.download(res_mat);
return res;
gridCalcSum(src, dst, globPtr<uchar>(mask), stream);
}
template <typename T, typename R, int cn>
cv::Scalar sumAbsImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf)
void sumAbsImpl(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream)
{
typedef typename MakeVec<T, cn>::type src_type;
typedef typename MakeVec<R, cn>::type res_type;
const GpuMat_<src_type>& src = (const GpuMat_<src_type>&) _src;
GpuMat_<res_type>& buf = (GpuMat_<res_type>&) _buf;
GpuMat_<res_type>& dst = (GpuMat_<res_type>&) _dst;
if (mask.empty())
gridCalcSum(abs_(cvt_<res_type>(src)), buf);
gridCalcSum(abs_(cvt_<res_type>(src)), dst, stream);
else
gridCalcSum(abs_(cvt_<res_type>(src)), buf, globPtr<uchar>(mask));
cv::Scalar_<R> res;
cv::Mat res_mat(buf.size(), buf.type(), res.val);
buf.download(res_mat);
return res;
gridCalcSum(abs_(cvt_<res_type>(src)), dst, globPtr<uchar>(mask), stream);
}
template <typename T, typename R, int cn>
cv::Scalar sumSqrImpl(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf)
void sumSqrImpl(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream)
{
typedef typename MakeVec<T, cn>::type src_type;
typedef typename MakeVec<R, cn>::type res_type;
const GpuMat_<src_type>& src = (const GpuMat_<src_type>&) _src;
GpuMat_<res_type>& buf = (GpuMat_<res_type>&) _buf;
GpuMat_<res_type>& dst = (GpuMat_<res_type>&) _dst;
if (mask.empty())
gridCalcSum(sqr_(cvt_<res_type>(src)), buf);
gridCalcSum(sqr_(cvt_<res_type>(src)), dst, stream);
else
gridCalcSum(sqr_(cvt_<res_type>(src)), buf, globPtr<uchar>(mask));
cv::Scalar_<R> res;
cv::Mat res_mat(buf.size(), buf.type(), res.val);
buf.download(res_mat);
return res;
gridCalcSum(sqr_(cvt_<res_type>(src)), dst, globPtr<uchar>(mask), stream);
}
}
cv::Scalar cv::cuda::sum(InputArray _src, InputArray _mask, GpuMat& buf)
void cv::cuda::calcSum(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef cv::Scalar (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf);
typedef void (*func_t)(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[7][4] =
{
{sumImpl<uchar , uint , 1>, sumImpl<uchar , uint , 2>, sumImpl<uchar , uint , 3>, sumImpl<uchar , uint , 4>},
{sumImpl<schar , int , 1>, sumImpl<schar , int , 2>, sumImpl<schar , int , 3>, sumImpl<schar , int , 4>},
{sumImpl<ushort, uint , 1>, sumImpl<ushort, uint , 2>, sumImpl<ushort, uint , 3>, sumImpl<ushort, uint , 4>},
{sumImpl<short , int , 1>, sumImpl<short , int , 2>, sumImpl<short , int , 3>, sumImpl<short , int , 4>},
{sumImpl<int , int , 1>, sumImpl<int , int , 2>, sumImpl<int , int , 3>, sumImpl<int , int , 4>},
{sumImpl<float , float , 1>, sumImpl<float , float , 2>, sumImpl<float , float , 3>, sumImpl<float , float , 4>},
{sumImpl<uchar , double, 1>, sumImpl<uchar , double, 2>, sumImpl<uchar , double, 3>, sumImpl<uchar , double, 4>},
{sumImpl<schar , double, 1>, sumImpl<schar , double, 2>, sumImpl<schar , double, 3>, sumImpl<schar , double, 4>},
{sumImpl<ushort, double, 1>, sumImpl<ushort, double, 2>, sumImpl<ushort, double, 3>, sumImpl<ushort, double, 4>},
{sumImpl<short , double, 1>, sumImpl<short , double, 2>, sumImpl<short , double, 3>, sumImpl<short , double, 4>},
{sumImpl<int , double, 1>, sumImpl<int , double, 2>, sumImpl<int , double, 3>, sumImpl<int , double, 4>},
{sumImpl<float , double, 1>, sumImpl<float , double, 2>, sumImpl<float , double, 3>, sumImpl<float , double, 4>},
{sumImpl<double, double, 1>, sumImpl<double, double, 2>, sumImpl<double, double, 3>, sumImpl<double, double, 4>}
};
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_DbgAssert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
const func_t func = funcs[src.depth()][src.channels() - 1];
const int src_depth = src.depth();
const int channels = src.channels();
return func(src, mask, buf);
GpuMat dst = getOutputMat(_dst, 1, 1, CV_64FC(channels), stream);
const func_t func = funcs[src_depth][channels - 1];
func(src, dst, mask, stream);
syncOutput(dst, _dst, stream);
}
cv::Scalar cv::cuda::absSum(InputArray _src, InputArray _mask, GpuMat& buf)
cv::Scalar cv::cuda::sum(InputArray _src, InputArray _mask)
{
typedef cv::Scalar (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf);
Stream& stream = Stream::Null();
HostMem dst;
calcSum(_src, dst, _mask, stream);
stream.waitForCompletion();
cv::Scalar val;
dst.createMatHeader().convertTo(cv::Mat(dst.size(), CV_64FC(dst.channels()), val.val), CV_64F);
return val;
}
void cv::cuda::calcAbsSum(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[7][4] =
{
{sumAbsImpl<uchar , uint , 1>, sumAbsImpl<uchar , uint , 2>, sumAbsImpl<uchar , uint , 3>, sumAbsImpl<uchar , uint , 4>},
{sumAbsImpl<schar , int , 1>, sumAbsImpl<schar , int , 2>, sumAbsImpl<schar , int , 3>, sumAbsImpl<schar , int , 4>},
{sumAbsImpl<ushort, uint , 1>, sumAbsImpl<ushort, uint , 2>, sumAbsImpl<ushort, uint , 3>, sumAbsImpl<ushort, uint , 4>},
{sumAbsImpl<short , int , 1>, sumAbsImpl<short , int , 2>, sumAbsImpl<short , int , 3>, sumAbsImpl<short , int , 4>},
{sumAbsImpl<int , int , 1>, sumAbsImpl<int , int , 2>, sumAbsImpl<int , int , 3>, sumAbsImpl<int , int , 4>},
{sumAbsImpl<float , float , 1>, sumAbsImpl<float , float , 2>, sumAbsImpl<float , float , 3>, sumAbsImpl<float , float , 4>},
{sumAbsImpl<uchar , double, 1>, sumAbsImpl<uchar , double, 2>, sumAbsImpl<uchar , double, 3>, sumAbsImpl<uchar , double, 4>},
{sumAbsImpl<schar , double, 1>, sumAbsImpl<schar , double, 2>, sumAbsImpl<schar , double, 3>, sumAbsImpl<schar , double, 4>},
{sumAbsImpl<ushort, double, 1>, sumAbsImpl<ushort, double, 2>, sumAbsImpl<ushort, double, 3>, sumAbsImpl<ushort, double, 4>},
{sumAbsImpl<short , double, 1>, sumAbsImpl<short , double, 2>, sumAbsImpl<short , double, 3>, sumAbsImpl<short , double, 4>},
{sumAbsImpl<int , double, 1>, sumAbsImpl<int , double, 2>, sumAbsImpl<int , double, 3>, sumAbsImpl<int , double, 4>},
{sumAbsImpl<float , double, 1>, sumAbsImpl<float , double, 2>, sumAbsImpl<float , double, 3>, sumAbsImpl<float , double, 4>},
{sumAbsImpl<double, double, 1>, sumAbsImpl<double, double, 2>, sumAbsImpl<double, double, 3>, sumAbsImpl<double, double, 4>}
};
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_DbgAssert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
const func_t func = funcs[src.depth()][src.channels() - 1];
const int src_depth = src.depth();
const int channels = src.channels();
return func(src, mask, buf);
GpuMat dst = getOutputMat(_dst, 1, 1, CV_64FC(channels), stream);
const func_t func = funcs[src_depth][channels - 1];
func(src, dst, mask, stream);
syncOutput(dst, _dst, stream);
}
cv::Scalar cv::cuda::sqrSum(InputArray _src, InputArray _mask, GpuMat& buf)
cv::Scalar cv::cuda::absSum(InputArray _src, InputArray _mask)
{
typedef cv::Scalar (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _buf);
Stream& stream = Stream::Null();
HostMem dst;
calcAbsSum(_src, dst, _mask, stream);
stream.waitForCompletion();
cv::Scalar val;
dst.createMatHeader().convertTo(cv::Mat(dst.size(), CV_64FC(dst.channels()), val.val), CV_64F);
return val;
}
void cv::cuda::calcSqrSum(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, GpuMat& _dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[7][4] =
{
{sumSqrImpl<uchar , double, 1>, sumSqrImpl<uchar , double, 2>, sumSqrImpl<uchar , double, 3>, sumSqrImpl<uchar , double, 4>},
@@ -181,14 +208,35 @@ cv::Scalar cv::cuda::sqrSum(InputArray _src, InputArray _mask, GpuMat& buf)
{sumSqrImpl<double, double, 1>, sumSqrImpl<double, double, 2>, sumSqrImpl<double, double, 3>, sumSqrImpl<double, double, 4>}
};
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
const GpuMat src = getInputMat(_src, stream);
const GpuMat mask = getInputMat(_mask, stream);
CV_DbgAssert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
const func_t func = funcs[src.depth()][src.channels() - 1];
const int src_depth = src.depth();
const int channels = src.channels();
return func(src, mask, buf);
GpuMat dst = getOutputMat(_dst, 1, 1, CV_64FC(channels), stream);
const func_t func = funcs[src_depth][channels - 1];
func(src, dst, mask, stream);
syncOutput(dst, _dst, stream);
}
cv::Scalar cv::cuda::sqrSum(InputArray _src, InputArray _mask)
{
Stream& stream = Stream::Null();
HostMem dst;
calcSqrSum(_src, dst, _mask, stream);
stream.waitForCompletion();
cv::Scalar val;
dst.createMatHeader().convertTo(cv::Mat(dst.size(), CV_64FC(dst.channels()), val.val), CV_64F);
return val;
}
#endif

View File

@@ -47,110 +47,106 @@ using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
double cv::cuda::norm(InputArray, int, InputArray, GpuMat&) { throw_no_cuda(); return 0.0; }
double cv::cuda::norm(InputArray, InputArray, GpuMat&, int) { throw_no_cuda(); return 0.0; }
double cv::cuda::norm(InputArray, int, InputArray) { throw_no_cuda(); return 0.0; }
void cv::cuda::calcNorm(InputArray, OutputArray, int, InputArray, Stream&) { throw_no_cuda(); }
double cv::cuda::norm(InputArray, InputArray, int) { throw_no_cuda(); return 0.0; }
void cv::cuda::calcNormDiff(InputArray, InputArray, OutputArray, int, Stream&) { throw_no_cuda(); }
Scalar cv::cuda::sum(InputArray, InputArray, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::cuda::absSum(InputArray, InputArray, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::cuda::sqrSum(InputArray, InputArray, GpuMat&) { throw_no_cuda(); return Scalar(); }
Scalar cv::cuda::sum(InputArray, InputArray) { throw_no_cuda(); return Scalar(); }
void cv::cuda::calcSum(InputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
Scalar cv::cuda::absSum(InputArray, InputArray) { throw_no_cuda(); return Scalar(); }
void cv::cuda::calcAbsSum(InputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
Scalar cv::cuda::sqrSum(InputArray, InputArray) { throw_no_cuda(); return Scalar(); }
void cv::cuda::calcSqrSum(InputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::minMax(InputArray, double*, double*, InputArray, GpuMat&) { throw_no_cuda(); }
void cv::cuda::minMaxLoc(InputArray, double*, double*, Point*, Point*, InputArray, GpuMat&, GpuMat&) { throw_no_cuda(); }
void cv::cuda::minMax(InputArray, double*, double*, InputArray) { throw_no_cuda(); }
void cv::cuda::findMinMax(InputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::minMaxLoc(InputArray, double*, double*, Point*, Point*, InputArray) { throw_no_cuda(); }
void cv::cuda::findMinMaxLoc(InputArray, OutputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
int cv::cuda::countNonZero(InputArray, GpuMat&) { throw_no_cuda(); return 0; }
int cv::cuda::countNonZero(InputArray) { throw_no_cuda(); return 0; }
void cv::cuda::countNonZero(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::reduce(InputArray, OutputArray, int, int, int, Stream&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, Scalar&, Scalar&, GpuMat&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, Scalar&, Scalar&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::rectStdDev(InputArray, InputArray, OutputArray, Rect, Stream&) { throw_no_cuda(); }
void cv::cuda::normalize(InputArray, OutputArray, double, double, int, int, InputArray, GpuMat&, GpuMat&) { throw_no_cuda(); }
void cv::cuda::normalize(InputArray, OutputArray, double, double, int, int, InputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::integral(InputArray, OutputArray, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::sqrIntegral(InputArray, OutputArray, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::integral(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::sqrIntegral(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
#else
namespace
{
class DeviceBuffer
{
public:
explicit DeviceBuffer(int count_ = 1) : count(count_)
{
cudaSafeCall( cudaMalloc(&pdev, count * sizeof(double)) );
}
~DeviceBuffer()
{
cudaSafeCall( cudaFree(pdev) );
}
operator double*() {return pdev;}
void download(double* hptr)
{
double hbuf;
cudaSafeCall( cudaMemcpy(&hbuf, pdev, sizeof(double), cudaMemcpyDeviceToHost) );
*hptr = hbuf;
}
void download(double** hptrs)
{
AutoBuffer<double, 2 * sizeof(double)> hbuf(count);
cudaSafeCall( cudaMemcpy((void*)hbuf, pdev, count * sizeof(double), cudaMemcpyDeviceToHost) );
for (int i = 0; i < count; ++i)
*hptrs[i] = hbuf[i];
}
private:
double* pdev;
int count;
};
}
////////////////////////////////////////////////////////////////////////
// norm
double cv::cuda::norm(InputArray _src, int normType, InputArray _mask, GpuMat& buf)
{
GpuMat src = _src.getGpuMat();
GpuMat mask = _mask.getGpuMat();
namespace cv { namespace cuda { namespace internal {
void normL2(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask, Stream& stream);
void findMaxAbs(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask, Stream& stream);
}}}
void cv::cuda::calcNorm(InputArray _src, OutputArray dst, int normType, InputArray mask, Stream& stream)
{
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size() && src.channels() == 1) );
GpuMat src = getInputMat(_src, stream);
GpuMat src_single_channel = src.reshape(1);
if (normType == NORM_L1)
return cuda::absSum(src_single_channel, mask, buf)[0];
{
calcAbsSum(src_single_channel, dst, mask, stream);
}
else if (normType == NORM_L2)
{
internal::normL2(src_single_channel, dst, mask, stream);
}
else // NORM_INF
{
internal::findMaxAbs(src_single_channel, dst, mask, stream);
}
}
if (normType == NORM_L2)
return std::sqrt(cuda::sqrSum(src_single_channel, mask, buf)[0]);
double cv::cuda::norm(InputArray _src, int normType, InputArray _mask)
{
Stream& stream = Stream::Null();
// NORM_INF
double min_val, max_val;
cuda::minMax(src_single_channel, &min_val, &max_val, mask, buf);
return std::max(std::abs(min_val), std::abs(max_val));
HostMem dst;
calcNorm(_src, dst, normType, _mask, stream);
stream.waitForCompletion();
double val;
dst.createMatHeader().convertTo(Mat(1, 1, CV_64FC1, &val), CV_64F);
return val;
}
////////////////////////////////////////////////////////////////////////
// meanStdDev
void cv::cuda::meanStdDev(InputArray _src, Scalar& mean, Scalar& stddev, GpuMat& buf)
void cv::cuda::meanStdDev(InputArray _src, OutputArray _dst, Stream& stream)
{
GpuMat src = _src.getGpuMat();
if (!deviceSupports(FEATURE_SET_COMPUTE_13))
CV_Error(cv::Error::StsNotImplemented, "Not sufficient compute capability");
const GpuMat src = getInputMat(_src, stream);
CV_Assert( src.type() == CV_8UC1 );
if (!deviceSupports(FEATURE_SET_COMPUTE_13))
CV_Error(cv::Error::StsNotImplemented, "Not sufficient compute capability");
GpuMat dst = getOutputMat(_dst, 1, 2, CV_64FC1, stream);
NppiSize sz;
sz.width = src.cols;
sz.height = src.rows;
DeviceBuffer dbuf(2);
int bufSize;
#if (CUDA_VERSION <= 4020)
nppSafeCall( nppiMeanStdDev8uC1RGetBufferHostSize(sz, &bufSize) );
@@ -158,14 +154,30 @@ void cv::cuda::meanStdDev(InputArray _src, Scalar& mean, Scalar& stddev, GpuMat&
nppSafeCall( nppiMeanStdDevGetBufferHostSize_8u_C1R(sz, &bufSize) );
#endif
ensureSizeIsEnough(1, bufSize, CV_8UC1, buf);
BufferPool pool(stream);
GpuMat buf = pool.getBuffer(1, bufSize, CV_8UC1);
nppSafeCall( nppiMean_StdDev_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), sz, buf.ptr<Npp8u>(), dbuf, (double*)dbuf + 1) );
NppStreamHandler h(StreamAccessor::getStream(stream));
cudaSafeCall( cudaDeviceSynchronize() );
nppSafeCall( nppiMean_StdDev_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), sz, buf.ptr<Npp8u>(), dst.ptr<Npp64f>(), dst.ptr<Npp64f>() + 1) );
double* ptrs[2] = {mean.val, stddev.val};
dbuf.download(ptrs);
syncOutput(dst, _dst, stream);
}
void cv::cuda::meanStdDev(InputArray _src, Scalar& mean, Scalar& stddev)
{
Stream& stream = Stream::Null();
HostMem dst;
meanStdDev(_src, dst, stream);
stream.waitForCompletion();
double vals[2];
dst.createMatHeader().copyTo(Mat(1, 2, CV_64FC1, &vals[0]));
mean = Scalar(vals[0]);
stddev = Scalar(vals[1]);
}
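The pattern above is the heart of the commit: per-call scratch memory is drawn from the stream's BufferPool instead of a caller-supplied GpuMat. A minimal opt-in sketch from user code, assuming pooling is desired on a worker stream (buffer size illustrative):

#include <opencv2/core/cuda.hpp>

void bufferpool_usage()
{
    // Pooling must be switched on before the first Stream is created.
    cv::cuda::setBufferPoolUsage(true);

    cv::cuda::Stream stream;
    cv::cuda::BufferPool pool(stream);

    // getBuffer() serves memory from the stream's pool; once the GpuMat is
    // released the block returns to the pool, so repeated calls avoid
    // hitting cudaMalloc/cudaFree every time.
    cv::cuda::GpuMat buf = pool.getBuffer(1, 1024, CV_8UC1);

    // ... enqueue work using buf on `stream` ...
}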
//////////////////////////////////////////////////////////////////////////////
@ -173,13 +185,12 @@ void cv::cuda::meanStdDev(InputArray _src, Scalar& mean, Scalar& stddev, GpuMat&
void cv::cuda::rectStdDev(InputArray _src, InputArray _sqr, OutputArray _dst, Rect rect, Stream& _stream)
{
GpuMat src = getInputMat(_src, _stream);
GpuMat sqr = getInputMat(_sqr, _stream);
CV_Assert( src.type() == CV_32SC1 && sqr.type() == CV_64FC1 );
GpuMat dst = getOutputMat(_dst, src.size(), CV_32FC1, _stream);
NppiSize sz;
sz.width = src.cols;
@ -200,45 +211,8 @@ void cv::cuda::rectStdDev(InputArray _src, InputArray _sqr, OutputArray _dst, Re
if (stream == 0)
    cudaSafeCall( cudaDeviceSynchronize() );

syncOutput(dst, _dst, _stream);
}
////////////////////////////////////////////////////////////////////////
// normalize
void cv::cuda::normalize(InputArray _src, OutputArray dst, double a, double b, int norm_type, int dtype, InputArray mask, GpuMat& norm_buf, GpuMat& cvt_buf)
{
GpuMat src = _src.getGpuMat();
double scale = 1, shift = 0;
if (norm_type == NORM_MINMAX)
{
double smin = 0, smax = 0;
double dmin = std::min(a, b), dmax = std::max(a, b);
cuda::minMax(src, &smin, &smax, mask, norm_buf);
scale = (dmax - dmin) * (smax - smin > std::numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
shift = dmin - smin * scale;
}
else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
{
scale = cuda::norm(src, norm_type, mask, norm_buf);
scale = scale > std::numeric_limits<double>::epsilon() ? a / scale : 0.0;
shift = 0;
}
else
{
CV_Error(cv::Error::StsBadArg, "Unknown/unsupported norm type");
}
if (mask.empty())
{
src.convertTo(dst, dtype, scale, shift);
}
else
{
src.convertTo(cvt_buf, dtype, scale, shift);
cvt_buf.copyTo(dst, mask);
}
}
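With the explicit-buffer overloads gone, the scale/shift computation in the removed body reduces to the plain reductions; a hypothetical fragment mirroring it under the new signatures:

#include <algorithm>
#include <limits>
#include <opencv2/cudaarithm.hpp>

// Hypothetical helper: computes the scale/shift the removed normalize body
// derived, but with the buffer-free minMax/norm calls.
static void computeScaleShift(const cv::cuda::GpuMat& src, double a, double b,
                              int norm_type, cv::InputArray mask,
                              double& scale, double& shift)
{
    scale = 1; shift = 0;
    if (norm_type == cv::NORM_MINMAX)
    {
        double smin = 0, smax = 0;
        cv::cuda::minMax(src, &smin, &smax, mask);  // no scratch buffer argument
        double dmin = std::min(a, b), dmax = std::max(a, b);
        scale = (dmax - dmin) * (smax - smin > std::numeric_limits<double>::epsilon()
                                     ? 1.0 / (smax - smin) : 0.0);
        shift = dmin - smin * scale;
    }
    else  // NORM_L1 / NORM_L2 / NORM_INF
    {
        scale = cv::cuda::norm(src, norm_type, mask);
        scale = scale > std::numeric_limits<double>::epsilon() ? a / scale : 0.0;
    }
}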
#endif

View File

@ -74,8 +74,27 @@ CUDA_TEST_P(Norm, Accuracy)
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
double val = cv::cuda::norm(loadMat(src, useRoi), normCode, loadMat(mask, useRoi));
double val_gold = cv::norm(src, normCode, mask);
EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
}
CUDA_TEST_P(Norm, Async)
{
cv::Mat src = randomMat(size, depth);
cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::calcNorm(loadMat(src, useRoi), dst, normCode, loadMat(mask, useRoi), stream);
stream.waitForCompletion();
double val;
dst.createMatHeader().convertTo(cv::Mat(1, 1, CV_64FC1, &val), CV_64F);
double val_gold = cv::norm(src, normCode, mask);
@ -127,6 +146,27 @@ CUDA_TEST_P(NormDiff, Accuracy)
EXPECT_NEAR(val_gold, val, 0.0);
}
CUDA_TEST_P(NormDiff, Async)
{
cv::Mat src1 = randomMat(size, CV_8UC1);
cv::Mat src2 = randomMat(size, CV_8UC1);
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::calcNormDiff(loadMat(src1, useRoi), loadMat(src2, useRoi), dst, normCode, stream);
stream.waitForCompletion();
double val;
const cv::Mat val_mat(1, 1, CV_64FC1, &val);
dst.createMatHeader().convertTo(val_mat, CV_64F);
double val_gold = cv::norm(src1, src2, normCode);
EXPECT_NEAR(val_gold, val, 0.0);
}
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, NormDiff, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
@ -247,6 +287,24 @@ CUDA_TEST_P(Sum, Simple)
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
CUDA_TEST_P(Sum, Simple_Async)
{
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::calcSum(loadMat(src, useRoi), dst, cv::noArray(), stream);
stream.waitForCompletion();
cv::Scalar val;
cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
dst.createMatHeader().convertTo(val_mat, CV_64F);
cv::Scalar val_gold = cv::sum(src);
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
CUDA_TEST_P(Sum, Abs)
{
cv::Scalar val = cv::cuda::absSum(loadMat(src, useRoi));
@ -256,6 +314,24 @@ CUDA_TEST_P(Sum, Abs)
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
CUDA_TEST_P(Sum, Abs_Async)
{
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::calcAbsSum(loadMat(src, useRoi), dst, cv::noArray(), stream);
stream.waitForCompletion();
cv::Scalar val;
cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
dst.createMatHeader().convertTo(val_mat, CV_64F);
cv::Scalar val_gold = absSumGold(src);
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
CUDA_TEST_P(Sum, Sqr)
{
cv::Scalar val = cv::cuda::sqrSum(loadMat(src, useRoi));
@ -265,6 +341,24 @@ CUDA_TEST_P(Sum, Sqr)
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
CUDA_TEST_P(Sum, Sqr_Async)
{
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::calcSqrSum(loadMat(src, useRoi), dst, cv::noArray(), stream);
stream.waitForCompletion();
cv::Scalar val;
cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
dst.createMatHeader().convertTo(val_mat, CV_64F);
cv::Scalar val_gold = sqrSumGold(src);
EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sum, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
@ -321,6 +415,28 @@ CUDA_TEST_P(MinMax, WithoutMask)
}
}
CUDA_TEST_P(MinMax, Async)
{
cv::Mat src = randomMat(size, depth);
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::findMinMax(loadMat(src, useRoi), dst, cv::noArray(), stream);
stream.waitForCompletion();
double vals[2];
const cv::Mat vals_mat(1, 2, CV_64FC1, &vals[0]);
dst.createMatHeader().convertTo(vals_mat, CV_64F);
double minVal_gold, maxVal_gold;
minMaxLocGold(src, &minVal_gold, &maxVal_gold);
EXPECT_DOUBLE_EQ(minVal_gold, vals[0]);
EXPECT_DOUBLE_EQ(maxVal_gold, vals[1]);
}
CUDA_TEST_P(MinMax, WithMask)
{
cv::Mat src = randomMat(size, depth);
@ -471,6 +587,41 @@ CUDA_TEST_P(MinMaxLoc, WithoutMask)
}
}
CUDA_TEST_P(MinMaxLoc, Async)
{
cv::Mat src = randomMat(size, depth);
cv::cuda::Stream stream;
cv::cuda::HostMem minMaxVals, locVals;
cv::cuda::findMinMaxLoc(loadMat(src, useRoi), minMaxVals, locVals, cv::noArray(), stream);
stream.waitForCompletion();
double vals[2];
const cv::Mat vals_mat(2, 1, CV_64FC1, &vals[0]);
minMaxVals.createMatHeader().convertTo(vals_mat, CV_64F);
int locs[2];
const cv::Mat locs_mat(2, 1, CV_32SC1, &locs[0]);
locVals.createMatHeader().copyTo(locs_mat);
cv::Point locs2D[] = {
cv::Point(locs[0] % src.cols, locs[0] / src.cols),
cv::Point(locs[1] % src.cols, locs[1] / src.cols),
};
double minVal_gold, maxVal_gold;
cv::Point minLoc_gold, maxLoc_gold;
minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);
EXPECT_DOUBLE_EQ(minVal_gold, vals[0]);
EXPECT_DOUBLE_EQ(maxVal_gold, vals[1]);
expectEqual(src, minLoc_gold, locs2D[0]);
expectEqual(src, maxLoc_gold, locs2D[1]);
}
CUDA_TEST_P(MinMaxLoc, WithMask)
{
cv::Mat src = randomMat(size, depth);
@ -564,6 +715,7 @@ PARAM_TEST_CASE(CountNonZero, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
int depth;
bool useRoi;
cv::Mat src;
virtual void SetUp()
{
@ -573,15 +725,14 @@ PARAM_TEST_CASE(CountNonZero, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
useRoi = GET_PARAM(3);
cv::cuda::setDevice(devInfo.deviceID());
cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
srcBase.convertTo(src, depth);
}
};
CUDA_TEST_P(CountNonZero, Accuracy)
{
if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
@ -603,6 +754,24 @@ CUDA_TEST_P(CountNonZero, Accuracy)
}
}
CUDA_TEST_P(CountNonZero, Async)
{
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::countNonZero(loadMat(src, useRoi), dst, stream);
stream.waitForCompletion();
int val;
const cv::Mat val_mat(1, 1, CV_32SC1, &val);
dst.createMatHeader().copyTo(val_mat);
int val_gold = cv::countNonZero(src);
ASSERT_EQ(val_gold, val);
}
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CountNonZero, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
@ -750,7 +919,7 @@ CUDA_TEST_P(Normalize, WithMask)
dst_gold.setTo(cv::Scalar::all(0));
cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);
EXPECT_MAT_NEAR(dst_gold, dst, type < CV_32F ? 1.0 : 1e-4);
}
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Normalize, testing::Combine(
@ -811,6 +980,28 @@ CUDA_TEST_P(MeanStdDev, Accuracy)
}
}
CUDA_TEST_P(MeanStdDev, Async)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::cuda::Stream stream;
cv::cuda::HostMem dst;
cv::cuda::meanStdDev(loadMat(src, useRoi), dst, stream);
stream.waitForCompletion();
double vals[2];
dst.createMatHeader().copyTo(cv::Mat(1, 2, CV_64FC1, &vals[0]));
cv::Scalar mean_gold;
cv::Scalar stddev_gold;
cv::meanStdDev(src, mean_gold, stddev_gold);
EXPECT_SCALAR_NEAR(mean_gold, cv::Scalar(vals[0]), 1e-5);
EXPECT_SCALAR_NEAR(stddev_gold, cv::Scalar(vals[1]), 1e-5);
}
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MeanStdDev, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,

View File

@ -266,7 +266,7 @@ namespace
{
int bgfgClassification(const GpuMat& prevFrame, const GpuMat& curFrame,
const GpuMat& Ftd, const GpuMat& Fbd,
GpuMat& foreground,
const FGDParams& params, int out_cn)
{
typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground,
@ -298,7 +298,7 @@ namespace
deltaC, deltaCC, params.alpha2,
params.N1c, params.N1cc, 0);
int count = cuda::countNonZero(foreground);
cuda::multiply(foreground, Scalar::all(255), foreground);
@ -605,8 +605,6 @@ namespace
GpuMat hist_;
GpuMat histBuf_;
GpuMat buf_;
GpuMat filterBrd_;
@ -649,7 +647,7 @@ namespace
changeDetection(prevFrame_, curFrame, Ftd_, hist_, histBuf_);
changeDetection(background_, curFrame, Fbd_, hist_, histBuf_);
int FG_pixels_count = bgfgClassification(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, params_, 4);
#ifdef HAVE_OPENCV_CUDAFILTERS
if (params_.perform_morphing > 0)

View File

@ -542,7 +542,7 @@ namespace
anchor_ = Point(iters_, iters_);
iters_ = 1;
}
else if (iters_ > 1 && cv::countNonZero(kernel) == (int) kernel.total())
{
anchor_ = Point(anchor_.x * iters_, anchor_.y * iters_);
kernel = getStructuringElement(MORPH_RECT,

View File

@ -81,7 +81,6 @@ namespace
GpuMat Dy_;
GpuMat buf_;
GpuMat eig_;
GpuMat tmpCorners_;
};
@ -109,7 +108,7 @@ namespace
cornerCriteria_->compute(image, eig_);
double maxVal = 0;
cuda::minMax(eig_, 0, &maxVal);
ensureSizeIsEnough(1, std::max(1000, static_cast<int>(image.size().area() * 0.05)), CV_32FC2, tmpCorners_);

View File

@ -271,7 +271,6 @@ namespace
private:
Match_CCORR_8U match_CCORR_;
GpuMat image_sqsums_;
};
void Match_CCORR_NORMED_8U::match(InputArray _image, InputArray _templ, OutputArray _result, Stream& stream)
@ -288,7 +287,7 @@ namespace
match_CCORR_.match(image, templ, _result, stream);
GpuMat result = _result.getGpuMat();
cuda::sqrIntegral(image.reshape(1), image_sqsums_, stream);
double templ_sqsum = cuda::sqrSum(templ.reshape(1))[0];
@ -335,7 +334,6 @@ namespace
private:
GpuMat image_sqsums_;
Match_CCORR_8U match_CCORR_;
};
@ -359,7 +357,7 @@ namespace
return;
}
cuda::sqrIntegral(image.reshape(1), image_sqsums_, stream);
double templ_sqsum = cuda::sqrSum(templ.reshape(1))[0];
@ -383,7 +381,6 @@ namespace
private:
GpuMat image_sqsums_;
Match_CCORR_8U match_CCORR_;
};
@ -398,7 +395,7 @@ namespace
CV_Assert( image.type() == templ.type() );
CV_Assert( image.cols >= templ.cols && image.rows >= templ.rows );
cuda::sqrIntegral(image.reshape(1), image_sqsums_, stream);
double templ_sqsum = cuda::sqrSum(templ.reshape(1))[0];
@ -421,7 +418,6 @@ namespace
void match(InputArray image, InputArray templ, OutputArray result, Stream& stream = Stream::Null());
private:
std::vector<GpuMat> images_;
std::vector<GpuMat> image_sums_;
Match_CCORR_8U match_CCORR_;
@ -444,7 +440,7 @@ namespace
if (image.channels() == 1)
{
image_sums_.resize(1);
cuda::integral(image, image_sums_[0], stream);
int templ_sum = (int) cuda::sum(templ)[0];
@ -456,7 +452,7 @@ namespace
image_sums_.resize(images_.size());
for (int i = 0; i < image.channels(); ++i)
cuda::integral(images_[i], image_sums_[i], stream);
Scalar templ_sum = cuda::sum(templ);
@ -501,7 +497,6 @@ namespace
private:
GpuMat imagef_, templf_;
Match_CCORR_32F match_CCORR_32F_;
std::vector<GpuMat> images_;
std::vector<GpuMat> image_sums_;
std::vector<GpuMat> image_sqsums_;
@ -527,10 +522,10 @@ namespace
if (image.channels() == 1)
{
image_sums_.resize(1);
cuda::integral(image, image_sums_[0], stream);
image_sqsums_.resize(1);
cuda::sqrIntegral(image, image_sqsums_[0], stream);
int templ_sum = (int) cuda::sum(templ)[0];
double templ_sqsum = cuda::sqrSum(templ)[0];
@ -547,8 +542,8 @@ namespace
image_sqsums_.resize(images_.size());
for (int i = 0; i < image.channels(); ++i)
{
cuda::integral(images_[i], image_sums_[i], stream);
cuda::sqrIntegral(images_[i], image_sqsums_[i], stream);
}
Scalar templ_sum = cuda::sum(templ);

View File

@ -193,7 +193,7 @@ TEST(cornerHarris)
TEST(integral)
{
Mat src, sum;
cuda::GpuMat d_src, d_sum;
for (int size = 1000; size <= 4000; size *= 2)
{
@ -209,10 +209,10 @@ TEST(integral)
d_src.upload(src);
cuda::integral(d_src, d_sum);
CUDA_ON;
cuda::integral(d_src, d_sum);
CUDA_OFF;
}
}