opencv/modules/dnn/src/cuda/region.cu
Yashas Samaga B L 613c12e590 Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module

* stub cuda4dnn design

* minor fixes for tests and doxygen

* add csl public api directory to module headers

* add low-level CSL components

* add high-level CSL components

* integrate csl::Tensor into backbone code

* switch to CPU iff unsupported; otherwise, fail on error

* add fully connected layer

* add softmax layer

* add activation layers

* support arbitrary rank TensorDescriptor

* pass input wrappers to `initCUDA()`

* add 1d/2d/3d-convolution

* add pooling layer

* reorganize and refactor code

* fixes for gcc, clang and doxygen; remove cxx14/17 code

* add blank_layer

* add LRN layer

* add rounding modes for pooling layer

* split tensor.hpp into tensor.hpp and tensor_ops.hpp

* add concat layer

* add scale layer

* add batch normalization layer

* split math.cu into activations.cu and math.hpp

* add eltwise layer

* add flatten layer

* add tensor transform api

* add asymmetric padding support for convolution layer

* add reshape layer

* fix rebase issues

* add permute layer

* add padding support for concat layer

* refactor and reorganize code

* add normalize layer

* optimize bias addition in scale layer

* add prior box layer

* fix and optimize normalize layer

* add asymmetric padding support for pooling layer

* add event API

* improve pooling performance for some padding scenarios

* avoid over-allocation of compute resources to kernels

* improve prior box performance

* enable layer fusion

* add const layer

* add resize layer

* add slice layer

* add padding layer

* add deconvolution layer

* fix channelwise ReLU initialization

* add vector traits

* add vectorized versions of relu, clipped_relu, power

* add vectorized concat kernels

* improve concat_with_offsets performance

* vectorize scale and bias kernels

* add support for multi-billion element tensors

* vectorize prior box kernels

* fix address alignment check

* improve bias addition performance of conv/deconv/fc layers

* restructure code for supporting multiple targets

* add DNN_TARGET_CUDA_FP64

* add DNN_TARGET_CUDA_FP16

* improve vectorization

* add region layer

* improve tensor API, add dynamic ranks

1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
  - size_range: computes the combined size for a given axis range (see the sketch after this list)
  - tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as it is unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
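
A rough sketch of what the reworked tensor API allows (illustrative only: the names follow the commit description above, but the exact cuda4dnn signatures may differ):

    // combined size of the axes in a range, e.g. C*H*W of an NCHW tensor
    auto inner_size = tensor.size_range(1, 4);

    // wrap an existing device pointer in a non-owning span with a given shape;
    // the rank is now a runtime property of the span
    std::vector<std::size_t> shape { n, c, h, w };
    TensorSpan<float> span(device_ptr, std::begin(shape), std::end(shape));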

* fix parametric relu activation

* add squeeze/unsqueeze tensor API

* add reorg layer

* optimize permute and enable 2d permute

* enable 1d and 2d slice

* add split layer

* add shuffle channel layer

* allow tensors of different ranks in reshape primitive

* patch SliceOp to allow Crop Layer

* allow extra shape inputs in reshape layer

* use `std::move_backward` instead of `std::move` for insert in resizable_static_array

* improve workspace management

* add spatial LRN

* add nms (cpu) to region layer

* add max pooling with argmax (and a fix to limits.hpp)

* add max unpooling layer

* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA

* update supportBackend to be more rigorous

* remove stray include that was breaking the non-CUDA build

* include op_cuda.hpp outside the #if condition

* refactoring, fixes and many optimizations

* drop DNN_TARGET_CUDA_FP64

* fix gcc errors

* increase max. tensor rank limit to six

* add Interp layer

* drop custom layers; use BackendNode

* vectorize activation kernels

* fixes for gcc

* remove wrong assertion

* fix broken assertion in unpooling primitive

* fix build errors in non-CUDA build

* completely remove workspace from public API

* fix permute layer

* enable accuracy and perf. tests for DNN_TARGET_CUDA

* add asynchronous forward

* vectorize eltwise ops

* vectorize fill kernel

* fixes for gcc

* remove CSL headers from public API

* remove csl header source group from cmake

* update min. cudnn version in cmake

* add numerically stable FP32 log1pexp

* refactor code

* add FP16 specialization to cudnn based tensor addition

* vectorize scale1 and bias1 + minor refactoring

* fix doxygen build

* fix invalid alignment assertion

* clear backend wrappers before allocateLayers

* ignore memory lock failures

* do not allocate internal blobs

* integrate NVTX

* add numerically stable half precision log1pexp

* fix indentation, follow coding style, improve docs

* remove accidental modification of IE code

* Revert "add asynchronous forward"

This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.

* [cmake] throw error for unsupported CC versions

* fix rebase issues

* add more docs, refactor code, fix bugs

* minor refactoring and fixes

* resolve warnings/errors from clang

* remove haveCUDA() checks from supportBackend()

* remove NVTX integration

* changes based on review comments

* avoid exception when no CUDA device is present

* add color code for CUDA in Net::dump
2019-10-21 14:28:00 +03:00


// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "math.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "limits.hpp"
#include "vector_traits.hpp"

#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"

#include <opencv2/core.hpp>

#include <cstddef>

using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {

    namespace raw {
        template <class T>
        __global__ void sigmoid_strided(Span<T> output, View<T> input, size_type n, size_type stride, size_type offset) {
            /* - the input is divided into equal blocks strided by `stride`
             * - we must apply sigmoid to a contiguous range of `n` values starting from `offset` in every block
             */
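            /* e.g. with stride = 4, n = 2 and offset = 1, sigmoid is applied to
             * indices 1, 2, 5, 6, 9, 10, ... of the input
             */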
            for (auto i : grid_stride_range(n * output.size() / stride)) {
                auto block_idx = i / n;
                auto index = block_idx * stride + offset + (i % n);

                using device::sigmoid;
                output[index] = sigmoid(input[index]);
            }
        }

        template <class T>
        __global__ void softmax_strided(Span<T> output, View<T> input, size_type n, size_type stride, size_type offset_) {
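            /* computes softmax over a contiguous run of `n` values starting at `offset_`
             * within every `stride`-sized block; reads and writes `output` in place
             */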
            for (auto idx : grid_stride_range(output.size() / stride)) {
                index_type offset = idx * stride + offset_;

                auto largest = numeric_limits<T>::lowest();
                for (int i = 0; i < n; i++) {
                    using device::max;
                    largest = max(largest, output[offset + i]);
                }
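
                /* subtract the per-block maximum before exponentiating so exp() cannot
                 * overflow; the softmax result is unchanged
                 */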
                auto sum = T(0);
                for (int i = 0; i < n; i++) {
                    using device::exp;
                    auto temp = exp(output[offset + i] - largest);
                    sum += temp;
                    output[offset + i] = temp;
                }

                for (int i = 0; i < n; i++) {
                    output[offset + i] /= sum;
                }
            }
        }

        template <class T>
        __global__ void region_finalize(Span<T> output, View<T> input, View<T> bias,
                        T object_prob_cutoff, T class_prob_cutoff,
                        size_type height_norm, size_type width_norm,
                        size_type rows, size_type cols,
                        size_type boxes_per_cell,
                        size_type box_size,
                        size_type classes)
        {
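            /* each box is a contiguous run of `box_size` values: x, y, w, h, the
             * objectness score and `classes` class scores; boxes vary fastest within
             * a cell, and cells are laid out row-major within each batch item
             */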
            for (auto box_index : grid_stride_range(output.size() / box_size)) {
                auto box_of_the_cell = box_index % boxes_per_cell; /* box number within a cell */
                auto box_offset = box_index * box_size;

                auto batch_inner_size = rows * cols * boxes_per_cell;
                auto row_inner_size = cols * boxes_per_cell;
                auto col_inner_size = boxes_per_cell;

                auto y = (box_index % batch_inner_size) / row_inner_size;
                auto x = (box_index % row_inner_size) / col_inner_size;

                using device::sigmoid;
                using device::exp;
                output[box_offset + 0] = (T(x) + sigmoid(input[box_offset + 0])) / T(cols);
                output[box_offset + 1] = (T(y) + sigmoid(input[box_offset + 1])) / T(rows);
                output[box_offset + 2] = exp(input[box_offset + 2]) * bias[2 * box_of_the_cell + 0] / T(width_norm);
                output[box_offset + 3] = exp(input[box_offset + 3]) * bias[2 * box_of_the_cell + 1] / T(height_norm);

                /* squash objectness score into a probability */
                T objectness_prob = sigmoid(output[box_offset + 4]);
                output[box_offset + 4] = objectness_prob;

                /* ignore prediction if the objectness probability is less than the cutoff */
                if (objectness_prob < object_prob_cutoff)
                    objectness_prob = T(0);

                /* the class probabilities we have currently are conditional class probabilities
                 * given the object
                 *
                 * to obtain the actual class probability, we multiply the conditional probability
                 * with the object probability
                 */
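                /* i.e. P(class_i) = P(class_i | object) * P(object) */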
                const index_type class_begin = box_offset + 5; /* 4 box coordinates, 1 obj prob, class probs... */
                const index_type class_end = class_begin + classes;
                index_type offset = class_begin;

                using vector_type = get_vector_type_t<T, 4>;
                /* process each class independently until the offset is aligned to an n-element boundary */
                while (offset % vector_type::size() != 0 && offset < class_end) {
                    T actual_class_prob = objectness_prob * output[offset];
                    if (actual_class_prob <= class_prob_cutoff)
                        actual_class_prob = T(0);
                    output[offset] = actual_class_prob;
                    offset++;
                }
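
                /* `offset` is now aligned to a vector boundary (or the scalar loop
                 * consumed all the classes); the aligned middle section is processed
                 * four elements at a time using vectorized loads and stores
                 */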
                auto output_vPtr = vector_type::get_pointer(output.data() + offset);
                for (int i = 0; (offset + vector_type::size()) < class_end; i++) {
                    vector_type vec;
                    v_load(vec, output_vPtr[i]);
                    for (int j = 0; j < vector_type::size(); j++) {
                        T actual_class_prob = objectness_prob * vec.data[j];
                        if (actual_class_prob <= class_prob_cutoff)
                            actual_class_prob = T(0);
                        vec.data[j] = actual_class_prob;
                    }
                    v_store(output_vPtr[i], vec);
                    offset += vector_type::size();
                }

                /* process the remaining classes */
                while (offset < class_end) {
                    T actual_class_prob = objectness_prob * output[offset];
                    if (actual_class_prob <= class_prob_cutoff)
                        actual_class_prob = T(0);
                    output[offset] = actual_class_prob;
                    offset++;
                }
            }
        }
    }
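
    /* host-side launchers: each computes a launch policy sized to the number of
     * work items and enqueues the kernel on the given stream; explicit
     * instantiations for __half and float follow each definition
     */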
    template <class T>
    void sigmoid_strided(const Stream& stream, Span<T> output, View<T> input, std::size_t n, std::size_t stride, std::size_t offset) {
        CV_Assert(output.size() % stride == 0);

        auto kernel = raw::sigmoid_strided<T>;
        auto policy = make_policy(kernel, n * output.size() / stride, 0, stream);
        launch_kernel(kernel, policy, output, input, n, stride, offset);
    }

    template void sigmoid_strided(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t, std::size_t);
    template void sigmoid_strided(const Stream&, Span<float>, View<float>, std::size_t, std::size_t, std::size_t);

    template <class T>
    void softmax_strided(const Stream& stream, Span<T> output, View<T> input, std::size_t n, std::size_t stride, std::size_t offset) {
        CV_Assert(output.size() % stride == 0);

        auto kernel = raw::softmax_strided<T>;
        auto policy = make_policy(kernel, output.size() / stride, 0, stream);
        launch_kernel(kernel, policy, output, input, n, stride, offset);
    }

    template void softmax_strided(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t, std::size_t);
    template void softmax_strided(const Stream&, Span<float>, View<float>, std::size_t, std::size_t, std::size_t);

    template <class T>
    void region_finalize(const Stream& stream, Span<T> output, View<T> input, View<T> bias,
        T object_prob_cutoff, T class_prob_cutoff,
        std::size_t height_norm, std::size_t width_norm,
        std::size_t rows, std::size_t cols,
        std::size_t boxes_per_cell,
        std::size_t box_size,
        std::size_t classes)
    {
        CV_Assert(output.size() % box_size == 0);

        auto kernel = raw::region_finalize<T>;
        auto policy = make_policy(kernel, output.size() / box_size, 0, stream);
        launch_kernel(kernel, policy, output, input, bias,
            object_prob_cutoff, class_prob_cutoff,
            height_norm, width_norm,
            rows, cols, boxes_per_cell, box_size, classes);
    }

    template void region_finalize(const Stream&, Span<__half>, View<__half>, View<__half>,
        __half, __half, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t);
    template void region_finalize(const Stream&, Span<float>, View<float>, View<float>,
        float, float, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t, std::size_t);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */