// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "perf_precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace opencv_test {

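// One benchmark configuration for a 3-D convolution layer: kernel extents,
// input blob shape (assumed NCDHW), output channel count, group count,
// per-axis strides and dilations, per-side paddings, padding mode
// ("VALID"/"SAME" or empty), bias flag, and the declared FLOP count that
// the test cross-checks against Net::getFLOPS().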
struct Conv3DParam_t {
    int kernel[3];
    struct BlobShape { int dims[5]; } shapeIn;
    int outCN;
    int groups;
    int stride[3];
    int dilation[3];
    int pad[6];
    const char* padMode;
    bool hasBias;
    double declared_flops;
};

// Details: #12142
static const Conv3DParam_t testConvolution3DConfigs[] = {
    {{3, 3, 3}, {{1, 6, 10, 38, 50}}, 6, 1, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "VALID", true, 26956800.},
    {{3, 3, 3}, {{1, 2, 19, 19, 19}}, 2, 2, {2, 2, 2}, {1, 1, 1}, {1, 1, 1, 1, 1, 1}, "", true, 218000.},
    {{3, 3, 3}, {{1, 2, 25, 19, 19}}, 2, 2, {1, 2, 2}, {1, 1, 1}, {2, 2, 2, 2, 2, 2}, "SAME", false, 545000.},
    {{3, 3, 3}, {{1, 11, 9, 150, 200}}, 11, 1, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "VALID", true, 1342562760.},
    {{3, 3, 3}, {{1, 10, 98, 10, 10}}, 10, 1, {1, 1, 1}, {1, 1, 1}, {1, 0, 1, 1, 0, 1}, "SAME", false, 53018000.},
    {{5, 5, 5}, {{1, 6, 19, 19, 19}}, 6, 2, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "", false, 30395250.},
    {{5, 5, 5}, {{1, 4, 50, 19, 19}}, 4, 1, {2, 2, 2}, {1, 1, 1}, {1, 1, 1, 1, 1, 1}, "VALID", false, 5893888.},
    {{5, 5, 5}, {{1, 3, 75, 75, 100}}, 3, 1, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "SAME", true, 1267312500.},
    {{5, 5, 5}, {{1, 2, 21, 75, 100}}, 2, 1, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "", true, 116103744.},
    {{5, 5, 5}, {{1, 4, 40, 75, 75}}, 4, 1, {2, 2, 2}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "", false, 93405312.},
    {{7, 7, 7}, {{1, 6, 15, 19, 19}}, 6, 1, {2, 1, 1}, {1, 1, 1}, {3, 3, 3, 3, 3, 3}, "SAME", true, 71339376.},
    {{7, 7, 7}, {{1, 2, 38, 38, 38}}, 2, 1, {1, 2, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "", false, 44990464.},
    {{1, 1, 1}, {{1, 4, 9, 10, 10}}, 4, 1, {1, 1, 2}, {1, 1, 1}, {1, 1, 1, 1, 1, 1}, "VALID", false, 16200.},
    {{3, 1, 4}, {{1, 14, 5, 10, 10}}, 14, 1, {1, 1, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "SAME", false, 2359000.},
    {{1, 1, 1}, {{1, 8, 1, 10, 10}}, 8, 8, {1, 1, 1}, {1, 1, 1}, {1, 1, 1, 1, 1, 1}, "", true, 58752.},
    {{3, 4, 2}, {{1, 4, 8, 10, 10}}, 4, 4, {1, 2, 1}, {1, 1, 1}, {0, 0, 0, 0, 0, 0}, "", true, 166752.}
};

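// Wraps an index into testConvolution3DConfigs so that googletest can
// pretty-print each parameterized case via PrintTo() below.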
struct Conv3DParamID
{
    enum {
        CONV_0 = 0,
        CONV_100 = 16,
        CONV_LAST = sizeof(testConvolution3DConfigs) / sizeof(testConvolution3DConfigs[0])
    };
    int val_;
    Conv3DParamID(int val = 0) : val_(val) {}
    operator int() const { return val_; }
    static ::testing::internal::ParamGenerator<Conv3DParamID> all()
    {
#if 0
        enum { NUM = (int)CONV_LAST };
#else
        enum { NUM = (int)CONV_100 };
#endif
        Conv3DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv3DParamID(i); } // reduce generated code size
        return ::testing::ValuesIn(v_, v_ + NUM);
    }
};

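// Human-readable case name: GFLOPS estimate, kernel and input sizes, plus any
// non-default group/stride/dilation/padding settings.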
static inline void PrintTo(const Conv3DParamID& v, std::ostream* os)
{
    CV_Assert((int)v >= 0); CV_Assert((int)v < Conv3DParamID::CONV_LAST);
    const Conv3DParam_t& p = testConvolution3DConfigs[(int)v];

    *os << "GFLOPS=" << cv::format("%.3f", p.declared_flops * 1e-9)
        << ", K=[" << p.kernel[0] << " x " << p.kernel[1] << " x " << p.kernel[2] << "]"
        << ", IN={" << p.shapeIn.dims[0] << ", " << p.shapeIn.dims[1] << ", " << p.shapeIn.dims[2] << ", " << p.shapeIn.dims[3] << ", " << p.shapeIn.dims[4] << "}"
        << ", OCN=" << p.outCN;
    if (p.groups > 1)
        *os << ", G=" << p.groups;
    if (p.stride[0] * p.stride[1] * p.stride[2] != 1)
        *os << ", S=[" << p.stride[0] << " x " << p.stride[1] << " x " << p.stride[2] << "]";
    if (p.dilation[0] * p.dilation[1] * p.dilation[2] != 1)
        *os << ", D=[" << p.dilation[0] << " x " << p.dilation[1] << " x " << p.dilation[2] << "]";
    if (p.pad[0] != 0 || p.pad[1] != 0 || p.pad[2] != 0 ||
        p.pad[3] != 0 || p.pad[4] != 0 || p.pad[5] != 0)
        *os << ", P=(" << p.pad[0] << ", " << p.pad[3] << ") x ("
            << p.pad[1] << ", " << p.pad[4] << ") x ("
            << p.pad[2] << ", " << p.pad[5] << ")";
    if (!((std::string)p.padMode).empty())
        *os << ", PM=" << ((std::string)p.padMode);
    if (p.hasBias)
        *os << ", BIAS";
}

typedef tuple<Conv3DParamID, tuple<Backend, Target> > Conv3DTestParam_t;
typedef TestBaseWithParam<Conv3DTestParam_t> Conv3D;

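// Builds a single-layer network around one 3-D convolution, runs a warm-up
// forward pass, reports blob/weight sizes, then times Net::forward().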
PERF_TEST_P_(Conv3D, conv3d)
{
    int test_id = (int)get<0>(GetParam());
    ASSERT_GE(test_id, 0); ASSERT_LT(test_id, Conv3DParamID::CONV_LAST);
    const Conv3DParam_t& params = testConvolution3DConfigs[test_id];
    double declared_flops = params.declared_flops;

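    // Per-axis hyper-parameters are handed to the layer as DictValue arrays.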
    DictValue kernel = DictValue::arrayInt(&params.kernel[0], 3);
    DictValue stride = DictValue::arrayInt(&params.stride[0], 3);
    DictValue pad = DictValue::arrayInt(&params.pad[0], 6);
    DictValue dilation = DictValue::arrayInt(&params.dilation[0], 3);

    MatShape inputShape = MatShape(params.shapeIn.dims, params.shapeIn.dims + 5);
    int outChannels = params.outCN;
    int groups = params.groups;
    std::string padMode(params.padMode);

    bool hasBias = params.hasBias;
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));

    if (targetId != DNN_TARGET_CPU && backendId != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA are supported");

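    // Weight blob is 5-D: (outCN, inCN / groups, kd, kh, kw), filled with random values.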
    int inChannels = inputShape[1];

    int sz[] = {outChannels, inChannels / groups, params.kernel[0], params.kernel[1], params.kernel[2]};
    Mat weights(5, &sz[0], CV_32F);
    randu(weights, -1.0f, 1.0f);

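    // Describe the Convolution layer with the same (Caffe-style) parameter
    // names a loaded model would use.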
    LayerParams lp;
    lp.set("kernel_size", kernel);
    lp.set("pad", pad);
    if (!padMode.empty())
        lp.set("pad_mode", padMode);

    lp.set("stride", stride);
    lp.set("dilation", dilation);
    lp.set("num_output", outChannels);
    lp.set("group", groups);
    lp.set("bias_term", hasBias);
    lp.type = "Convolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);

    if (hasBias)
    {
        Mat bias(1, outChannels, CV_32F);
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }

    int inpSz[] = {1, inChannels, inputShape[2], inputShape[3], inputShape[4]};
    Mat input(5, &inpSz[0], CV_32F);
    randu(input, -1.0f, 1.0f);

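    // Single-layer network: the convolution is attached directly to the input.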
    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);

    net.setInput(input);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

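    // Warm-up pass: triggers memory allocation and backend initialization
    // outside of the timed loop below.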
    Mat output = net.forward();

    MatShape netInputShape = shape(input);
    size_t weightsMemory = 0, blobsMemory = 0;
    net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
    int64 flops = net.getFLOPS(netInputShape);
    CV_Assert(flops > 0);

    std::cout
        << "IN=" << divUp(input.total() * input.elemSize(), 1u<<10) << " Kb " << netInputShape
        << " OUT=" << divUp(output.total() * output.elemSize(), 1u<<10) << " Kb " << shape(output)
        << " Weights(parameters): " << divUp(weightsMemory, 1u<<10) << " Kb"
        << " MFLOPS=" << flops * 1e-6 << std::endl;

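    // Only the forward pass is timed; EXPECT_NEAR then verifies the network's
    // reported FLOPS against the value declared in the config table.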
    TEST_CYCLE()
    {
        Mat res = net.forward();
    }
    EXPECT_NEAR(flops, declared_flops, declared_flops * 1e-6);
    SANITY_CHECK_NOTHING();
}

INSTANTIATE_TEST_CASE_P(/**/, Conv3D, Combine(
    Conv3DParamID::all(),
    dnnBackendsAndTargets(false, false)  // defined in ../test/test_common.hpp
));

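// To run just these benchmarks (assuming the standard name of the OpenCV DNN
// perf binary), a gtest filter such as the following can be used:
//   ./opencv_perf_dnn --gtest_filter='*Conv3D*'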
} // namespace opencv_test