// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "test_precomp.hpp"
#include "opencv2/core/ocl.hpp"

namespace opencv_test { namespace {

class DNNTestNetwork : public DNNTestLayer
{
public:
    void processNet(const std::string& weights, const std::string& proto,
                    Size inpSize, const std::string& outputLayer = "",
                    const std::string& halideScheduler = "",
                    double l1 = 0.0, double lInf = 0.0)
    {
        // Create a common input blob.
        int blobSize[] = {1, 3, inpSize.height, inpSize.width};
        Mat inp(4, blobSize, CV_32FC1);
        randu(inp, 0.0f, 1.0f);

        processNet(weights, proto, inp, outputLayer, halideScheduler, l1, lInf);
    }
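
    // The Mat-based overload below does the real work; the Size-based overload
    // above merely fabricates a random 1x3xHxW input blob for tests where any
    // data of the right shape will do.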

    void processNet(std::string weights, std::string proto,
                    Mat inp, const std::string& outputLayer = "",
                    std::string halideScheduler = "",
                    double l1 = 0.0, double lInf = 0.0, double detectionConfThresh = 0.2, bool useWinograd = true)
    {
        checkBackend();
        l1 = l1 ? l1 : default_l1;
        lInf = lInf ? lInf : default_lInf;

        weights = findDataFile(weights, false);
        if (!proto.empty())
            proto = findDataFile(proto);

        // Create two networks - with default backend and target and a tested one.
        Net netDefault = readNet(weights, proto);
        netDefault.setPreferableBackend(DNN_BACKEND_OPENCV);
        netDefault.setInput(inp);
        Mat outDefault = netDefault.forward(outputLayer).clone();

        net = readNet(weights, proto);
        net.setInput(inp);
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        net.enableWinograd(useWinograd);
        if (backend == DNN_BACKEND_HALIDE && !halideScheduler.empty())
        {
            halideScheduler = findDataFile(halideScheduler);
            net.setHalideScheduler(halideScheduler);
        }
        Mat out = net.forward(outputLayer).clone();

        check(outDefault, out, outputLayer, l1, lInf, detectionConfThresh, "First run");

        // Test 2: change input.
        float* inpData = (float*)inp.data;
        for (int i = 0; i < inp.size[0] * inp.size[1]; ++i)
        {
            Mat slice(inp.size[2], inp.size[3], CV_32F, inpData);
            cv::flip(slice, slice, 1);
            inpData += slice.total();
        }
        netDefault.setInput(inp);
        net.setInput(inp);
        outDefault = netDefault.forward(outputLayer).clone();
        out = net.forward(outputLayer).clone();
        check(outDefault, out, outputLayer, l1, lInf, detectionConfThresh, "Second run");
    }
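
    // The protocol above compares a reference OpenCV/CPU network against the
    // tested backend+target twice: once on the original blob and once after the
    // input is mutated in place (each spatial plane flipped horizontally), so
    // stale cached outputs cannot mask a broken second inference. A minimal
    // standalone sketch of the same idea (paths, backend and thresholds here are
    // illustrative only):
    //
    //     Net ref = readNet("model.caffemodel", "model.prototxt");
    //     ref.setPreferableBackend(DNN_BACKEND_OPENCV);
    //     Net tested = readNet("model.caffemodel", "model.prototxt");
    //     tested.setPreferableBackend(DNN_BACKEND_CUDA);
    //     ref.setInput(blob); tested.setInput(blob);
    //     normAssert(ref.forward(), tested.forward(), "ref vs tested", 1e-5, 1e-4);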

    void check(Mat& ref, Mat& out, const std::string& outputLayer, double l1, double lInf,
               double detectionConfThresh, const char* msg)
    {
        if (outputLayer == "detection_out")
        {
            if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
            {
                // Inference Engine produces detections terminated by a row which starts from -1.
                out = out.reshape(1, out.total() / 7);
                int numDetections = 0;
                while (numDetections < out.rows && out.at<float>(numDetections, 0) != -1)
                {
                    numDetections += 1;
                }
                out = out.rowRange(0, numDetections);
            }
            normAssertDetections(ref, out, msg, detectionConfThresh, l1, lInf);
        }
        else
            normAssert(ref, out, msg, l1, lInf);
    }
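
    // Detection blobs are laid out as N rows of the standard DetectionOutput
    // 7-tuple [imageId, classId, confidence, left, top, right, bottom] (hence the
    // reshape to 7 columns above); normAssertDetections matches boxes by class,
    // confidence and IoU rather than by strict row order.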

    Net net;
};
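
// Each TEST_P case below follows one recipe: apply memory/skip tags for the
// (backend, target) pair under test, call processNet() with the model files and
// per-target accuracy thresholds, then assert that the backend executed the
// whole graph without silently falling back to CPU. A hypothetical minimal case
// (the model files are placeholders, not shipped test data):
//
//     TEST_P(DNNTestNetwork, MyModel)
//     {
//         processNet("dnn/my_model.caffemodel", "dnn/my_model.prototxt",
//                    Size(224, 224), "prob");
//         expectNoFallbacksFromIE(net);
//     }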

TEST_P(DNNTestNetwork, AlexNet)
{
    applyTestTag(CV_TEST_TAG_MEMORY_1GB);
    processNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
               Size(227, 227), "prob",
               target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_alexnet.yml" :
                                             "dnn/halide_scheduler_alexnet.yml");
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, ResNet_50)
{
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
        CV_TEST_TAG_DEBUG_LONG
    );

    processNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
               Size(224, 224), "prob",
               target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_resnet_50.yml" :
                                             "dnn/halide_scheduler_resnet_50.yml");
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, SqueezeNet_v1_1)
{
    processNet("dnn/squeezenet_v1.1.caffemodel", "dnn/squeezenet_v1.1.prototxt",
               Size(227, 227), "prob",
               target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_squeezenet_v1_1.yml" :
                                             "dnn/halide_scheduler_squeezenet_v1_1.yml");
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, GoogLeNet)
{
    applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);

    processNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
               Size(224, 224), "prob");
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, Inception_5h)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    double l1 = default_l1, lInf = default_lInf;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL))
    {
        l1 = 1.72e-5;
        lInf = 8e-4;
    }
    processNet("dnn/tensorflow_inception_graph.pb", "", Size(224, 224), "softmax2",
               target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_inception_5h.yml" :
                                             "dnn/halide_scheduler_inception_5h.yml",
               l1, lInf);
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, ENet)
{
    applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);

    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
    processNet("dnn/Enet-model-best.net", "", Size(512, 512), "l367_Deconvolution",
               target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_enet.yml" :
                                             "dnn/halide_scheduler_enet.yml",
               2e-5, 0.15);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1.5e-2 : 0.0;
    float iouDiff = (target == DNN_TARGET_MYRIAD) ? 0.063 : 0.0;
    float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.262 : FLT_MIN;
    processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
               inp, "detection_out", "", scoreDiff, iouDiff, detectionConfThresh);
    expectNoFallbacksFromIE(net);
}
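
// Note on the preprocessing above: blobFromImage subtracts the mean before
// applying the scalefactor, so scale 1/127.5 with mean (127.5, 127.5, 127.5)
// maps 8-bit pixels from [0, 255] into [-1, 1]; swapRB stays false because the
// Caffe model expects BGR channel order.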

TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
{
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // May hang on some configurations
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // IE exception: Ngraph operation Transpose with name conv15_2_mbox_conf_perm has dynamic output shape on 0 port, but CPU plug-in supports only static shape
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
        target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE)
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
        target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
    float scoreDiff = 0.0, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.029;
        iouDiff = 0.09;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.03;
        iouDiff = 0.08;
    }
    processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
               inp, "detection_out", "", scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}
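
// The INF_ENGINE_VER_MAJOR_* guards above compare INF_ENGINE_RELEASE, a packed
// integer encoding of the Inference Engine version (e.g. 2021040000 for release
// 2021.4), so a skip can be pinned to exactly the IE releases that exhibit a
// given bug.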

TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
{
    applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);

    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
    float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.216 : 0.2;
    float scoreDiff = 0.0, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.095;
        iouDiff = 0.09;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.007;
        iouDiff = 0.08;
    }
    processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt",
               inp, "detection_out", "", scoreDiff, iouDiff, detectionConfThresh);
    expectNoFallbacksFromIE(net);
}

TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
{
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
        target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false);
    float scoreDiff = 0.0, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.013;
        iouDiff = 0.06;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.007;
        iouDiff = 0.06;
    }
    processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt",
               inp, "detection_out", "", scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}

TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);

    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
    float scoreDiff = 2e-5, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.013;
        iouDiff = 0.062;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.02;
        iouDiff = 0.07;
    }
    processNet("dnn/ssd_mobilenet_v2_coco_2018_03_29.pb", "dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt",
               inp, "detection_out", "", scoreDiff, iouDiff, 0.25);
    expectNoFallbacksFromIE(net);
}

TEST_P(DNNTestNetwork, SSD_VGG16)
{
    applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
                 CV_TEST_TAG_DEBUG_VERYLONG);
    if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);  // TODO HALIDE_CPU

    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);

    float scoreDiff = 0.0, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16)
    {
        scoreDiff = 0.04;
    }
    else if (target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.0325;
        iouDiff = 0.032;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.03;
        iouDiff = 0.13;
    }
    processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel",
               "dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreDiff,
               iouDiff, 0.2, false);
    expectNoFallbacksFromIE(net);
}
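
// The trailing 'false' above is the useWinograd flag, which processNet forwards
// to Net::enableWinograd(). Winograd convolution trades some numeric accuracy
// for speed; it is disabled here, presumably to keep this model within the
// test's score/IoU thresholds.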

TEST_P(DNNTestNetwork, OpenPose_pose_coco)
{
    applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
                 CV_TEST_TAG_DEBUG_LONG);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.009 : 0.0;
    const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
    processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt",
               Size(46, 46), "", "", l1, lInf);
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
{
    applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
                 CV_TEST_TAG_DEBUG_VERYLONG);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    // output range: [-0.001, 0.97]
    const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.02 : 0.0;
    const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.2 : 0.0;
    processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt",
               Size(46, 46), "", "", l1, lInf);
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}
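
// A minimal sketch (not exercised by the harness) of how a caller would read
// the OpenPose MPI output checked above: the network emits one heatmap per
// body part in NCHW layout, and the peak of a part's heatmap is that part's
// location at heatmap resolution. The function name and the single-image
// batch are illustrative assumptions.
inline Point openposePartPeak(const Mat& heatmaps, int part)
{
    // View one H x W heatmap of the first batch item without copying.
    Mat hm(heatmaps.size[2], heatmaps.size[3], CV_32F,
           (void*)heatmaps.ptr<float>(0, part));
    Point peak;
    minMaxLoc(hm, 0, 0, 0, &peak);  // strongest response = part location
    return peak;
}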

TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
    applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    // The same .caffemodel but a modified .prototxt.
    // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
    processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi_faster_4_stages.prototxt",
               Size(46, 46));
    expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}

TEST_P(DNNTestNetwork, OpenFace)
{
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0024 : 0.0;
    const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.0071 : 0.0;
    processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), "", "", l1, lInf);
    expectNoFallbacksFromCUDA(net);
}
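
// A minimal sketch (not exercised by the harness) of how the OpenFace output
// checked above is normally used: the network maps a 96x96 face crop to a
// 128-D embedding, and two crops are judged to be the same person when their
// embeddings are close. The function name and the 0.99 threshold are
// illustrative assumptions, not values taken from this test suite.
inline bool openfaceSameIdentity(const Mat& emb1, const Mat& emb2, double thresh = 0.99)
{
    return norm(emb1, emb2, NORM_L2) < thresh;
}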

TEST_P(DNNTestNetwork, opencv_face_detector)
{
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    Mat img = imread(findDataFile("gpu/lbpcascade/er.png"));
    Mat inp = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
    processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt",
               inp, "detection_out");
    expectNoFallbacksFromIE(net);
}
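
// A minimal sketch (not exercised by the harness) of decoding the
// "detection_out" blob requested above. SSD-style detectors in OpenCV DNN
// produce a 1x1xNx7 blob whose rows are [batchId, classId, confidence, left,
// top, right, bottom] with coordinates normalized to [0, 1]. The function
// name and the 0.5 threshold are illustrative assumptions.
inline std::vector<Rect> decodeDetections(Mat detectionOut, Size frameSize, float confThresh = 0.5f)
{
    std::vector<Rect> boxes;
    // Reinterpret the 4-D blob as an N x 7 matrix, one row per candidate.
    Mat rows(detectionOut.size[2], detectionOut.size[3], CV_32F, detectionOut.ptr<float>());
    for (int i = 0; i < rows.rows; ++i)
    {
        const float* d = rows.ptr<float>(i);
        if (d[2] < confThresh)
            continue;  // drop low-confidence candidates
        boxes.push_back(Rect(Point(cvRound(d[3] * frameSize.width), cvRound(d[4] * frameSize.height)),
                             Point(cvRound(d[5] * frameSize.width), cvRound(d[6] * frameSize.height))));
    }
    return boxes;
}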

TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
{
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
        CV_TEST_TAG_DEBUG_LONG
    );
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    Mat sample = imread(findDataFile("dnn/street.png"));
    Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
    float scoreDiff = 0.0, iouDiff = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.02;
        iouDiff = 0.1;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.015;
        iouDiff = 0.08;
    }
    processNet("dnn/ssd_inception_v2_coco_2017_11_17.pb", "dnn/ssd_inception_v2_coco_2017_11_17.pbtxt",
               inp, "detection_out", "", scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}
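
// The scoreDiff/iouDiff tolerances above bound how much detections may differ
// from the reference: confidences are compared directly, while boxes are
// compared by intersection-over-union. A minimal sketch of that metric, with
// the function name being an illustrative assumption:
inline double boxIoU(const Rect2d& a, const Rect2d& b)
{
    double inter = (a & b).area();  // empty intersection has zero area
    return inter / (a.area() + b.area() - inter);
}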

TEST_P(DNNTestNetwork, DenseNet_121)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    // Reference output values are in range [-3.807, 4.605]
    float l1 = 0.0, lInf = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 2e-2;
        lInf = 9e-2;
        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            lInf = 0.1f;
    }
    else if (target == DNN_TARGET_MYRIAD)
    {
        l1 = 0.1;
        lInf = 0.6;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.008;
        lInf = 0.06;
    }
    processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", Size(224, 224), "", "", l1, lInf);
    if (target != DNN_TARGET_MYRIAD || getInferenceEngineVPUType() != CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        expectNoFallbacksFromIE(net);
    expectNoFallbacksFromCUDA(net);
}
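
// A minimal sketch (not exercised by the harness) of how the DenseNet-121
// output checked above is normally consumed: the 1x1000 classification blob
// is reduced to a top-1 class index. The function name is an illustrative
// assumption.
inline int topClassIndex(const Mat& prob)
{
    Point classId;
    double confidence;
    minMaxLoc(prob.reshape(1, 1), 0, &confidence, 0, &classId);
    return classId.x;  // column of the maximum = class index
}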

TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB, CV_TEST_TAG_DEBUG_VERYLONG);
    if (backend == DNN_BACKEND_HALIDE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif
    Mat img = imread(findDataFile("dnn/googlenet_1.png"));
    Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
    // Output image has values in range [-143.526, 148.539].
    float l1 = 2e-4, lInf = 2e-3;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        l1 = 0.4;
        lInf = 7.46;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.3;
        lInf = 7.6;
    }
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
    {
        l1 = 5e-3;
        lInf = 5e-3;
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
    {
        lInf = 25;
    }
#endif
    processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
#if defined(HAVE_INF_ENGINE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    expectNoFallbacksFromIE(net);
#endif
    expectNoFallbacksFromCUDA(net);
}
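
// A minimal sketch (not exercised by the harness) of turning the stylized
// 1x3xHxW output checked above back into a displayable image: add back the
// mean that blobFromImage subtracted, then saturate to 8-bit. The function
// name is an illustrative assumption.
inline Mat styledBlobToImage(const Mat& blob)
{
    std::vector<Mat> images;
    dnn::imagesFromBlob(blob, images);  // one CV_32FC3 image per batch item
    Mat img = images[0] + Scalar(103.939, 116.779, 123.68);
    img.convertTo(img, CV_8UC3);        // saturate_cast clips to [0, 255]
    return img;
}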

INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets(true, true, false, true, true));

}} // namespace