2017-06-26 18:35:51 +08:00
|
|
|
/*M///////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
|
|
|
//
|
|
|
|
// By downloading, copying, installing or using the software you agree to this license.
|
|
|
|
// If you do not agree to this license, do not download, install,
|
|
|
|
// copy or use the software.
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// License Agreement
|
|
|
|
// For Open Source Computer Vision Library
|
|
|
|
//
|
|
|
|
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
|
|
|
// Third party copyrights are property of their respective owners.
|
|
|
|
//
|
|
|
|
// Redistribution and use in source and binary forms, with or without modification,
|
|
|
|
// are permitted provided that the following conditions are met:
|
|
|
|
//
|
|
|
|
// * Redistribution's of source code must retain the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer.
|
|
|
|
//
|
|
|
|
// * Redistribution's in binary form must reproduce the above copyright notice,
|
|
|
|
// this list of conditions and the following disclaimer in the documentation
|
|
|
|
// and/or other materials provided with the distribution.
|
|
|
|
//
|
|
|
|
// * The name of the copyright holders may not be used to endorse or promote products
|
|
|
|
// derived from this software without specific prior written permission.
|
|
|
|
//
|
|
|
|
// This software is provided by the copyright holders and contributors "as is" and
|
|
|
|
// any express or implied warranties, including, but not limited to, the implied
|
|
|
|
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
|
|
|
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
|
|
|
// indirect, incidental, special, exemplary, or consequential damages
|
|
|
|
// (including, but not limited to, procurement of substitute goods or services;
|
|
|
|
// loss of use, data, or profits; or business interruption) however caused
|
|
|
|
// and on any theory of liability, whether in contract, strict liability,
|
|
|
|
// or tort (including negligence or otherwise) arising in any way out of
|
|
|
|
// the use of this software, even if advised of the possibility of such damage.
|
|
|
|
//
|
|
|
|
//M*/
|
|
|
|
|
|
|
|
#include "precomp.hpp"
|
|
|
|
#include "op_halide.hpp"
|
2018-02-06 16:57:35 +08:00
|
|
|
#include "op_inf_engine.hpp"
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
#include "op_vkcom.hpp"
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
#include "op_cuda.hpp"
|
2017-06-26 18:35:51 +08:00
|
|
|
#include "halide_scheduler.hpp"
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
#include <set>
|
|
|
|
#include <algorithm>
|
|
|
|
#include <iostream>
|
|
|
|
#include <sstream>
|
2019-04-13 00:31:07 +08:00
|
|
|
#include <fstream>
|
2017-06-26 18:35:51 +08:00
|
|
|
#include <iterator>
|
2017-08-02 22:27:58 +08:00
|
|
|
#include <numeric>
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
#include <memory>
|
2017-06-26 18:35:51 +08:00
|
|
|
#include <opencv2/dnn/shape_utils.hpp>
|
|
|
|
#include <opencv2/imgproc.hpp>
|
|
|
|
|
2018-01-08 02:38:14 +08:00
|
|
|
#include <opencv2/core/utils/configuration.private.hpp>
|
2018-02-28 20:22:20 +08:00
|
|
|
#include <opencv2/core/utils/logger.hpp>
|
2018-01-08 02:38:14 +08:00
|
|
|
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
#include <opencv2/core/cuda.hpp>
|
|
|
|
|
2017-06-29 03:59:02 +08:00
|
|
|
namespace cv {
|
|
|
|
namespace dnn {
|
2018-09-04 04:20:02 +08:00
|
|
|
CV__DNN_INLINE_NS_BEGIN
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2018-02-12 20:07:39 +08:00
|
|
|
// this option is useful to run valgrind memory errors detection
|
2018-01-08 02:38:14 +08:00
|
|
|
static bool DNN_DISABLE_MEMORY_OPTIMIZATIONS = utils::getConfigurationParameterBool("OPENCV_DNN_DISABLE_MEMORY_OPTIMIZATIONS", false);
|
|
|
|
|
2018-05-17 23:29:04 +08:00
|
|
|
#ifdef HAVE_OPENCL
|
2018-05-16 18:23:19 +08:00
|
|
|
static bool DNN_OPENCL_ALLOW_ALL_DEVICES = utils::getConfigurationParameterBool("OPENCV_DNN_OPENCL_ALLOW_ALL_DEVICES", false);
|
2018-05-17 23:29:04 +08:00
|
|
|
#endif
|
2018-05-16 18:23:19 +08:00
|
|
|
|
2018-06-13 23:55:31 +08:00
|
|
|
static int PARAM_DNN_BACKEND_DEFAULT = (int)utils::getConfigurationParameterSizeT("OPENCV_DNN_BACKEND_DEFAULT",
|
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
(size_t)DNN_BACKEND_INFERENCE_ENGINE
|
|
|
|
#else
|
|
|
|
(size_t)DNN_BACKEND_OPENCV
|
|
|
|
#endif
|
|
|
|
);
|
|
|
|
|
2018-07-20 20:19:44 +08:00
|
|
|
// Additional checks (slowdowns execution!)
|
|
|
|
static bool DNN_CHECK_NAN_INF = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF", false);
|
|
|
|
static bool DNN_CHECK_NAN_INF_DUMP = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_DUMP", false);
|
|
|
|
static bool DNN_CHECK_NAN_INF_RAISE_ERROR = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_RAISE_ERROR", false);
|
2018-06-13 23:55:31 +08:00
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
// Pull a few frequently-used std names into this translation unit's namespace.
using std::vector;
using std::map;
using std::make_pair;
using std::set;

//==================================================================================================
|
|
|
|
|
|
|
|
class BackendRegistry
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
typedef std::vector< std::pair<Backend, Target> > BackendsList;
|
|
|
|
// Returns the list of (backend, target) pairs discovered when the registry was built.
const BackendsList & getBackends() const { return backends; }
|
|
|
|
// Returns the process-wide singleton registry.
// The function-local static is initialized exactly once; under C++11 this
// initialization is thread-safe (Meyers singleton).
static BackendRegistry & getRegistry()
{
    static BackendRegistry impl;
    return impl;
}
|
2019-10-22 00:09:44 +08:00
|
|
|
|
|
|
|
// Probes whether the Inference Engine backend can actually run on the given
// target: builds a minimal one-layer network (1x1 convolution, no bias) and
// attempts a forward pass on it with the IE backend selected.
// Returns true iff the forward pass completes without throwing; always false
// when OpenCV was built without Inference Engine support.
static inline bool checkIETarget(int target)
{
#ifndef HAVE_INF_ENGINE
    (void)target;  // suppress unused-parameter warning when IE is not compiled in
    return false;
#else
    cv::dnn::Net net;
    cv::dnn::LayerParams lp;
    lp.set("kernel_size", 1);
    lp.set("num_output", 1);
    lp.set("bias_term", false);
    lp.type = "Convolution";
    lp.name = "testLayer";
    lp.blobs.push_back(Mat({1, 2, 1, 1}, CV_32F, Scalar(1)));
    net.addLayerToPrev(lp.name, lp.type, lp);
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(target);
    static int inpDims[] = {1, 2, 3, 4};
    net.setInput(cv::Mat(4, &inpDims[0], CV_32FC1, cv::Scalar(0)));
    try
    {
        net.forward();
    }
    catch(...)
    {
        // Any failure (missing plugin, unsupported device, ...) means the
        // target is unusable; report it as unavailable rather than propagate.
        return false;
    }
    return true;
#endif
}
|
|
|
|
|
2018-12-05 23:11:45 +08:00
|
|
|
private:
|
|
|
|
BackendRegistry()
|
|
|
|
{
|
|
|
|
#ifdef HAVE_HALIDE
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_CPU));
|
|
|
|
# ifdef HAVE_OPENCL
|
|
|
|
if (cv::ocl::useOpenCL())
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_HALIDE, DNN_TARGET_OPENCL));
|
|
|
|
# endif
|
|
|
|
#endif // HAVE_HALIDE
|
|
|
|
|
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
if (checkIETarget(DNN_TARGET_CPU))
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
|
|
|
|
if (checkIETarget(DNN_TARGET_MYRIAD))
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
|
|
|
|
if (checkIETarget(DNN_TARGET_FPGA))
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_FPGA));
|
|
|
|
# ifdef HAVE_OPENCL
|
|
|
|
if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
|
|
|
|
{
|
|
|
|
if (checkIETarget(DNN_TARGET_OPENCL))
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
|
|
|
|
if (checkIETarget(DNN_TARGET_OPENCL_FP16))
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
|
|
|
|
}
|
|
|
|
# endif
|
|
|
|
#endif // HAVE_INF_ENGINE
|
|
|
|
|
|
|
|
#ifdef HAVE_OPENCL
|
|
|
|
if (cv::ocl::useOpenCL())
|
|
|
|
{
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL));
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
|
2018-12-06 03:54:52 +08:00
|
|
|
|
|
|
|
#ifdef HAVE_VULKAN
|
2018-12-24 10:41:58 +08:00
|
|
|
if (haveVulkan())
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_VKCOM, DNN_TARGET_VULKAN));
|
2018-12-06 03:54:52 +08:00
|
|
|
#endif
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
|
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
if (haveCUDA()) {
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA));
|
|
|
|
backends.push_back(std::make_pair(DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16));
|
|
|
|
}
|
|
|
|
#endif
|
2018-12-05 23:11:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
BackendsList backends;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
std::vector< std::pair<Backend, Target> > getAvailableBackends()
|
|
|
|
{
|
|
|
|
return BackendRegistry::getRegistry().getBackends();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the targets available for a given backend.
// DNN_BACKEND_DEFAULT is an alias that is resolved here at runtime.
std::vector<Target> getAvailableTargets(Backend be)
{
    if (be == DNN_BACKEND_DEFAULT)
        be = (Backend)PARAM_DNN_BACKEND_DEFAULT;

    std::vector<Target> targets;
    const BackendRegistry::BackendsList all_backends = getAvailableBackends();
    for (BackendRegistry::BackendsList::const_iterator it = all_backends.begin();
         it != all_backends.end(); ++it)
    {
        if (it->first == be)
            targets.push_back(it->second);
    }
    return targets;
}
|
|
|
|
|
|
|
|
//==================================================================================================
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
namespace
|
|
|
|
{
|
|
|
|
typedef std::vector<MatShape> ShapesVec;
|
|
|
|
|
|
|
|
struct LayerShapes
|
|
|
|
{
|
|
|
|
ShapesVec in, out, internal;
|
|
|
|
// No guarantees that layer which support in-place computations
|
|
|
|
// will be computed in-place (input.data_ptr == output.data_ptr).
|
|
|
|
// If layer said that it could work in-place and layers after it
|
|
|
|
// no longer use input blob, we'll set output = input.
|
|
|
|
bool supportInPlace;
|
|
|
|
LayerShapes() {supportInPlace = false;}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2018-01-13 23:17:56 +08:00
|
|
|
// Convenience overload: builds an NCHW blob from a single image and
// returns it by value.  See the OutputArray overload for the semantics
// of scalefactor/size/mean/swapRB/crop/ddepth.
Mat blobFromImage(InputArray image, double scalefactor, const Size& size,
                  const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    Mat result;
    blobFromImage(image, result, scalefactor, size, mean, swapRB, crop, ddepth);
    return result;
}
|
|
|
|
|
2018-01-13 23:17:56 +08:00
|
|
|
// Builds an NCHW blob from a single image by delegating to the
// batched blobFromImages() implementation with a one-element batch.
void blobFromImage(InputArray image, OutputArray blob, double scalefactor,
                   const Size& size, const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    std::vector<Mat> batch(1, image.getMat());
    blobFromImages(batch, blob, scalefactor, size, mean, swapRB, crop, ddepth);
}
|
|
|
|
|
2018-01-13 23:17:56 +08:00
|
|
|
// Convenience overload: builds an NCHW blob from a batch of images and
// returns it by value.
Mat blobFromImages(InputArrayOfArrays images, double scalefactor, Size size,
                   const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    Mat result;
    blobFromImages(images, result, scalefactor, size, mean, swapRB, crop, ddepth);
    return result;
}
|
|
|
|
|
|
|
|
void blobFromImages(InputArrayOfArrays images_, OutputArray blob_, double scalefactor,
|
2018-06-05 04:51:28 +08:00
|
|
|
Size size, const Scalar& mean_, bool swapRB, bool crop, int ddepth)
|
2018-01-13 23:17:56 +08:00
|
|
|
{
|
|
|
|
CV_TRACE_FUNCTION();
|
2018-06-05 04:51:28 +08:00
|
|
|
CV_CheckType(ddepth, ddepth == CV_32F || ddepth == CV_8U, "Blob depth should be CV_32F or CV_8U");
|
|
|
|
if (ddepth == CV_8U)
|
|
|
|
{
|
|
|
|
CV_CheckEQ(scalefactor, 1.0, "Scaling is not supported for CV_8U blob depth");
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert(mean_ == Scalar() && "Mean subtraction is not supported for CV_8U blob depth");
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
|
2018-01-13 23:17:56 +08:00
|
|
|
std::vector<Mat> images;
|
|
|
|
images_.getMatVector(images);
|
|
|
|
CV_Assert(!images.empty());
|
2019-04-04 03:13:11 +08:00
|
|
|
for (size_t i = 0; i < images.size(); i++)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
Size imgSize = images[i].size();
|
|
|
|
if (size == Size())
|
|
|
|
size = imgSize;
|
|
|
|
if (size != imgSize)
|
|
|
|
{
|
2017-10-11 20:46:20 +08:00
|
|
|
if(crop)
|
|
|
|
{
|
|
|
|
float resizeFactor = std::max(size.width / (float)imgSize.width,
|
|
|
|
size.height / (float)imgSize.height);
|
2017-12-13 20:00:38 +08:00
|
|
|
resize(images[i], images[i], Size(), resizeFactor, resizeFactor, INTER_LINEAR);
|
2017-10-11 20:46:20 +08:00
|
|
|
Rect crop(Point(0.5 * (images[i].cols - size.width),
|
|
|
|
0.5 * (images[i].rows - size.height)),
|
|
|
|
size);
|
|
|
|
images[i] = images[i](crop);
|
|
|
|
}
|
|
|
|
else
|
2017-12-13 20:00:38 +08:00
|
|
|
resize(images[i], images[i], size, 0, 0, INTER_LINEAR);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-06-05 04:51:28 +08:00
|
|
|
if(images[i].depth() == CV_8U && ddepth == CV_32F)
|
2017-06-26 18:35:51 +08:00
|
|
|
images[i].convertTo(images[i], CV_32F);
|
|
|
|
Scalar mean = mean_;
|
|
|
|
if (swapRB)
|
|
|
|
std::swap(mean[0], mean[2]);
|
|
|
|
|
|
|
|
images[i] -= mean;
|
|
|
|
images[i] *= scalefactor;
|
|
|
|
}
|
|
|
|
|
2019-04-04 03:13:11 +08:00
|
|
|
size_t nimages = images.size();
|
2017-06-26 18:35:51 +08:00
|
|
|
Mat image0 = images[0];
|
|
|
|
int nch = image0.channels();
|
|
|
|
CV_Assert(image0.dims == 2);
|
|
|
|
if (nch == 3 || nch == 4)
|
|
|
|
{
|
2017-10-27 19:06:53 +08:00
|
|
|
int sz[] = { (int)nimages, nch, image0.rows, image0.cols };
|
2018-06-05 04:51:28 +08:00
|
|
|
blob_.create(4, sz, ddepth);
|
2018-01-13 23:17:56 +08:00
|
|
|
Mat blob = blob_.getMat();
|
2017-06-26 18:35:51 +08:00
|
|
|
Mat ch[4];
|
|
|
|
|
2019-04-04 03:13:11 +08:00
|
|
|
for(size_t i = 0; i < nimages; i++ )
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2019-04-04 03:13:11 +08:00
|
|
|
const Mat& image = images[i];
|
2018-06-05 04:51:28 +08:00
|
|
|
CV_Assert(image.depth() == blob_.depth());
|
2017-06-26 18:35:51 +08:00
|
|
|
nch = image.channels();
|
|
|
|
CV_Assert(image.dims == 2 && (nch == 3 || nch == 4));
|
|
|
|
CV_Assert(image.size() == image0.size());
|
|
|
|
|
2017-10-27 19:06:53 +08:00
|
|
|
for( int j = 0; j < nch; j++ )
|
2018-06-05 04:51:28 +08:00
|
|
|
ch[j] = Mat(image.rows, image.cols, ddepth, blob.ptr((int)i, j));
|
2017-06-26 18:35:51 +08:00
|
|
|
if(swapRB)
|
|
|
|
std::swap(ch[0], ch[2]);
|
|
|
|
split(image, ch);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
CV_Assert(nch == 1);
|
|
|
|
int sz[] = { (int)nimages, 1, image0.rows, image0.cols };
|
2018-06-05 04:51:28 +08:00
|
|
|
blob_.create(4, sz, ddepth);
|
2018-01-13 23:17:56 +08:00
|
|
|
Mat blob = blob_.getMat();
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2019-04-04 03:13:11 +08:00
|
|
|
for(size_t i = 0; i < nimages; i++ )
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2019-04-04 03:13:11 +08:00
|
|
|
const Mat& image = images[i];
|
2018-06-05 04:51:28 +08:00
|
|
|
CV_Assert(image.depth() == blob_.depth());
|
2017-06-26 18:35:51 +08:00
|
|
|
nch = image.channels();
|
|
|
|
CV_Assert(image.dims == 2 && (nch == 1));
|
|
|
|
CV_Assert(image.size() == image0.size());
|
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
image.copyTo(Mat(image.rows, image.cols, ddepth, blob.ptr((int)i, 0)));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-12 19:51:07 +08:00
|
|
|
// Inverse of blobFromImages: splits a 4D NCHW CV_32F blob back into a
// vector of 2D multi-channel images.
//   blob_.size[0] = batch size, [1] = channels, [2] = height, [3] = width.
void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_)
{
    CV_TRACE_FUNCTION();

    // Only 4D floating-point blobs are supported.
    CV_Assert(blob_.depth() == CV_32F);
    CV_Assert(blob_.dims == 4);

    images_.create(cv::Size(1, blob_.size[0]), blob_.depth());

    std::vector<Mat> channelPlanes(blob_.size[1]);
    for (int n = 0; n < blob_.size[0]; ++n)
    {
        // Gather the channel planes of image n and merge them back into
        // one interleaved image.
        for (int c = 0; c < blob_.size[1]; ++c)
            channelPlanes[c] = getPlane(blob_, n, c);
        cv::merge(channelPlanes, images_.getMatRef(n));
    }
}
|
|
|
|
|
2018-01-11 02:50:54 +08:00
|
|
|
class OpenCLBackendWrapper : public BackendWrapper
|
|
|
|
{
|
|
|
|
public:
|
2018-06-01 15:54:12 +08:00
|
|
|
OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
|
|
|
m.copyTo(umat);
|
|
|
|
host = &m;
|
|
|
|
hostDirty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
OpenCLBackendWrapper(const Ptr<BackendWrapper>& baseBuffer, Mat& m)
|
2018-06-01 15:54:12 +08:00
|
|
|
: BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
|
|
|
Ptr<OpenCLBackendWrapper> base = baseBuffer.dynamicCast<OpenCLBackendWrapper>();
|
|
|
|
CV_Assert(!base.empty());
|
|
|
|
|
|
|
|
host = &m;
|
|
|
|
|
|
|
|
int shape[] = {1, (int)base->umat.total()};
|
|
|
|
umat = base->umat.reshape(1, 2, &shape[0])
|
|
|
|
.colRange(0, host->total())
|
|
|
|
.reshape(1, host->dims, &host->size[0]);
|
|
|
|
hostDirty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static Ptr<BackendWrapper> create(Mat& m)
|
|
|
|
{
|
|
|
|
return Ptr<BackendWrapper>(new OpenCLBackendWrapper(m));
|
|
|
|
}
|
|
|
|
|
|
|
|
static Ptr<BackendWrapper> create(const Ptr<BackendWrapper>& baseBuffer, Mat& m)
|
|
|
|
{
|
|
|
|
return Ptr<BackendWrapper>(new OpenCLBackendWrapper(baseBuffer, m));
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::vector<UMat> getUMatVector(const std::vector<Ptr<BackendWrapper> >& wrappers)
|
|
|
|
{
|
|
|
|
const int numWrappers = wrappers.size();
|
|
|
|
std::vector<UMat> mats(wrappers.size());
|
|
|
|
for (int i = 0; i < numWrappers; ++i)
|
|
|
|
{
|
|
|
|
Ptr<OpenCLBackendWrapper> umatWrapper = wrappers[i].dynamicCast<OpenCLBackendWrapper>();
|
|
|
|
CV_Assert(!umatWrapper.empty());
|
|
|
|
umatWrapper->copyToDevice();
|
|
|
|
mats[i] = umatWrapper->umat;
|
|
|
|
}
|
|
|
|
return mats;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Replaces all umats in wrappers to specific ones.
|
|
|
|
static void update(const std::vector<Ptr<BackendWrapper> >& wrappers,
|
|
|
|
const std::vector<UMat>& umats)
|
|
|
|
{
|
|
|
|
CV_Assert(wrappers.size() == umats.size());
|
|
|
|
for (int i = 0, n = umats.size(); i < n; ++i)
|
|
|
|
{
|
|
|
|
Ptr<OpenCLBackendWrapper> umatWrapper = wrappers[i].dynamicCast<OpenCLBackendWrapper>();
|
|
|
|
CV_Assert(!umatWrapper.empty());
|
|
|
|
umatWrapper->umat = umats[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
~OpenCLBackendWrapper() {}
|
|
|
|
|
|
|
|
// Copies data from device to a host memory.
|
2018-03-15 21:16:56 +08:00
|
|
|
virtual void copyToHost() CV_OVERRIDE
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
|
|
|
umat.copyTo(*host);
|
|
|
|
}
|
|
|
|
|
2018-03-15 21:16:56 +08:00
|
|
|
virtual void setHostDirty() CV_OVERRIDE
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
|
|
|
hostDirty = true;
|
|
|
|
};
|
|
|
|
|
|
|
|
void copyToDevice()
|
|
|
|
{
|
|
|
|
if (hostDirty)
|
|
|
|
{
|
|
|
|
host->copyTo(umat);
|
|
|
|
hostDirty = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
UMat umat;
|
|
|
|
Mat* host;
|
|
|
|
bool hostDirty;
|
|
|
|
};
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
// Identifies one output of one layer: (layer id, output id).
// Ordered so it can be used as a std::map key.
struct LayerPin
{
    int lid;  // layer id
    int oid;  // output index within that layer

    LayerPin(int layerId = -1, int outputId = -1)
        : lid(layerId), oid(outputId) {}

    // True when both ids refer to an actual layer output.
    bool valid() const
    {
        return lid >= 0 && oid >= 0;
    }

    bool equal(const LayerPin &r) const
    {
        return lid == r.lid && oid == r.oid;
    }

    // Lexicographic order: by layer id, then by output id.
    bool operator<(const LayerPin &r) const
    {
        return lid < r.lid || (lid == r.lid && oid < r.oid);
    }

    bool operator ==(const LayerPin &r) const
    {
        return equal(r);
    }
};
|
|
|
|
|
|
|
|
struct LayerData
|
|
|
|
{
|
2018-02-22 18:20:35 +08:00
|
|
|
LayerData() : id(-1), skip(false), flag(0) {}
|
2017-06-26 18:35:51 +08:00
|
|
|
LayerData(int _id, const String &_name, const String &_type, LayerParams &_params)
|
2018-01-21 02:55:25 +08:00
|
|
|
: id(_id), name(_name), type(_type), params(_params), skip(false), flag(0)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
//add logging info
|
|
|
|
params.name = name;
|
|
|
|
params.type = type;
|
|
|
|
}
|
|
|
|
|
|
|
|
int id;
|
|
|
|
String name;
|
|
|
|
String type;
|
|
|
|
LayerParams params;
|
|
|
|
|
|
|
|
std::vector<LayerPin> inputBlobsId;
|
|
|
|
std::set<int> inputLayersId;
|
|
|
|
std::set<int> requiredOutputs;
|
|
|
|
std::vector<LayerPin> consumers;
|
2017-09-06 15:34:07 +08:00
|
|
|
std::vector<Ptr<BackendWrapper> > outputBlobsWrappers;
|
|
|
|
std::vector<Ptr<BackendWrapper> > inputBlobsWrappers;
|
2018-01-11 02:50:54 +08:00
|
|
|
std::vector<Ptr<BackendWrapper> > internalBlobsWrappers;
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
Ptr<Layer> layerInstance;
|
|
|
|
std::vector<Mat> outputBlobs;
|
|
|
|
std::vector<Mat*> inputBlobs;
|
|
|
|
std::vector<Mat> internals;
|
|
|
|
// Computation nodes of implemented backends (except DEFAULT).
|
|
|
|
std::map<int, Ptr<BackendNode> > backendNodes;
|
|
|
|
// Flag for skip layer computation for specific backend.
|
2018-01-21 02:55:25 +08:00
|
|
|
bool skip;
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
int flag;
|
|
|
|
|
|
|
|
Ptr<Layer> getLayerInstance()
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG_VALUE(type, "type", type.c_str());
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (layerInstance)
|
|
|
|
return layerInstance;
|
|
|
|
|
|
|
|
layerInstance = LayerFactory::createLayerInstance(type, params);
|
|
|
|
if (!layerInstance)
|
|
|
|
{
|
|
|
|
CV_Error(Error::StsError, "Can't create layer \"" + name + "\" of type \"" + type + "\"");
|
|
|
|
}
|
|
|
|
|
|
|
|
return layerInstance;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
//fake layer containing network input blobs
|
|
|
|
struct DataLayer : public Layer
|
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
DataLayer() : Layer()
|
|
|
|
{
|
|
|
|
skip = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual bool supportBackend(int backendId) CV_OVERRIDE
|
|
|
|
{
|
|
|
|
return backendId == DNN_BACKEND_OPENCV ||
|
2018-11-15 04:25:23 +08:00
|
|
|
(backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1);
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
2018-07-09 19:35:54 +08:00
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
|
2018-07-09 19:35:54 +08:00
|
|
|
{
|
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
|
|
|
|
|
|
|
|
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
|
2018-09-06 18:26:47 +08:00
|
|
|
forward_ocl(inputs_arr, outputs_arr, internals_arr))
|
2018-07-09 19:35:54 +08:00
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
if (outputs_arr.depth() == CV_16S)
|
|
|
|
{
|
|
|
|
forward_fallback(inputs_arr, outputs_arr, internals_arr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<Mat> outputs, internals;
|
|
|
|
outputs_arr.getMatVector(outputs);
|
|
|
|
internals_arr.getMatVector(internals);
|
2018-07-09 19:35:54 +08:00
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
// Supported modes:
|
|
|
|
// | Input type | Output type |
|
|
|
|
// | fp32 | fp32 |
|
|
|
|
// | uint8 | fp32 |
|
2018-07-09 19:35:54 +08:00
|
|
|
for (int i = 0; i < inputsData.size(); ++i)
|
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
double scale = scaleFactors[i];
|
|
|
|
Scalar& mean = means[i];
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4);
|
|
|
|
CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, "");
|
2018-06-05 04:51:28 +08:00
|
|
|
|
|
|
|
bool singleMean = true;
|
|
|
|
for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j)
|
|
|
|
{
|
|
|
|
singleMean = mean[j] == mean[j - 1];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (singleMean)
|
|
|
|
{
|
|
|
|
inputsData[i].convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
|
|
|
|
}
|
|
|
|
else
|
2018-07-09 19:35:54 +08:00
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
for (int n = 0; n < inputsData[i].size[0]; ++n)
|
|
|
|
for (int c = 0; c < inputsData[i].size[1]; ++c)
|
|
|
|
{
|
|
|
|
Mat inp = getPlane(inputsData[i], n, c);
|
|
|
|
Mat out = getPlane(outputs[i], n, c);
|
|
|
|
inp.convertTo(out, CV_32F, scale, -mean[c] * scale);
|
|
|
|
}
|
2018-07-09 19:35:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef HAVE_OPENCL
|
2018-10-05 22:06:50 +08:00
|
|
|
std::vector<Mat> tmp_expressions;
|
2018-07-09 19:35:54 +08:00
|
|
|
bool forward_ocl(InputArrayOfArrays, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
|
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
// Supported modes:
|
|
|
|
// | Input type | Output type |
|
|
|
|
// | fp32 | fp32 |
|
|
|
|
// | fp32 | fp16 |
|
|
|
|
// | uint8 | fp32 |
|
|
|
|
std::vector<UMat> outputs;
|
|
|
|
outputs_.getUMatVector(outputs);
|
|
|
|
|
2018-10-05 22:06:50 +08:00
|
|
|
tmp_expressions.clear();
|
2018-06-05 04:51:28 +08:00
|
|
|
for (int i = 0; i < inputsData.size(); ++i)
|
2018-07-09 19:35:54 +08:00
|
|
|
{
|
2018-10-05 22:06:50 +08:00
|
|
|
Mat inputData = inputsData[i];
|
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
double scale = scaleFactors[i];
|
|
|
|
Scalar& mean = means[i];
|
|
|
|
|
|
|
|
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4);
|
|
|
|
bool singleMean = true;
|
|
|
|
for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j)
|
2018-07-09 19:35:54 +08:00
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
singleMean = mean[j] == mean[j - 1];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (outputs_.depth() == CV_16S)
|
|
|
|
{
|
|
|
|
if (singleMean)
|
2018-10-05 22:06:50 +08:00
|
|
|
{
|
|
|
|
tmp_expressions.push_back(Mat(scale * (inputsData[i] - mean[0])));
|
|
|
|
convertFp16(tmp_expressions.back(), outputs[i]);
|
|
|
|
}
|
2018-06-05 04:51:28 +08:00
|
|
|
else
|
|
|
|
{
|
|
|
|
for (int n = 0; n < inputsData[i].size[0]; ++n)
|
|
|
|
for (int c = 0; c < inputsData[i].size[1]; ++c)
|
|
|
|
{
|
|
|
|
Mat inp = getPlane(inputsData[i], n, c);
|
|
|
|
|
|
|
|
std::vector<cv::Range> plane(4, Range::all());
|
|
|
|
plane[0] = Range(n, n + 1);
|
|
|
|
plane[1] = Range(c, c + 1);
|
|
|
|
UMat out = outputs[i](plane).reshape(1, inp.dims, inp.size);
|
|
|
|
|
2018-10-05 22:06:50 +08:00
|
|
|
tmp_expressions.push_back(scale * (inp - mean[c]));
|
|
|
|
convertFp16(tmp_expressions.back(), out);
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
CV_Assert(outputs_.depth() == CV_32F);
|
|
|
|
if (singleMean)
|
2018-10-05 22:06:50 +08:00
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
inputsData[i].convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
|
2018-10-05 22:06:50 +08:00
|
|
|
}
|
2018-06-05 04:51:28 +08:00
|
|
|
else
|
|
|
|
{
|
|
|
|
for (int n = 0; n < inputsData[i].size[0]; ++n)
|
|
|
|
for (int c = 0; c < inputsData[i].size[1]; ++c)
|
|
|
|
{
|
|
|
|
Mat inp = getPlane(inputsData[i], n, c);
|
|
|
|
|
|
|
|
std::vector<cv::Range> plane(4, Range::all());
|
|
|
|
plane[0] = Range(n, n + 1);
|
|
|
|
plane[1] = Range(c, c + 1);
|
|
|
|
UMat out = outputs[i](plane).reshape(1, inp.dims, inp.size);
|
|
|
|
|
|
|
|
inp.convertTo(out, CV_32F, scale, -mean[c] * scale);
|
|
|
|
}
|
|
|
|
}
|
2018-07-09 19:35:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
#endif
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2018-03-15 21:16:56 +08:00
|
|
|
int outputNameToIndex(const String& tgtName) CV_OVERRIDE
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
int idx = (int)(std::find(outNames.begin(), outNames.end(), tgtName) - outNames.begin());
|
|
|
|
return (idx < (int)outNames.size()) ? idx : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void setNames(const std::vector<String> &names)
|
|
|
|
{
|
|
|
|
outNames.assign(names.begin(), names.end());
|
|
|
|
}
|
|
|
|
|
2017-11-02 21:21:06 +08:00
|
|
|
bool getMemoryShapes(const std::vector<MatShape> &inputs,
|
|
|
|
const int requiredOutputs,
|
|
|
|
std::vector<MatShape> &outputs,
|
2018-03-15 21:16:56 +08:00
|
|
|
std::vector<MatShape> &internals) const CV_OVERRIDE
|
2017-11-02 21:21:06 +08:00
|
|
|
{
|
|
|
|
CV_Assert(inputs.size() == requiredOutputs);
|
|
|
|
outputs.assign(inputs.begin(), inputs.end());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
|
2018-06-05 04:51:28 +08:00
|
|
|
{
|
2018-09-06 18:26:47 +08:00
|
|
|
std::vector<Mat> outputs;
|
|
|
|
outputs_arr.getMatVector(outputs);
|
|
|
|
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert_N(outputs.size() == scaleFactors.size(), outputs.size() == means.size(),
|
2018-06-05 04:51:28 +08:00
|
|
|
inputsData.size() == outputs.size());
|
|
|
|
skip = true;
|
|
|
|
for (int i = 0; skip && i < inputsData.size(); ++i)
|
|
|
|
{
|
|
|
|
if (inputsData[i].data != outputs[i].data || scaleFactors[i] != 1.0 || means[i] != Scalar())
|
|
|
|
skip = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
|
|
|
|
{
|
|
|
|
#ifdef HAVE_INF_ENGINE
|
2018-08-22 21:04:40 +08:00
|
|
|
CV_CheckEQ(inputsData.size(), (size_t)1, "");
|
|
|
|
CV_CheckEQ(inputsData[0].dims, 4, "");
|
2018-06-05 04:51:28 +08:00
|
|
|
const size_t numChannels = inputsData[0].size[1];
|
|
|
|
CV_Assert(numChannels <= 4);
|
|
|
|
|
|
|
|
// Scale
|
2019-08-07 03:20:26 +08:00
|
|
|
InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
|
|
|
|
InferenceEngine::Layout::C);
|
|
|
|
auto weights = InferenceEngine::make_shared_blob<float>(td);
|
2018-06-05 04:51:28 +08:00
|
|
|
weights->allocate();
|
2019-08-07 03:20:26 +08:00
|
|
|
|
|
|
|
float* weight_buf = weights->buffer().as<float*>();
|
|
|
|
std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
|
2018-06-05 04:51:28 +08:00
|
|
|
|
|
|
|
// Mean subtraction
|
2019-08-07 03:20:26 +08:00
|
|
|
auto biases = InferenceEngine::make_shared_blob<float>(td);
|
2018-06-05 04:51:28 +08:00
|
|
|
biases->allocate();
|
2019-08-07 03:20:26 +08:00
|
|
|
float* bias_buf = biases->buffer().as<float*>();
|
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
for (int i = 0; i < numChannels; ++i)
|
|
|
|
{
|
2019-08-07 03:20:26 +08:00
|
|
|
bias_buf[i] = -means[0][i] * scaleFactors[0];
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
|
2019-02-14 18:30:30 +08:00
|
|
|
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
|
|
|
|
addConstantData("weights", weights, ieLayer);
|
|
|
|
addConstantData("biases", biases, ieLayer);
|
2018-06-05 04:51:28 +08:00
|
|
|
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
|
|
|
|
#endif // HAVE_INF_ENGINE
|
|
|
|
return Ptr<BackendNode>();
|
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<String> outNames;
|
2018-06-05 04:51:28 +08:00
|
|
|
// Preprocessing parameters for each network's input.
|
|
|
|
std::vector<double> scaleFactors;
|
|
|
|
std::vector<Scalar> means;
|
2018-07-09 19:35:54 +08:00
|
|
|
std::vector<Mat> inputsData;
|
2018-06-05 04:51:28 +08:00
|
|
|
bool skip;
|
2017-06-26 18:35:51 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct BlobManager
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
// Increase references counter to layer output.
|
|
|
|
void addReference(const LayerPin& lp)
|
|
|
|
{
|
|
|
|
std::map<LayerPin, int>::iterator it = refCounter.find(lp);
|
|
|
|
if (it == refCounter.end())
|
|
|
|
refCounter[lp] = 1;
|
|
|
|
else
|
|
|
|
it->second += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void addReferences(const std::vector<LayerPin>& pins)
|
|
|
|
{
|
|
|
|
for (int i = 0; i < pins.size(); i++)
|
|
|
|
{
|
|
|
|
addReference(pins[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns number of references to allocated memory that used in specific
|
|
|
|
// layer blob.
|
|
|
|
int numReferences(const LayerPin& lp)
|
|
|
|
{
|
|
|
|
std::map<LayerPin, LayerPin>::iterator mapIt = reuseMap.find(lp);
|
|
|
|
CV_Assert(mapIt != reuseMap.end());
|
|
|
|
LayerPin memHost = mapIt->second;
|
|
|
|
|
|
|
|
std::map<LayerPin, int>::iterator refIt = refCounter.find(memHost);
|
|
|
|
CV_Assert(refIt != refCounter.end());
|
|
|
|
return refIt->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reuse data allocated in <host> inside the <user> blob.
|
|
|
|
void reuse(const LayerPin& host, const LayerPin& user)
|
|
|
|
{
|
|
|
|
CV_Assert(reuseMap.find(user) == reuseMap.end());
|
|
|
|
CV_Assert(reuseMap.find(host) != reuseMap.end());
|
|
|
|
LayerPin memHost = reuseMap[host];
|
|
|
|
reuseMap[user] = memHost;
|
|
|
|
if (refCounter.find(memHost) != refCounter.end())
|
|
|
|
{
|
|
|
|
std::map<LayerPin, int>::iterator userRefIt = refCounter.find(user);
|
|
|
|
if (userRefIt != refCounter.end())
|
|
|
|
{
|
|
|
|
refCounter[memHost] += userRefIt->second;
|
|
|
|
refCounter.erase(userRefIt);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
refCounter[memHost] += 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decrease references counter to allocated memory inside specific blob.
|
|
|
|
void releaseReference(const LayerPin& lp)
|
|
|
|
{
|
|
|
|
std::map<LayerPin, LayerPin>::iterator mapIt = reuseMap.find(lp);
|
|
|
|
CV_Assert(mapIt != reuseMap.end());
|
|
|
|
|
|
|
|
std::map<LayerPin, int>::iterator refIt = refCounter.find(mapIt->second);
|
|
|
|
CV_Assert(refIt != refCounter.end());
|
|
|
|
CV_Assert(refIt->second > 0);
|
|
|
|
refIt->second -= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void releaseReferences(const std::vector<LayerPin>& pins)
|
|
|
|
{
|
|
|
|
for (int i = 0; i < pins.size(); i++)
|
|
|
|
{
|
|
|
|
releaseReference(pins[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-27 20:45:44 +08:00
|
|
|
// Binds `dst` either to a view over an already-allocated but currently
// unused blob (memory reuse) or to freshly allocated memory that is then
// registered via addHost(). `use_half` selects CV_16S element storage
// (presumably for FP16 data — confirm against callers) instead of CV_32F.
void reuseOrCreate(const MatShape& shape, const LayerPin& lp, Mat& dst, bool use_half)
{
    if (!DNN_DISABLE_MEMORY_OPTIMIZATIONS)
    {
        // Best-fit search: the smallest unused host blob that still has
        // at least total(shape) elements.
        Mat bestBlob;
        LayerPin bestBlobPin;

        std::map<LayerPin, Mat>::iterator hostIt;
        std::map<LayerPin, int>::iterator refIt;

        const int targetTotal = total(shape);
        int bestBlobTotal = INT_MAX;

        for (hostIt = memHosts.begin(); hostIt != memHosts.end(); ++hostIt)
        {
            refIt = refCounter.find(hostIt->first);
            // Use only blobs that had references before because if not,
            // it might be used as output.
            if (refIt != refCounter.end() && refIt->second == 0)
            {
                Mat& unusedBlob = hostIt->second;
                if (unusedBlob.total() >= targetTotal &&
                    unusedBlob.total() < bestBlobTotal)
                {
                    bestBlobPin = hostIt->first;
                    bestBlob = unusedBlob;
                    bestBlobTotal = unusedBlob.total();
                }
            }
        }
        if (!bestBlob.empty())
        {
            // Record that `lp` now shares the winner's memory, then expose a
            // prefix of the flattened host blob reshaped to the target shape.
            reuse(bestBlobPin, lp);
            dst = bestBlob.reshape(1, 1).colRange(0, targetTotal).reshape(1, shape);
            return;
        }
    }

    {
        // No reusable blob found (or optimizations disabled): allocate.
        // if dst already has been allocated with total(shape) elements,
        // it won't be recreated and pointer of dst.data remains the same.
        dst.create(shape, use_half ? CV_16S : CV_32F);
        addHost(lp, dst);
    }
}
|
|
|
|
|
|
|
|
// Allocates (or reuses) output and internal blobs of layer `ld` according
// to `layerShapes`. Pins of non-empty internal blobs are returned through
// `pinsForInternalBlobs`; references to them are registered here via
// addReferences(). `use_half` is forwarded to reuseOrCreate() to pick the
// element type of newly allocated blobs.
void allocateBlobsForLayer(LayerData &ld, const LayerShapes& layerShapes,
                           std::vector<LayerPin>& pinsForInternalBlobs,
                           bool use_half = false)
{
    CV_TRACE_FUNCTION();

    pinsForInternalBlobs.clear();

    std::vector<Mat>& outputBlobs = ld.outputBlobs,
            &internalBlobs = ld.internals;

    const ShapesVec& outShapes = layerShapes.out,
            internalShapes = layerShapes.internal;

    outputBlobs.resize(std::max((size_t)1, outShapes.size())); //layer produce at least one output blob
    internalBlobs.resize(internalShapes.size());

    CV_Assert(ld.requiredOutputs.size() <= outShapes.size());

    // Check that layer could work in-place.
    bool inPlace = false;
    if (layerShapes.supportInPlace)
    {
        if (ld.inputBlobs.size() == 1)
        {
            // Get number of references to the input memory.
            int numRef = numReferences(ld.inputBlobsId[0]);
            // If current layer is one and only customer of this blob.
            inPlace = numRef == 1;
        }
    }

    // Collect all shapes (outputs first, then internals) together with
    // pointers to the blobs they describe, preserving index correspondence.
    ShapesVec shapes(outShapes);
    shapes.insert(shapes.end(), internalShapes.begin(), internalShapes.end());
    std::vector<Mat*> blobs;
    for(int i = 0; i < outputBlobs.size(); i++)
    {
        blobs.push_back(&outputBlobs[i]);
    }

    for(int i = 0; i < internalBlobs.size(); i++)
    {
        blobs.push_back(&internalBlobs[i]);
        // Only non-empty internals get a pin; their output index is offset
        // past the layer's output blobs.
        if (total(internalShapes[i]))
        {
            pinsForInternalBlobs.push_back(LayerPin(ld.id, ld.outputBlobs.size() + i));
        }
    }

    addReferences(pinsForInternalBlobs);

    // Group blob indices by element count so allocation can proceed from
    // the largest blobs to the smallest (better reuse of big hosts).
    std::map<int, std::vector<int> > idxSizes;
    for(int i = 0; i < shapes.size(); i++)
    {
        idxSizes[total(shapes[i])].push_back(i);
    }

    std::map<int, std::vector<int> >::reverse_iterator it;
    for(it = idxSizes.rbegin(); it != idxSizes.rend(); it++)
    {
        for(int j = 0; j < it->second.size(); j++)
        {
            int index = it->second[j];
            if (total(shapes[index]))
            {
                LayerPin blobPin(ld.id, index);
                // In-place path applies only to output blobs (index within
                // outShapes): alias the single input's memory directly.
                if (index < outShapes.size() && inPlace)
                {
                    CV_Assert(ld.inputBlobs[0]->total() == total(shapes[index]));
                    ld.outputBlobs[index] = ld.inputBlobs[0]->reshape(1, shapes[index]);
                    reuse(ld.inputBlobsId[0], blobPin);
                }
                else
                    reuseOrCreate(shapes[index], blobPin, *blobs[index], use_half);
            }
        }
    }
}
|
|
|
|
|
|
|
|
// Clear internal state. Calls before an every reallocation.
|
|
|
|
void reset()
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
refCounter.clear();
|
|
|
|
reuseMap.clear();
|
|
|
|
memHosts.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
// Register allocated memory.
|
|
|
|
void addHost(const LayerPin& lp, const Mat& mat)
|
|
|
|
{
|
|
|
|
CV_Assert(memHosts.find(lp) == memHosts.end());
|
|
|
|
reuseMap[lp] = lp;
|
|
|
|
memHosts[lp] = mat;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Number of outstanding (not yet released) references per origin blob.
std::map<LayerPin, int> refCounter;
// Maps pin to origin blob (for whom memory was allocated firstly).
// For origin blobs key == value.
std::map<LayerPin, LayerPin> reuseMap;
// Blobs for which memory was actually allocated, keyed by origin pin.
std::map<LayerPin, Mat> memHosts;
};
|
|
|
|
|
2018-01-11 02:50:54 +08:00
|
|
|
// Creates a backend-specific wrapper over the host matrix `m` for the given
// (backendId, targetId) pair. Returns an empty Ptr for the plain OpenCV CPU
// path (no wrapper needed). Raises CV_Error for unknown identifiers. When a
// backend was not compiled in, the corresponding CV_Assert(have*()) fails
// before the #ifdef-guarded construction would be reached.
static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
{
    if (backendId == DNN_BACKEND_OPENCV)
    {
        if (targetId == DNN_TARGET_CPU)
            return Ptr<BackendWrapper>();  // CPU target works on the Mat directly
        else if (IS_DNN_OPENCL_TARGET(targetId))
            return OpenCLBackendWrapper::create(m);
        else
            CV_Error(Error::StsNotImplemented, "Unknown target identifier");
    }
    else if (backendId == DNN_BACKEND_HALIDE)
    {
        CV_Assert(haveHalide());
#ifdef HAVE_HALIDE
        return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
#endif  // HAVE_HALIDE
    }
    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
    {
        CV_Assert(haveInfEngine());
#ifdef HAVE_INF_ENGINE
        return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
#endif  // HAVE_INF_ENGINE
    }
    else if (backendId == DNN_BACKEND_VKCOM)
    {
        CV_Assert(haveVulkan());
#ifdef HAVE_VULKAN
        return Ptr<BackendWrapper>(new VkComBackendWrapper(m));
#endif  // HAVE_VULKAN
    }
    else if (backendId == DNN_BACKEND_CUDA)
    {
        CV_Assert(haveCUDA());

#ifdef HAVE_CUDA
        // CUDA distinguishes FP32 and FP16 wrappers by target.
        switch (targetId)
        {
        case DNN_TARGET_CUDA:
            return CUDABackendWrapperFP32::create(m);
        case DNN_TARGET_CUDA_FP16:
            return CUDABackendWrapperFP16::create(m);
        default:
            CV_Assert(IS_DNN_CUDA_TARGET(targetId));
        }
#endif
    }
    else
        CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
    // Reached when a backend's HAVE_* macro is undefined (assert disabled)
    // or after the CUDA default case.
    return Ptr<BackendWrapper>();
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
struct Net::Impl
|
|
|
|
{
|
|
|
|
typedef std::map<int, LayerShapes> LayersShapesMap;
|
|
|
|
typedef std::map<int, LayerData> MapIdToLayerData;
|
|
|
|
|
|
|
|
// Constructs an empty network: installs the fake input layer (id 0) and
// sets default preferences (default backend, CPU target, fusion enabled,
// synchronous forward).
Impl()
{
    //allocate fake net input layer
    netInputLayer = Ptr<DataLayer>(new DataLayer());
    LayerData &inpl = layers.insert( make_pair(0, LayerData()) ).first->second;
    inpl.id = 0;
    netInputLayer->name = inpl.name = "_input";
    inpl.type = "__NetInputLayer__";
    inpl.layerInstance = netInputLayer;
    layerNameToId.insert(std::make_pair(inpl.name, inpl.id));

    // Default runtime configuration.
    lastLayerId = 0;
    netWasAllocated = false;
    fusion = true;
    isAsync = false;
    preferableBackend = DNN_BACKEND_DEFAULT;
    preferableTarget = DNN_TARGET_CPU;
    skipInfEngineInit = false;

#ifdef HAVE_CUDA
    // Eagerly create the CUDA stream plus cuBLAS/cuDNN handles when at
    // least one CUDA-capable device is present.
    if (cv::cuda::getCudaEnabledDeviceCount() > 0)
    {
        cuda4dnn::csl::CSLContext context;
        context.stream = cuda4dnn::csl::Stream(true);
        context.cublas_handle = cuda4dnn::csl::cublas::Handle(context.stream);
        context.cudnn_handle = cuda4dnn::csl::cudnn::Handle(context.stream);

        cudaInfo = std::unique_ptr<CudaInfo_t>(new CudaInfo_t(std::move(context)));
    }
#endif
}
|
|
|
|
|
|
|
|
// Fake net input layer created in the constructor (name "_input", id 0).
Ptr<DataLayer> netInputLayer;
std::vector<LayerPin> blobsToKeep;
// All layers keyed by their numeric id (id 0 is the input layer).
MapIdToLayerData layers;
std::map<String, int> layerNameToId;
BlobManager blobManager;
int preferableBackend;
int preferableTarget;
String halideConfigFile;
bool skipInfEngineInit;
// Map host data to backend specific wrapper.
std::map<void*, Ptr<BackendWrapper> > backendWrappers;

int lastLayerId;

bool netWasAllocated;
bool fusion;
bool isAsync;
std::vector<int64> layersTimings;
Mat output_blob;

#ifdef HAVE_CUDA
// CUDA execution context (stream + cuBLAS/cuDNN handles) and a shared
// workspace; created in the constructor when a CUDA device is available.
struct CudaInfo_t
{
    CudaInfo_t(cuda4dnn::csl::CSLContext ctxt) : context(std::move(ctxt)) { }
    cuda4dnn::csl::CSLContext context;
    cuda4dnn::csl::Workspace workspace;
};

std::unique_ptr<CudaInfo_t> cudaInfo;  // null when no CUDA device was found
#endif
|
|
|
|
|
2018-01-11 02:50:54 +08:00
|
|
|
Ptr<BackendWrapper> wrap(Mat& host)
|
2017-09-06 15:34:07 +08:00
|
|
|
{
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU)
|
2017-09-06 15:34:07 +08:00
|
|
|
return Ptr<BackendWrapper>();
|
|
|
|
|
|
|
|
MatShape shape(host.dims);
|
|
|
|
for (int i = 0; i < host.dims; ++i)
|
|
|
|
shape[i] = host.size[i];
|
|
|
|
|
|
|
|
void* data = host.data;
|
|
|
|
if (backendWrappers.find(data) != backendWrappers.end())
|
|
|
|
{
|
|
|
|
Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV)
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
2018-04-26 19:20:16 +08:00
|
|
|
CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
|
2018-01-11 02:50:54 +08:00
|
|
|
return OpenCLBackendWrapper::create(baseBuffer, host);
|
|
|
|
}
|
|
|
|
else if (preferableBackend == DNN_BACKEND_HALIDE)
|
2017-09-06 15:34:07 +08:00
|
|
|
{
|
|
|
|
CV_Assert(haveHalide());
|
|
|
|
#ifdef HAVE_HALIDE
|
|
|
|
return Ptr<BackendWrapper>(new HalideBackendWrapper(baseBuffer, shape));
|
|
|
|
#endif // HAVE_HALIDE
|
|
|
|
}
|
2018-02-06 16:57:35 +08:00
|
|
|
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
|
|
|
|
{
|
|
|
|
return wrapMat(preferableBackend, preferableTarget, host);
|
|
|
|
}
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
else if (preferableBackend == DNN_BACKEND_VKCOM)
|
|
|
|
{
|
|
|
|
#ifdef HAVE_VULKAN
|
|
|
|
return Ptr<BackendWrapper>(new VkComBackendWrapper(baseBuffer, host));
|
|
|
|
#endif
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitrary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
}
|
|
|
|
else if (preferableBackend == DNN_BACKEND_CUDA)
|
|
|
|
{
|
|
|
|
CV_Assert(haveCUDA());
|
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
switch (preferableTarget)
|
|
|
|
{
|
|
|
|
case DNN_TARGET_CUDA:
|
|
|
|
return CUDABackendWrapperFP32::create(baseBuffer, shape);
|
|
|
|
case DNN_TARGET_CUDA_FP16:
|
|
|
|
return CUDABackendWrapperFP16::create(baseBuffer, shape);
|
|
|
|
default:
|
|
|
|
CV_Assert(IS_DNN_CUDA_TARGET(preferableTarget));
|
|
|
|
}
|
|
|
|
#endif
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
}
|
2017-09-06 15:34:07 +08:00
|
|
|
else
|
|
|
|
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
|
|
|
|
}
|
|
|
|
|
|
|
|
Ptr<BackendWrapper> wrapper = wrapMat(preferableBackend, preferableTarget, host);
|
|
|
|
backendWrappers[data] = wrapper;
|
|
|
|
return wrapper;
|
|
|
|
}
|
|
|
|
|
2017-10-10 22:52:55 +08:00
|
|
|
#ifdef HAVE_HALIDE
|
2017-06-26 18:35:51 +08:00
|
|
|
void compileHalide()
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
CV_Assert(preferableBackend == DNN_BACKEND_HALIDE);
|
|
|
|
|
|
|
|
HalideScheduler scheduler(halideConfigFile);
|
2017-10-10 22:52:55 +08:00
|
|
|
std::vector< std::reference_wrapper<LayerData> > compileList; compileList.reserve(64);
|
|
|
|
for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); ++it)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
|
|
|
Ptr<Layer> layer = ld.layerInstance;
|
2018-01-21 02:55:25 +08:00
|
|
|
if (layer->supportBackend(DNN_BACKEND_HALIDE) && !ld.skip)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
CV_Assert(!ld.backendNodes[DNN_BACKEND_HALIDE].empty());
|
|
|
|
bool scheduled = scheduler.process(ld.backendNodes[DNN_BACKEND_HALIDE]);
|
|
|
|
if (!scheduled)
|
|
|
|
{
|
|
|
|
// Use automatic scheduling provided by layer.
|
|
|
|
layer->applyHalideScheduler(ld.backendNodes[DNN_BACKEND_HALIDE],
|
|
|
|
ld.inputBlobs, ld.outputBlobs,
|
|
|
|
preferableTarget);
|
|
|
|
}
|
2017-10-10 22:52:55 +08:00
|
|
|
compileList.emplace_back(ld);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
2017-10-10 22:52:55 +08:00
|
|
|
std::atomic<int> progress(0);
|
|
|
|
auto fn = ([&] () -> void
|
|
|
|
{
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
int id = progress.fetch_add(1);
|
|
|
|
if ((size_t)id >= compileList.size())
|
|
|
|
return;
|
|
|
|
const LayerData& ld = compileList[id].get();
|
|
|
|
Ptr<BackendNode> node = ld.backendNodes.find(DNN_BACKEND_HALIDE)->second;
|
|
|
|
dnn::compileHalide(ld.outputBlobs, node, preferableTarget);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
size_t num_threads = std::min(compileList.size(), (size_t)std::thread::hardware_concurrency());
|
|
|
|
num_threads = std::max((size_t)1u, std::min((size_t)8u, num_threads));
|
|
|
|
std::vector<std::thread> threads(num_threads - 1);
|
|
|
|
for (auto& t: threads) t = std::thread(fn);
|
|
|
|
fn(); // process own tasks
|
|
|
|
for (auto& t: threads) t.join();
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2017-10-10 22:52:55 +08:00
|
|
|
#endif
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
void clear()
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
MapIdToLayerData::iterator it;
|
|
|
|
for (it = layers.begin(); it != layers.end(); it++)
|
|
|
|
{
|
|
|
|
if (it->second.id != 0) {
|
2017-06-29 21:45:17 +08:00
|
|
|
it->second.inputBlobs.clear();
|
2017-06-26 18:35:51 +08:00
|
|
|
it->second.outputBlobs.clear();
|
|
|
|
it->second.internals.clear();
|
|
|
|
}
|
2018-01-21 02:55:25 +08:00
|
|
|
it->second.skip = false;
|
2017-06-28 16:15:22 +08:00
|
|
|
//it->second.consumers.clear();
|
|
|
|
Ptr<Layer> currLayer = it->second.layerInstance;
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2017-06-28 16:15:22 +08:00
|
|
|
if( currLayer.empty() )
|
|
|
|
continue;
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
currLayer->unsetAttached();
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2017-08-02 22:27:58 +08:00
|
|
|
|
|
|
|
layersTimings.clear();
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void setUpNet(const std::vector<LayerPin>& blobsToKeep_ = std::vector<LayerPin>())
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_DEFAULT)
|
2018-06-13 23:55:31 +08:00
|
|
|
preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
|
|
|
|
|
2018-06-01 15:54:12 +08:00
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
|
|
|
|
preferableTarget == DNN_TARGET_CPU ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16);
|
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
|
|
|
|
preferableTarget == DNN_TARGET_CPU ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL);
|
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_INFERENCE_ENGINE ||
|
|
|
|
preferableTarget == DNN_TARGET_CPU ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16 ||
|
2018-11-16 22:09:54 +08:00
|
|
|
preferableTarget == DNN_TARGET_MYRIAD ||
|
|
|
|
preferableTarget == DNN_TARGET_FPGA);
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
|
|
|
|
preferableTarget == DNN_TARGET_VULKAN);
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
|
|
|
|
IS_DNN_CUDA_TARGET(preferableTarget));
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
|
|
|
|
{
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
|
2018-05-16 18:23:19 +08:00
|
|
|
#ifndef HAVE_OPENCL
|
2018-02-28 20:22:20 +08:00
|
|
|
{
|
2018-05-16 18:23:19 +08:00
|
|
|
CV_LOG_WARNING(NULL, "DNN: OpenCL target is not available in this OpenCV build, switching to CPU.");
|
2018-02-28 20:22:20 +08:00
|
|
|
preferableTarget = DNN_TARGET_CPU;
|
|
|
|
}
|
2018-05-16 18:23:19 +08:00
|
|
|
#else
|
|
|
|
{
|
2018-09-26 21:27:00 +08:00
|
|
|
if (!DNN_OPENCL_ALLOW_ALL_DEVICES)
|
2018-05-16 18:23:19 +08:00
|
|
|
{
|
2018-09-26 21:27:00 +08:00
|
|
|
// Current implementation is only valid for GPU (#11494)
|
|
|
|
if (ocl::Device::getDefault().type() != ocl::Device::TYPE_GPU)
|
|
|
|
{
|
|
|
|
CV_LOG_WARNING(NULL, "DNN: OpenCL target is not supported with current OpenCL device (tested with GPUs only), switching to CPU.");
|
|
|
|
preferableTarget = DNN_TARGET_CPU;
|
|
|
|
}
|
|
|
|
else if (preferableTarget == DNN_TARGET_OPENCL_FP16 && !ocl::Device::getDefault().isIntel())
|
|
|
|
{
|
|
|
|
CV_LOG_WARNING(NULL,
|
|
|
|
"DNN: OpenCL target with fp16 precision is not supported "
|
|
|
|
"with current OpenCL device (tested with Intel GPUs only), "
|
|
|
|
"switching to OpenCL with fp32 precision.");
|
|
|
|
preferableTarget = DNN_TARGET_OPENCL;
|
|
|
|
}
|
2018-05-16 18:23:19 +08:00
|
|
|
}
|
|
|
|
}
|
2018-02-28 20:22:20 +08:00
|
|
|
#endif
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_VKCOM && !haveVulkan())
|
|
|
|
{
|
|
|
|
preferableBackend = DNN_BACKEND_OPENCV;
|
|
|
|
preferableTarget = DNN_TARGET_CPU;
|
|
|
|
}
|
|
|
|
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_CUDA && !haveCUDA())
|
|
|
|
{
|
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
CV_LOG_WARNING(NULL, "unable to use CUDA backend; switching to CPU");
|
|
|
|
#else
|
|
|
|
CV_LOG_WARNING(NULL, "DNN module was not built with CUDA backend; switching to CPU");
|
|
|
|
#endif
|
|
|
|
preferableBackend = DNN_BACKEND_OPENCV;
|
|
|
|
preferableTarget = DNN_TARGET_CPU;
|
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
clear();
|
|
|
|
|
|
|
|
allocateLayers(blobsToKeep_);
|
2018-06-05 04:51:28 +08:00
|
|
|
|
|
|
|
MapIdToLayerData::iterator it = layers.find(0);
|
|
|
|
CV_Assert(it != layers.end());
|
|
|
|
it->second.skip = netInputLayer->skip;
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
initBackend();
|
|
|
|
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
if (!netWasAllocated)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-10-10 22:52:55 +08:00
|
|
|
#ifdef HAVE_HALIDE
|
2017-06-26 18:35:51 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_HALIDE)
|
|
|
|
compileHalide();
|
2017-10-10 22:52:55 +08:00
|
|
|
#else
|
|
|
|
CV_Assert(preferableBackend != DNN_BACKEND_HALIDE);
|
|
|
|
#endif
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
netWasAllocated = true;
|
|
|
|
this->blobsToKeep = blobsToKeep_;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int getLayerId(const String &layerName)
|
|
|
|
{
|
|
|
|
std::map<String, int>::iterator it = layerNameToId.find(layerName);
|
|
|
|
return (it != layerNameToId.end()) ? it->second : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int getLayerId(int id)
|
|
|
|
{
|
|
|
|
MapIdToLayerData::iterator it = layers.find(id);
|
|
|
|
return (it != layers.end()) ? id : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Resolve a layer id from a descriptor holding either a numeric id or a
// layer name. The descriptor must be an int or a string; validating up
// front mirrors getLayerData(const DictValue&) and removes the previously
// unreachable trailing `return -1`.
int getLayerId(DictValue &layerDesc)
{
    CV_Assert(layerDesc.isInt() || layerDesc.isString());
    if (layerDesc.isInt())
        return getLayerId(layerDesc.get<int>());
    else /*if (layerDesc.isString())*/
        return getLayerId(layerDesc.get<String>());
}
|
|
|
|
|
|
|
|
// Fetch a layer's name by id; yields a placeholder string for unknown ids.
String getLayerName(int id)
{
    MapIdToLayerData::iterator found = layers.find(id);
    if (found == layers.end())
        return "(unknown layer)";
    return found->second.name;
}
|
|
|
|
|
|
|
|
// Return a reference to the LayerData registered under `id`.
// Raises StsObjectNotFound when the id is unknown.
LayerData& getLayerData(int id)
{
    MapIdToLayerData::iterator found = layers.find(id);
    if (found == layers.end())
        CV_Error(Error::StsObjectNotFound, format("Layer with requested id=%d not found", id));
    return found->second;
}
|
|
|
|
|
|
|
|
// Return a reference to the LayerData registered under `layerName`.
// Raises StsError when no layer carries this name.
LayerData& getLayerData(const String &layerName)
{
    const int id = getLayerId(layerName);
    if (id < 0)
        CV_Error(Error::StsError, "Requested layer \"" + layerName + "\" not found");
    return getLayerData(id);
}
|
|
|
|
|
|
|
|
// Resolve LayerData from a descriptor holding either a numeric id or a
// layer name; any other payload is a usage error.
LayerData& getLayerData(const DictValue &layerDesc)
{
    CV_Assert(layerDesc.isInt() || layerDesc.isString());
    return layerDesc.isInt() ? getLayerData(layerDesc.get<int>())
                             : getLayerData(layerDesc.get<String>());
}
|
|
|
|
|
|
|
|
// Wire output pin `from` into input slot `inNum` of layer `ld`.
// Grows the input list on demand; rejects rebinding a slot that is already
// connected to a different pin.
static void addLayerInput(LayerData &ld, int inNum, LayerPin from)
{
    const int numInputs = (int)ld.inputBlobsId.size();
    if (numInputs <= inNum)
        ld.inputBlobsId.resize(inNum + 1);
    else
    {
        const LayerPin &existing = ld.inputBlobsId[inNum];
        if (existing.valid() && !existing.equal(from))
            CV_Error(Error::StsError, format("Input #%d of layer \"%s\" already was connected",
                                             inNum, ld.name.c_str()));
    }
    ld.inputBlobsId[inNum] = from;
}
|
|
|
|
|
|
|
|
// Translate an output name to an output index for layer `ld`.
// An empty name conventionally denotes the layer's first output (index 0).
int resolvePinOutputName(LayerData &ld, const String &outName)
{
    return outName.empty() ? 0
                           : ld.getLayerInstance()->outputNameToIndex(outName);
}
|
|
|
|
|
2018-06-20 19:25:24 +08:00
|
|
|
// Build a LayerPin from a layer alias. An empty name maps to layer 0
// (the network input layer). When the layer resolves, the same alias is
// also used to pick the output index within that layer.
LayerPin getPinByAlias(const String &layerName)
{
    LayerPin pin;
    pin.lid = layerName.empty() ? 0 : getLayerId(layerName);
    if (pin.lid >= 0)
        pin.oid = resolvePinOutputName(getLayerData(pin.lid), layerName);
    return pin;
}
|
|
|
|
|
2018-06-20 19:25:24 +08:00
|
|
|
std::vector<LayerPin> getLayerOutPins(const String &layerName)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
int lid = (layerName.empty()) ? 0 : getLayerId(layerName);
|
|
|
|
|
|
|
|
std::vector<LayerPin> pins;
|
|
|
|
|
|
|
|
for (int i = 0; i < layers[lid].outputBlobs.size(); i++)
|
|
|
|
{
|
|
|
|
pins.push_back(LayerPin(lid, i));
|
|
|
|
}
|
|
|
|
|
|
|
|
return pins;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register a directed edge in the graph: output #outNum of layer
// `outLayerId` feeds input #inNum of layer `inLayerId`.
// Layers must be added in topological order, hence the id ordering assert.
void connect(int outLayerId, int outNum, int inLayerId, int inNum)
{
    CV_Assert(outLayerId < inLayerId);
    LayerData &ldOut = getLayerData(outLayerId);
    LayerData &ldInp = getLayerData(inLayerId);

    // Record the producer pin on the consumer side and mark the producer's
    // output as required so it is not optimized away.
    addLayerInput(ldInp, inNum, LayerPin(outLayerId, outNum));
    ldOut.requiredOutputs.insert(outNum);
    // NOTE(review): the consumer pin stores outNum (the producer's output
    // index) rather than inNum; presumably only the consumer's layer id is
    // consulted downstream — confirm before relying on the oid field here.
    ldOut.consumers.push_back(LayerPin(inLayerId, outNum));
}
|
|
|
|
|
|
|
|
void initBackend()
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV)
|
2018-04-26 19:20:16 +08:00
|
|
|
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
|
2018-02-06 16:57:35 +08:00
|
|
|
else if (preferableBackend == DNN_BACKEND_HALIDE)
|
|
|
|
initHalideBackend();
|
|
|
|
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
|
|
|
|
initInfEngineBackend();
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
else if (preferableBackend == DNN_BACKEND_VKCOM)
|
|
|
|
initVkComBackend();
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
else if (preferableBackend == DNN_BACKEND_CUDA)
|
|
|
|
initCUDABackend();
|
2018-02-06 16:57:35 +08:00
|
|
|
else
|
|
|
|
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build Halide backend nodes for every supported layer, fusing chains of
// in-place layers (e.g. conv+bn+relu) into a single backend node where
// possible. Statement order matters: baseIt tracks the current fusion base
// and must be updated exactly where it is below.
void initHalideBackend()
{
    CV_TRACE_FUNCTION();
    CV_Assert_N(preferableBackend == DNN_BACKEND_HALIDE, haveHalide());

    // Iterator to current layer.
    MapIdToLayerData::iterator it = layers.begin();
    // Iterator to base layer for fusion. In example, in case of conv+bn+relu
    // it'll be a conv layer.
    MapIdToLayerData::iterator baseIt = layers.begin();
    for (; it != layers.end(); it++)
    {
        LayerData &ldTop = it->second;
        Ptr<Layer> layerTop = ldTop.layerInstance;
        if (!layerTop->supportBackend(preferableBackend))
        {
            // Move base iterator to layer that don't support preferable
            // backend to prevent fusion over layer of different backend.
            baseIt = it;
            continue;
        }
        // Try to do layers fusion.
        LayerData &ldBot = baseIt->second;
        Ptr<Layer> layerBot = ldBot.layerInstance;
        // 1. Check that bottom and top from the same backends.
        if (it != layers.begin() && layerBot->supportBackend(preferableBackend))
        {
            // 2. Check that current layer works in-place: single input/output
            // pair that aliases the base layer's output buffer.
            bool inPlace = ldTop.inputBlobs.size() == 1 &&
                           ldBot.outputBlobs.size() == 1 &&
                           ldTop.inputBlobs[0]->data ==
                           ldBot.outputBlobs[0].data;
            if (inPlace)
            {
                // 3. Try to attach node. On success the top layer is skipped
                // at forward time and the fused node replaces the base node.
                CV_Assert(!ldBot.backendNodes[preferableBackend].empty());
                Ptr<BackendNode> fusedNode =
                    layerTop->tryAttach(ldBot.backendNodes[preferableBackend]);
                if (!fusedNode.empty())
                {
                    ldTop.skip = true;
                    ldBot.backendNodes[preferableBackend] = fusedNode;
                    // The fused node now produces the top layer's outputs.
                    ldBot.outputBlobsWrappers = ldTop.outputBlobsWrappers;
                    continue;
                }
            }
        }
        // No layers fusion: create a fresh Halide node for this layer and
        // make it the new fusion base.
        ldTop.skip = false;
        ldTop.backendNodes[DNN_BACKEND_HALIDE] =
            layerTop->initHalide(ldTop.inputBlobsWrappers);
        baseIt = it;
    }
}
|
|
|
|
|
2018-02-06 21:23:18 +08:00
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
// Before launching Inference Engine graph we need to specify output blobs.
|
|
|
|
// This function requests output blobs based on inputs references of
|
|
|
|
// layers from default backend or layers from different graphs.
|
|
|
|
// Before launching Inference Engine graph we need to specify output blobs.
// This function requests output blobs based on inputs references of
// layers from default backend or layers from different graphs.
void addInfEngineNetOutputs(LayerData &ld)
{
    // Graph that owns `ld`'s backend node, if it has one.
    Ptr<InfEngineBackendNet> layerNet;
    if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
    {
        Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
        if (!node.empty())
        {
            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
            CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
            layerNet = ieNode->net;
        }
    }
    // For an every input reference we check that it belongs to one of
    // the Inference Engine backend graphs. Request an output blob if it is.
    // Do nothing if layer's input is from the same graph.
    // size_t index avoids the signed/unsigned comparison with size().
    for (size_t i = 0; i < ld.inputBlobsId.size(); ++i)
    {
        LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
        Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
        if (!inpNode.empty())
        {
            Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
            CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
            if (layerNet != ieInpNode->net)
            {
                // layerNet is empty or nodes are from different graphs.
                ieInpNode->net->addOutput(ieInpNode->layer.getName());
            }
        }
    }
}
|
|
|
|
#endif // HAVE_INF_ENGINE
|
|
|
|
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment "OPENCL_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispathed group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
// Build Vulkan (VKCOM) backend nodes for every layer that supports this
// backend. When initVkCom throws for a layer, that layer falls back to the
// CPU implementation by storing an empty backend node.
void initVkComBackend()
{
    CV_TRACE_FUNCTION();
    CV_Assert(preferableBackend == DNN_BACKEND_VKCOM);
#ifdef HAVE_VULKAN
    // Vulkan runtime is loaded dynamically; bail out silently when absent.
    if (!haveVulkan())
        return;

    MapIdToLayerData::iterator it = layers.begin();
    for (; it != layers.end(); it++)
    {
        LayerData &ld = it->second;
        Ptr<Layer> layer = ld.layerInstance;
        if (!layer->supportBackend(preferableBackend))
        {
            continue;
        }

        ld.skip = false;

        try
        {
            ld.backendNodes[DNN_BACKEND_VKCOM] =
                layer->initVkCom(ld.inputBlobsWrappers);
        }
        catch (const cv::Exception& e)
        {
            // Per-layer fallback: an empty node means this layer runs on CPU.
            CV_LOG_ERROR(NULL, "initVkCom failed, fallback to CPU implementation. " << e.what());
            ld.backendNodes[DNN_BACKEND_VKCOM] = Ptr<BackendNode>();
        }
    }
#endif
}
|
|
|
|
|
2018-02-06 16:57:35 +08:00
|
|
|
void initInfEngineBackend()
|
|
|
|
{
|
|
|
|
CV_TRACE_FUNCTION();
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
|
2018-02-06 16:57:35 +08:00
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
MapIdToLayerData::iterator it;
|
|
|
|
Ptr<InfEngineBackendNet> net;
|
2018-03-17 00:27:04 +08:00
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
for (it = layers.begin(); it != layers.end(); ++it)
|
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
|
|
|
if (ld.id == 0)
|
|
|
|
{
|
|
|
|
CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
|
|
|
|
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
|
|
|
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
|
|
|
{
|
|
|
|
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
|
2019-08-07 03:20:26 +08:00
|
|
|
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
|
2018-06-05 04:51:28 +08:00
|
|
|
dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
|
2019-08-07 03:20:26 +08:00
|
|
|
#else
|
|
|
|
dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
|
|
|
|
#endif
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
|
|
|
{
|
|
|
|
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
|
2019-08-07 03:20:26 +08:00
|
|
|
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
|
2018-06-05 04:51:28 +08:00
|
|
|
dataPtr->name = ld.name;
|
2019-08-07 03:20:26 +08:00
|
|
|
#else
|
|
|
|
dataPtr->setName(ld.name);
|
|
|
|
#endif
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-17 00:27:04 +08:00
|
|
|
if (skipInfEngineInit)
|
|
|
|
{
|
|
|
|
Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
|
|
|
|
CV_Assert(!node.empty());
|
|
|
|
|
|
|
|
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
|
|
|
|
CV_Assert(!ieNode.empty());
|
|
|
|
|
|
|
|
for (it = layers.begin(); it != layers.end(); ++it)
|
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
2018-06-05 04:51:28 +08:00
|
|
|
if (ld.id == 0)
|
2018-03-17 00:27:04 +08:00
|
|
|
{
|
2018-06-05 04:51:28 +08:00
|
|
|
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
|
|
|
|
{
|
|
|
|
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
|
2019-08-07 03:20:26 +08:00
|
|
|
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
|
2018-06-05 04:51:28 +08:00
|
|
|
dataPtr->name = netInputLayer->outNames[i];
|
2019-08-07 03:20:26 +08:00
|
|
|
#else
|
|
|
|
dataPtr->setName(netInputLayer->outNames[i]);
|
|
|
|
#endif
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
|
|
|
{
|
|
|
|
InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
|
2019-08-07 03:20:26 +08:00
|
|
|
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
|
2018-06-05 04:51:28 +08:00
|
|
|
dataPtr->name = ld.name;
|
2019-08-07 03:20:26 +08:00
|
|
|
#else
|
|
|
|
dataPtr->setName(ld.name);
|
|
|
|
#endif
|
2018-06-05 04:51:28 +08:00
|
|
|
}
|
2018-03-17 00:27:04 +08:00
|
|
|
}
|
|
|
|
ieNode->net->addBlobs(ld.inputBlobsWrappers);
|
|
|
|
ieNode->net->addBlobs(ld.outputBlobsWrappers);
|
|
|
|
ld.skip = true;
|
|
|
|
}
|
|
|
|
layers[lastLayerId].skip = false;
|
2018-03-12 22:35:28 +08:00
|
|
|
ieNode->net->init(preferableTarget);
|
2018-03-17 00:27:04 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build Inference Engine networks from sets of layers that support this
|
|
|
|
// backend. Split a whole model on several Inference Engine networks if
|
2019-01-14 14:55:44 +08:00
|
|
|
// some of layers are not implemented.
|
2018-03-17 00:27:04 +08:00
|
|
|
|
2019-10-22 00:09:44 +08:00
|
|
|
bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
|
|
|
|
BackendRegistry::checkIETarget(DNN_TARGET_CPU);
|
|
|
|
|
2018-02-06 21:23:18 +08:00
|
|
|
// Set of all input and output blobs wrappers for current network.
|
2018-06-05 04:51:28 +08:00
|
|
|
std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
|
2018-02-06 16:57:35 +08:00
|
|
|
for (it = layers.begin(); it != layers.end(); ++it)
|
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
2018-06-05 04:51:28 +08:00
|
|
|
if (ld.id == 0 && ld.skip)
|
2018-05-31 19:05:21 +08:00
|
|
|
continue;
|
|
|
|
bool fused = ld.skip;
|
2018-02-06 16:57:35 +08:00
|
|
|
|
2018-03-12 22:35:28 +08:00
|
|
|
Ptr<Layer> layer = ld.layerInstance;
|
2018-07-26 22:22:05 +08:00
|
|
|
if (!fused && !layer->supportBackend(preferableBackend))
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2019-09-10 00:24:54 +08:00
|
|
|
bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1 &&
|
2019-10-22 00:09:44 +08:00
|
|
|
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
|
|
|
|
supportsCPUFallback;
|
2019-09-03 23:58:57 +08:00
|
|
|
// TODO: there is a bug in Myriad plugin with custom layers shape infer.
|
|
|
|
if (preferableTarget == DNN_TARGET_MYRIAD)
|
|
|
|
{
|
|
|
|
for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
|
|
|
|
{
|
|
|
|
customizable = ld.inputBlobs[i]->size[0] == 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: fix these workarounds
|
|
|
|
if (preferableTarget == DNN_TARGET_MYRIAD ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16)
|
|
|
|
customizable &= ld.type != "Concat";
|
|
|
|
|
|
|
|
if (preferableTarget == DNN_TARGET_OPENCL ||
|
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16)
|
|
|
|
customizable &= ld.type != "Power";
|
|
|
|
|
|
|
|
if (preferableTarget == DNN_TARGET_OPENCL)
|
|
|
|
customizable &= ld.type != "Eltwise";
|
|
|
|
|
|
|
|
if (!customizable)
|
|
|
|
{
|
|
|
|
addInfEngineNetOutputs(ld);
|
|
|
|
net = Ptr<InfEngineBackendNet>();
|
|
|
|
netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
|
|
|
|
layer->preferableTarget = DNN_TARGET_CPU;
|
|
|
|
continue;
|
|
|
|
}
|
2018-02-06 16:57:35 +08:00
|
|
|
}
|
2018-03-12 22:35:28 +08:00
|
|
|
ld.skip = true; // Initially skip all Inference Engine supported layers.
|
2018-02-06 16:57:35 +08:00
|
|
|
|
2018-02-06 21:23:18 +08:00
|
|
|
// Create a new network if one of inputs from different Inference Engine graph.
|
2018-02-06 16:57:35 +08:00
|
|
|
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
|
|
|
|
{
|
|
|
|
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
|
|
|
|
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
|
|
|
|
if (!inpNode.empty())
|
|
|
|
{
|
|
|
|
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
|
2018-08-22 21:04:40 +08:00
|
|
|
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
|
2018-02-06 21:23:18 +08:00
|
|
|
if (ieInpNode->net != net)
|
|
|
|
{
|
|
|
|
net = Ptr<InfEngineBackendNet>();
|
2019-01-14 14:55:44 +08:00
|
|
|
netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
|
2018-02-06 21:23:18 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-06 16:57:35 +08:00
|
|
|
Ptr<BackendNode> node;
|
|
|
|
if (!net.empty())
|
|
|
|
{
|
2018-03-12 22:35:28 +08:00
|
|
|
if (fused)
|
2018-02-06 16:57:35 +08:00
|
|
|
{
|
2018-03-12 22:35:28 +08:00
|
|
|
bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
|
|
|
|
ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
|
|
|
|
CV_Assert(inPlace);
|
|
|
|
node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
|
|
|
|
ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
|
2018-02-06 16:57:35 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
else
|
2018-02-06 16:57:35 +08:00
|
|
|
net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet());
|
|
|
|
|
|
|
|
if (!fused)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2019-09-03 23:58:57 +08:00
|
|
|
if (layer->supportBackend(preferableBackend))
|
|
|
|
node = layer->initInfEngine(ld.inputBlobsWrappers);
|
|
|
|
else
|
|
|
|
{
|
|
|
|
node = Ptr<BackendNode>(new InfEngineBackendNode(
|
|
|
|
ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-08-02 21:36:15 +08:00
|
|
|
else if (node.empty())
|
|
|
|
continue;
|
2018-02-06 16:57:35 +08:00
|
|
|
|
|
|
|
CV_Assert(!node.empty());
|
|
|
|
ld.backendNodes[preferableBackend] = node;
|
|
|
|
|
|
|
|
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
|
|
|
|
CV_Assert(!ieNode.empty());
|
|
|
|
ieNode->net = net;
|
|
|
|
|
2019-01-14 14:55:44 +08:00
|
|
|
// Convert weights in FP16 for specific targets.
|
|
|
|
if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
|
|
|
|
preferableTarget == DNN_TARGET_MYRIAD ||
|
|
|
|
preferableTarget == DNN_TARGET_FPGA) && !fused)
|
|
|
|
{
|
2019-04-01 20:00:25 +08:00
|
|
|
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
|
2019-02-11 22:13:39 +08:00
|
|
|
for (const std::string& name : {"weights", "biases"})
|
|
|
|
{
|
|
|
|
auto it = ieNode->layer.getParameters().find(name);
|
|
|
|
if (it != ieNode->layer.getParameters().end())
|
|
|
|
{
|
2019-02-14 18:30:30 +08:00
|
|
|
InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
|
|
|
|
it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
|
2019-02-11 22:13:39 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2019-01-14 14:55:44 +08:00
|
|
|
auto& blobs = ieNode->layer.getConstantData();
|
|
|
|
if (blobs.empty())
|
|
|
|
{
|
|
|
|
// In case of non weightable layer we have to specify
|
|
|
|
// it's precision adding dummy blob.
|
|
|
|
auto blob = InferenceEngine::make_shared_blob<int16_t>(
|
|
|
|
InferenceEngine::Precision::FP16,
|
|
|
|
InferenceEngine::Layout::C, {1});
|
|
|
|
blob->allocate();
|
|
|
|
blobs[""] = blob;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (auto& it : blobs)
|
|
|
|
it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
|
|
|
|
}
|
2019-02-11 22:13:39 +08:00
|
|
|
#endif
|
2019-01-14 14:55:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!fused)
|
|
|
|
net->addLayer(ieNode->layer);
|
|
|
|
|
|
|
|
net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
|
|
|
|
net->addBlobs(ld.inputBlobsWrappers);
|
|
|
|
net->addBlobs(ld.outputBlobsWrappers);
|
|
|
|
addInfEngineNetOutputs(ld);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-02-06 16:57:35 +08:00
|
|
|
|
|
|
|
// Initialize all networks.
|
|
|
|
for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
|
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
|
|
|
if (ld.backendNodes.find(preferableBackend) == ld.backendNodes.end())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
|
|
|
|
if (node.empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
|
|
|
|
if (ieNode.empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
CV_Assert(!ieNode->net.empty());
|
|
|
|
|
|
|
|
if (!ieNode->net->isInitialized())
|
|
|
|
{
|
2018-03-12 22:35:28 +08:00
|
|
|
ieNode->net->init(preferableTarget);
|
2018-02-06 16:57:35 +08:00
|
|
|
ld.skip = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif // HAVE_INF_ENGINE
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
void initCUDABackend() {
    CV_Assert(haveCUDA());

#ifdef HAVE_CUDA
    // Attach a CUDA backend node to every layer whose implementation supports
    // DNN_BACKEND_CUDA. Layers without CUDA support are skipped here and will
    // therefore run through the CPU path instead.
    for (auto& entry : layers)
    {
        auto& ld = entry.second;

        if (!ld.layerInstance->supportBackend(DNN_BACKEND_CUDA))
        {
            // Surface the fallback so users can tell which layers stay on the CPU.
            std::ostringstream os;
            os << "CUDA backend will fallback to the CPU implementation for the layer \"" << ld.name
               << "\" of type " << ld.type << '\n';
            CV_LOG_INFO(NULL, os.str().c_str());
            continue;
        }

        /* we make a copy so that `initCUDA` doesn't modify `cudaInfo->context` */
        auto context = cudaInfo->context;

        auto node = ld.layerInstance->initCUDA(&context, ld.inputBlobsWrappers, ld.outputBlobsWrappers);
        ld.backendNodes[DNN_BACKEND_CUDA] = node;

        // Grow the shared scratch buffer to the largest requirement seen so far.
        auto cudaNode = node.dynamicCast<CUDABackendNode>();
        cudaInfo->workspace.require(cudaNode->get_workspace_memory_in_bytes());
    }
#endif
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
void allocateLayer(int lid, const LayersShapesMap& layersShapes)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
LayerData &ld = layers[lid];
|
|
|
|
|
|
|
|
//already allocated
|
|
|
|
if (ld.flag)
|
|
|
|
return;
|
|
|
|
|
|
|
|
size_t ninputs = ld.inputBlobsId.size();
|
|
|
|
#if 0
|
|
|
|
printf("layer %s:", ld.name.c_str());
|
|
|
|
for (size_t i = 0; i < ninputs; i++)
|
|
|
|
{
|
|
|
|
int inp_lid = ld.inputBlobsId[i].lid;
|
|
|
|
LayerData &inp_ld = layers[inp_lid];
|
|
|
|
int inp_outputs = (int)inp_ld.outputBlobs.size();
|
|
|
|
std::cout << " " << inp_ld.name << "(" << inp_outputs;
|
|
|
|
|
|
|
|
for( int j = 0; j < inp_outputs; j++ )
|
|
|
|
{
|
|
|
|
std::cout << (j == 0 ? ": " : ", ") << inp_ld.outputBlobs[j].size;
|
|
|
|
}
|
|
|
|
std::cout << ")";
|
|
|
|
}
|
|
|
|
printf("\n");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
//determine parent layers
|
|
|
|
for (size_t i = 0; i < ninputs; i++)
|
|
|
|
ld.inputLayersId.insert(ld.inputBlobsId[i].lid);
|
|
|
|
|
|
|
|
//allocate parents
|
|
|
|
for (set<int>::iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++)
|
|
|
|
allocateLayer(*i, layersShapes);
|
|
|
|
|
|
|
|
//bind inputs
|
2018-07-09 19:35:54 +08:00
|
|
|
if (ld.id == 0) // DataLayer
|
|
|
|
{
|
|
|
|
ninputs = netInputLayer->inputsData.size();
|
|
|
|
ld.inputBlobsWrappers.resize(ninputs);
|
|
|
|
for (size_t i = 0; i < ninputs; i++)
|
|
|
|
{
|
|
|
|
ld.inputBlobsWrappers[i] = wrap(netInputLayer->inputsData[i]);
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
if (IS_DNN_CUDA_TARGET(preferableTarget))
|
|
|
|
{
|
|
|
|
auto wrapper = ld.inputBlobsWrappers[i].dynamicCast<CUDABackendWrapper>();
|
|
|
|
wrapper->setStream(cudaInfo->context.stream);
|
|
|
|
}
|
|
|
|
#endif
|
2018-07-09 19:35:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-07-09 19:35:54 +08:00
|
|
|
ld.inputBlobs.resize(ninputs);
|
|
|
|
ld.inputBlobsWrappers.resize(ninputs);
|
|
|
|
for (size_t i = 0; i < ninputs; i++)
|
|
|
|
{
|
|
|
|
LayerPin from = ld.inputBlobsId[i];
|
|
|
|
CV_Assert(from.valid());
|
|
|
|
CV_DbgAssert(layers.count(from.lid) && (int)layers[from.lid].outputBlobs.size() > from.oid);
|
|
|
|
ld.inputBlobs[i] = &layers[from.lid].outputBlobs[from.oid];
|
|
|
|
ld.inputBlobsWrappers[i] = layers[from.lid].outputBlobsWrappers[from.oid];
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
LayersShapesMap::const_iterator layerShapesIt = layersShapes.find(lid);
|
|
|
|
|
|
|
|
CV_Assert(layerShapesIt != layersShapes.end());
|
|
|
|
|
|
|
|
std::vector<LayerPin> pinsForInternalBlobs;
|
2018-02-06 16:57:35 +08:00
|
|
|
blobManager.allocateBlobsForLayer(ld, layerShapesIt->second, pinsForInternalBlobs,
|
2018-06-01 15:54:12 +08:00
|
|
|
preferableBackend == DNN_BACKEND_OPENCV &&
|
2018-04-26 19:20:16 +08:00
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16);
|
2017-09-06 15:34:07 +08:00
|
|
|
ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
|
|
|
|
for (int i = 0; i < ld.outputBlobs.size(); ++i)
|
|
|
|
{
|
|
|
|
ld.outputBlobsWrappers[i] = wrap(ld.outputBlobs[i]);
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
if (IS_DNN_CUDA_TARGET(preferableTarget))
|
|
|
|
{
|
|
|
|
auto wrapper = ld.outputBlobsWrappers[i].dynamicCast<CUDABackendWrapper>();
|
|
|
|
wrapper->setStream(cudaInfo->context.stream);
|
|
|
|
}
|
|
|
|
#endif
|
2017-09-06 15:34:07 +08:00
|
|
|
}
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
|
|
|
|
/* CUDA backend has its own system for internal blobs; we don't need these */
|
|
|
|
ld.internalBlobsWrappers.resize((preferableBackend == DNN_BACKEND_CUDA) ? 0 : ld.internals.size());
|
|
|
|
for (int i = 0; i < ld.internalBlobsWrappers.size(); ++i)
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
|
|
|
ld.internalBlobsWrappers[i] = wrap(ld.internals[i]);
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
Ptr<Layer> layerPtr = ld.getLayerInstance();
|
|
|
|
{
|
2018-09-06 18:26:47 +08:00
|
|
|
std::vector<Mat> inps(ld.inputBlobs.size());
|
|
|
|
for (int i = 0; i < ld.inputBlobs.size(); ++i)
|
|
|
|
{
|
|
|
|
inps[i] = *ld.inputBlobs[i];
|
|
|
|
}
|
|
|
|
layerPtr->finalize(inps, ld.outputBlobs);
|
Merge pull request #9114 from pengli:dnn_rebase
add libdnn acceleration to dnn module (#9114)
* import libdnn code
Signed-off-by: Li Peng <peng.li@intel.com>
* add convolution layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add pooling layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add softmax layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add lrn layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add innerproduct layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add HAVE_OPENCL macro
Signed-off-by: Li Peng <peng.li@intel.com>
* fix for convolution ocl
Signed-off-by: Li Peng <peng.li@intel.com>
* enable getUMat() for multi-dimension Mat
Signed-off-by: Li Peng <peng.li@intel.com>
* use getUMat for ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* use CV_OCL_RUN macro
Signed-off-by: Li Peng <peng.li@intel.com>
* set OPENCL target when it is available
and disable fuseLayer for OCL target for the time being
Signed-off-by: Li Peng <peng.li@intel.com>
* fix innerproduct accuracy test
Signed-off-by: Li Peng <peng.li@intel.com>
* remove trailing space
Signed-off-by: Li Peng <peng.li@intel.com>
* Fixed tensorflow demo bug.
Root cause is that tensorflow has different algorithm with libdnn
to calculate convolution output dimension.
libdnn don't calculate output dimension anymore and just use one
passed in by config.
* split gemm ocl file
split it into gemm_buffer.cl and gemm_image.cl
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix compile failure
Signed-off-by: Li Peng <peng.li@intel.com>
* check env flag for auto tuning
Signed-off-by: Li Peng <peng.li@intel.com>
* switch to new ocl kernels for softmax layer
Signed-off-by: Li Peng <peng.li@intel.com>
* update softmax layer
on some platform subgroup extension may not work well,
fallback to non subgroup ocl acceleration.
Signed-off-by: Li Peng <peng.li@intel.com>
* fallback to cpu path for fc layer with multi output
Signed-off-by: Li Peng <peng.li@intel.com>
* update output message
Signed-off-by: Li Peng <peng.li@intel.com>
* update fully connected layer
fallback to gemm API if libdnn return false
Signed-off-by: Li Peng <peng.li@intel.com>
* Add ReLU OCL implementation
* disable layer fusion for now
Signed-off-by: Li Peng <peng.li@intel.com>
* Add OCL implementation for concat layer
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* libdnn: update license and copyrights
Also refine libdnn coding style
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
Signed-off-by: Li Peng <peng.li@intel.com>
* DNN: Don't link OpenCL library explicitly
* DNN: Make default preferableTarget to DNN_TARGET_CPU
User should set it to DNN_TARGET_OPENCL explicitly if want to
use OpenCL acceleration.
Also don't fusion when using DNN_TARGET_OPENCL
* DNN: refine coding style
* Add getOpenCLErrorString
* DNN: Use int32_t/uint32_t instread of alias
* Use namespace ocl4dnn to include libdnn things
* remove extra copyTo in softmax ocl path
Signed-off-by: Li Peng <peng.li@intel.com>
* update ReLU layer ocl path
Signed-off-by: Li Peng <peng.li@intel.com>
* Add prefer target property for layer class
It is used to indicate the target for layer forwarding,
either the default CPU target or OCL target.
Signed-off-by: Li Peng <peng.li@intel.com>
* Add cl_event based timer for cv::ocl
* Rename libdnn to ocl4dnn
Signed-off-by: Li Peng <peng.li@intel.com>
Signed-off-by: wzw <zhiwen.wu@intel.com>
* use UMat for ocl4dnn internal buffer
Remove allocateMemory which use clCreateBuffer directly
Signed-off-by: Li Peng <peng.li@intel.com>
Signed-off-by: wzw <zhiwen.wu@intel.com>
* enable buffer gemm in ocl4dnn innerproduct
Signed-off-by: Li Peng <peng.li@intel.com>
* replace int_tp globally for ocl4dnn kernels.
Signed-off-by: wzw <zhiwen.wu@intel.com>
Signed-off-by: Li Peng <peng.li@intel.com>
* create UMat for layer params
Signed-off-by: Li Peng <peng.li@intel.com>
* update sign ocl kernel
Signed-off-by: Li Peng <peng.li@intel.com>
* update image based gemm of inner product layer
Signed-off-by: Li Peng <peng.li@intel.com>
* remove buffer gemm of inner product layer
call cv::gemm API instead
Signed-off-by: Li Peng <peng.li@intel.com>
* change ocl4dnn forward parameter to UMat
Signed-off-by: Li Peng <peng.li@intel.com>
* Refine auto-tuning mechanism.
- Use OPENCV_OCL4DNN_KERNEL_CONFIG_PATH to set cache directory
for fine-tuned kernel configuration.
e.g. export OPENCV_OCL4DNN_KERNEL_CONFIG_PATH=/home/tmp,
the cache directory will be /home/tmp/spatialkernels/ on Linux.
- Define environment OPENCV_OCL4DNN_ENABLE_AUTO_TUNING to enable
auto-tuning.
- OPENCV_OPENCL_ENABLE_PROFILING is only used to enable profiling
for OpenCL command queue. This fix basic kernel get wrong running
time, i.e. 0ms.
- If creating cache directory failed, disable auto-tuning.
* Detect and create cache dir on windows
Signed-off-by: Li Peng <peng.li@intel.com>
* Refine gemm like convolution kernel.
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix redundant swizzleWeights calling when use cached kernel config.
* Fix "out of resource" bug when auto-tuning too many kernels.
* replace cl_mem with UMat in ocl4dnnConvSpatial class
* OCL4DNN: reduce the tuning kernel candidate.
This patch could reduce 75% of the tuning candidates with less
than 2% performance impact for the final result.
Signed-off-by: Zhigang Gong <zhigang.gong@intel.com>
* replace cl_mem with umat in ocl4dnn convolution
Signed-off-by: Li Peng <peng.li@intel.com>
* remove weight_image_ of ocl4dnn inner product
Actually it is unused in the computation
Signed-off-by: Li Peng <peng.li@intel.com>
* Various fixes for ocl4dnn
1. OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel())
2. Ptr<OCL4DNNInnerProduct<float> > innerProductOp
3. Code comments cleanup
4. ignore check on OCL cpu device
Signed-off-by: Li Peng <peng.li@intel.com>
* add build option for log softmax
Signed-off-by: Li Peng <peng.li@intel.com>
* remove unused ocl kernels in ocl4dnn
Signed-off-by: Li Peng <peng.li@intel.com>
* replace ocl4dnnSet with opencv setTo
Signed-off-by: Li Peng <peng.li@intel.com>
* replace ALIGN with cv::alignSize
Signed-off-by: Li Peng <peng.li@intel.com>
* check kernel build options
Signed-off-by: Li Peng <peng.li@intel.com>
* Handle program compilation fail properly.
* Use std::numeric_limits<float>::infinity() for large float number
* check ocl4dnn kernel compilation result
Signed-off-by: Li Peng <peng.li@intel.com>
* remove unused ctx_id
Signed-off-by: Li Peng <peng.li@intel.com>
* change clEnqueueNDRangeKernel to kernel.run()
Signed-off-by: Li Peng <peng.li@intel.com>
* change cl_mem to UMat in image based gemm
Signed-off-by: Li Peng <peng.li@intel.com>
* check intel subgroup support for lrn and pooling layer
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix convolution bug if group is greater than 1
Signed-off-by: Li Peng <peng.li@intel.com>
* Set default layer preferableTarget to be DNN_TARGET_CPU
Signed-off-by: Li Peng <peng.li@intel.com>
* Add ocl perf test for convolution
Signed-off-by: Li Peng <peng.li@intel.com>
* Add more ocl accuracy test
Signed-off-by: Li Peng <peng.li@intel.com>
* replace cl_image with ocl::Image2D
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix build failure in elementwise layer
Signed-off-by: Li Peng <peng.li@intel.com>
* use getUMat() to get blob data
Signed-off-by: Li Peng <peng.li@intel.com>
* replace cl_mem handle with ocl::KernelArg
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(build): don't use C++11, OPENCL_LIBRARIES fix
* dnn(ocl4dnn): remove unused OpenCL kernels
* dnn(ocl4dnn): extract OpenCL code into .cl files
* dnn(ocl4dnn): refine auto-tuning
Defaultly disable auto-tuning, set OPENCV_OCL4DNN_ENABLE_AUTO_TUNING
environment variable to enable it.
Use a set of pre-tuned configs as default config if auto-tuning is disabled.
These configs are tuned for Intel GPU with 48/72 EUs, and for googlenet,
AlexNet, ResNet-50
If default config is not suitable, use the first available kernel config
from the candidates. Candidate priority from high to low is gemm like kernel,
IDLF kernel, basick kernel.
* dnn(ocl4dnn): pooling doesn't use OpenCL subgroups
* dnn(ocl4dnn): fix perf test
OpenCV has default 3sec time limit for each performance test.
Warmup OpenCL backend outside of perf measurement loop.
* use ocl::KernelArg as much as possible
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): fix bias bug for gemm like kernel
* dnn(ocl4dnn): wrap cl_mem into UMat
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): Refine signature of kernel config
- Use more readable string as signture of kernel config
- Don't count device name and vendor in signature string
- Default kernel configurations are tuned for Intel GPU with
24/48/72 EUs, and for googlenet, AlexNet, ResNet-50 net model.
* dnn(ocl4dnn): swap width/height in configuration
* dnn(ocl4dnn): enable configs for Intel OpenCL runtime only
* core: make configuration helper functions accessible from non-core modules
* dnn(ocl4dnn): update kernel auto-tuning behavior
Avoid unwanted creation of directories
* dnn(ocl4dnn): simplify kernel to workaround OpenCL compiler crash
* dnn(ocl4dnn): remove redundant code
* dnn(ocl4dnn): Add more clear message for simd size dismatch.
* dnn(ocl4dnn): add const to const argument
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): force compiler use a specific SIMD size for IDLF kernel
* dnn(ocl4dnn): drop unused tuneLocalSize()
* dnn(ocl4dnn): specify OpenCL queue for Timer and convolve() method
* dnn(ocl4dnn): sanitize file names used for cache
* dnn(perf): enable Network tests with OpenCL
* dnn(ocl4dnn/conv): drop computeGlobalSize()
* dnn(ocl4dnn/conv): drop unused fields
* dnn(ocl4dnn/conv): simplify ctor
* dnn(ocl4dnn/conv): refactor kernelConfig localSize=NULL
* dnn(ocl4dnn/conv): drop unsupported double / untested half types
* dnn(ocl4dnn/conv): drop unused variable
* dnn(ocl4dnn/conv): alignSize/divUp
* dnn(ocl4dnn/conv): use enum values
* dnn(ocl4dnn): drop unused innerproduct variable
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): add an generic function to check cl option support
* dnn(ocl4dnn): run softmax subgroup version kernel first
Signed-off-by: Li Peng <peng.li@intel.com>
2017-10-02 20:38:00 +08:00
|
|
|
layerPtr->preferableTarget = preferableTarget;
|
2017-06-26 18:35:51 +08:00
|
|
|
#if 0
|
|
|
|
std::cout << "\toutputs:";
|
|
|
|
size_t noutputs = ld.outputBlobs.size();
|
|
|
|
for (size_t j = 0; j < noutputs; j++)
|
|
|
|
{
|
|
|
|
std::cout << (j == 0 ? " " : ", ") << ld.outputBlobs[j].size;
|
|
|
|
}
|
|
|
|
std::cout << "\n";
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
// After allocation of layer, we decrease counters to it's input blobs.
|
|
|
|
blobManager.releaseReferences(ld.inputBlobsId);
|
|
|
|
blobManager.releaseReferences(pinsForInternalBlobs);
|
|
|
|
|
|
|
|
ld.flag = 1;
|
|
|
|
}
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
#if 0
|
|
|
|
#define printf_(args) printf args
|
|
|
|
#else
|
|
|
|
#define printf_(args)
|
|
|
|
#endif
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
|
|
|
|
{
|
2018-11-15 04:25:23 +08:00
|
|
|
if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
preferableBackend != DNN_BACKEND_CUDA &&
|
2018-11-15 04:25:23 +08:00
|
|
|
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE))
|
2017-07-04 22:23:47 +08:00
|
|
|
return;
|
|
|
|
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
// scan through all the layers. If there is convolution layer followed by the activation layer,
|
|
|
|
// we try to embed this activation into the convolution and disable separate execution of the activation
|
|
|
|
std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
|
|
|
|
blobsToKeep_.end());
|
|
|
|
MapIdToLayerData::iterator it;
|
|
|
|
for (it = layers.begin(); it != layers.end(); it++)
|
|
|
|
{
|
|
|
|
int lid = it->first;
|
|
|
|
LayerData& ld = layers[lid];
|
2018-01-21 02:55:25 +08:00
|
|
|
if( ld.skip )
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-07-04 22:23:47 +08:00
|
|
|
printf_(("skipped %s: %s\n", ld.layerInstance->name.c_str(), ld.layerInstance->type.c_str()));
|
2017-06-26 18:35:51 +08:00
|
|
|
continue;
|
|
|
|
}
|
2017-07-04 22:23:47 +08:00
|
|
|
printf_(("analyzing %s: %s\n", ld.layerInstance->name.c_str(), ld.layerInstance->type.c_str()));
|
2017-06-28 16:15:22 +08:00
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
// the optimization #1. try to fuse batch norm, scaling and/or activation layers
|
|
|
|
// with the current layer if they follow it. Normally, the are fused with the convolution layer,
|
|
|
|
// but some of them (like activation) may be fused with fully-connected, elemwise (+) and
|
|
|
|
// some other layers.
|
2017-06-28 16:15:22 +08:00
|
|
|
Ptr<Layer>& currLayer = ld.layerInstance;
|
|
|
|
if( ld.consumers.size() == 1 && pinsToKeep.count(LayerPin(lid, 0)) == 0 )
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
LayerData* nextData = &layers[ld.consumers[0].lid];
|
|
|
|
LayerPin lpNext(ld.consumers[0].lid, 0);
|
2018-02-13 17:07:56 +08:00
|
|
|
while (nextData)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-02-13 17:07:56 +08:00
|
|
|
Ptr<Layer> nextLayer = nextData->layerInstance;
|
|
|
|
if (currLayer->tryFuse(nextLayer))
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-02-13 17:07:56 +08:00
|
|
|
printf_(("\tfused with %s\n", nextLayer->name.c_str()));
|
|
|
|
nextData->skip = true;
|
2018-01-11 02:50:54 +08:00
|
|
|
ld.outputBlobs = layers[lpNext.lid].outputBlobs;
|
|
|
|
ld.outputBlobsWrappers = layers[lpNext.lid].outputBlobsWrappers;
|
2018-02-13 17:07:56 +08:00
|
|
|
if (nextData->consumers.size() == 1)
|
2017-06-29 21:45:17 +08:00
|
|
|
{
|
2018-02-13 17:07:56 +08:00
|
|
|
int nextLayerId = nextData->consumers[0].lid;
|
|
|
|
nextData = &layers[nextLayerId];
|
|
|
|
lpNext = LayerPin(nextLayerId, 0);
|
2017-06-29 21:45:17 +08:00
|
|
|
}
|
2018-02-13 17:07:56 +08:00
|
|
|
else
|
2017-06-29 21:45:17 +08:00
|
|
|
{
|
2018-02-13 17:07:56 +08:00
|
|
|
nextData = 0;
|
|
|
|
break;
|
2017-06-29 21:45:17 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-02-13 17:07:56 +08:00
|
|
|
else
|
|
|
|
break;
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
2018-06-01 15:54:12 +08:00
|
|
|
if (preferableBackend != DNN_BACKEND_OPENCV)
|
2018-03-12 22:35:28 +08:00
|
|
|
continue; // Go to the next layer.
|
|
|
|
|
2018-08-31 20:41:56 +08:00
|
|
|
// TODO: OpenCL target support more fusion styles.
|
|
|
|
if ( preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget) &&
|
|
|
|
(!cv::ocl::useOpenCL() || (ld.layerInstance->type != "Convolution" &&
|
|
|
|
ld.layerInstance->type != "MVN" && ld.layerInstance->type != "Pooling" &&
|
|
|
|
ld.layerInstance->type != "Concat")) )
|
|
|
|
continue;
|
|
|
|
|
2018-08-02 21:36:15 +08:00
|
|
|
while (nextData)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-08-02 21:36:15 +08:00
|
|
|
// For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
|
|
|
|
if (IS_DNN_OPENCL_TARGET(preferableTarget) &&
|
|
|
|
nextData->type != "ReLU" &&
|
|
|
|
nextData->type != "ChannelsPReLU" &&
|
|
|
|
nextData->type != "ReLU6" &&
|
|
|
|
nextData->type != "TanH" &&
|
|
|
|
nextData->type != "Power")
|
|
|
|
break;
|
2017-08-29 15:48:19 +08:00
|
|
|
|
2018-08-02 21:36:15 +08:00
|
|
|
Ptr<ActivationLayer> nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
|
|
|
|
if (nextActivLayer.empty())
|
|
|
|
break;
|
2017-08-29 15:48:19 +08:00
|
|
|
|
2018-08-02 21:36:15 +08:00
|
|
|
if (currLayer->setActivation(nextActivLayer))
|
2017-08-29 15:48:19 +08:00
|
|
|
{
|
|
|
|
printf_(("\tfused with %s\n", nextActivLayer->name.c_str()));
|
2018-08-02 21:36:15 +08:00
|
|
|
nextData->skip = true;
|
2018-01-11 02:50:54 +08:00
|
|
|
ld.outputBlobs = layers[lpNext.lid].outputBlobs;
|
|
|
|
ld.outputBlobsWrappers = layers[lpNext.lid].outputBlobsWrappers;
|
2018-08-02 21:36:15 +08:00
|
|
|
if (nextData->consumers.size() == 1)
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
2018-08-02 21:36:15 +08:00
|
|
|
int nextLayerId = nextData->consumers[0].lid;
|
|
|
|
nextData = &layers[nextLayerId];
|
|
|
|
lpNext = LayerPin(nextLayerId, 0);
|
|
|
|
}
|
|
|
|
else
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
2018-08-02 21:36:15 +08:00
|
|
|
nextData = 0;
|
|
|
|
break;
|
2017-11-20 11:29:18 +08:00
|
|
|
}
|
|
|
|
}
|
2018-08-02 21:36:15 +08:00
|
|
|
else
|
|
|
|
break;
|
2017-11-20 11:29:18 +08:00
|
|
|
}
|
|
|
|
|
2018-06-03 07:21:08 +08:00
|
|
|
// fuse convolution layer followed by eltwise + relu
|
2018-11-07 16:16:15 +08:00
|
|
|
if ( IS_DNN_OPENCL_TARGET(preferableTarget) && ld.layerInstance->type == "Convolution" )
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
|
|
|
Ptr<EltwiseLayer> nextEltwiseLayer;
|
|
|
|
if( nextData )
|
|
|
|
nextEltwiseLayer = nextData->layerInstance.dynamicCast<EltwiseLayer>();
|
|
|
|
|
2018-11-07 16:16:15 +08:00
|
|
|
if( !nextEltwiseLayer.empty() && pinsToKeep.count(lpNext) == 0 &&
|
2018-11-11 21:51:47 +08:00
|
|
|
nextData && nextData->inputBlobsId.size() == 2 )
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
|
|
|
LayerData *eltwiseData = nextData;
|
|
|
|
|
2018-11-07 16:16:15 +08:00
|
|
|
// Eltwise layer has two inputs. We need to determine which
|
|
|
|
// is a base convolution layer and which could be used as it's bias.
|
|
|
|
LayerData* biasLayerData = 0;
|
|
|
|
for (int i = 0; i < 2; ++i)
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
2018-11-07 16:16:15 +08:00
|
|
|
LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[i].lid];
|
|
|
|
CV_Assert(downLayerData);
|
2018-01-21 02:55:25 +08:00
|
|
|
while (downLayerData->skip)
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
2018-11-07 16:16:15 +08:00
|
|
|
if (downLayerData->inputBlobsId.size() == 1)
|
2017-11-20 11:29:18 +08:00
|
|
|
downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
|
2018-11-07 16:16:15 +08:00
|
|
|
else
|
|
|
|
{
|
|
|
|
downLayerData = 0;
|
|
|
|
break;
|
|
|
|
}
|
2017-11-20 11:29:18 +08:00
|
|
|
}
|
2018-11-07 16:16:15 +08:00
|
|
|
if (downLayerData && ld.id == downLayerData->id)
|
|
|
|
{
|
|
|
|
biasLayerData = &layers[eltwiseData->inputBlobsId[1 - i].lid];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
CV_Assert(biasLayerData);
|
|
|
|
{
|
|
|
|
if( eltwiseData->consumers.size() == 1 )
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
|
|
|
// fuse eltwise + activation layer
|
2018-11-07 16:16:15 +08:00
|
|
|
if (biasLayerData->id < ld.id)
|
2017-11-20 11:29:18 +08:00
|
|
|
{
|
|
|
|
nextData = &layers[eltwiseData->consumers[0].lid];
|
|
|
|
lpNext = LayerPin(eltwiseData->consumers[0].lid, 0);
|
|
|
|
Ptr<ActivationLayer> nextActivLayer;
|
|
|
|
if( nextData )
|
|
|
|
nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
|
|
|
|
|
|
|
|
if( !nextActivLayer.empty() && pinsToKeep.count(lpNext) == 0 &&
|
|
|
|
(!nextData->type.compare("ReLU") ||
|
|
|
|
!nextData->type.compare("ChannelsPReLU") ||
|
|
|
|
!nextData->type.compare("Power")) &&
|
|
|
|
currLayer->setActivation(nextActivLayer) )
|
|
|
|
{
|
2018-11-07 16:16:15 +08:00
|
|
|
CV_Assert_N(biasLayerData->outputBlobsWrappers.size() == 1, ld.inputBlobsWrappers.size() == 1);
|
|
|
|
ld.inputBlobsWrappers.push_back(biasLayerData->outputBlobsWrappers[0]);
|
2017-11-20 11:29:18 +08:00
|
|
|
printf_(("\tfused with %s\n", nextEltwiseLayer->name.c_str()));
|
|
|
|
printf_(("\tfused with %s\n", nextActivLayer->name.c_str()));
|
2018-01-21 02:55:25 +08:00
|
|
|
eltwiseData->skip = true;
|
|
|
|
nextData->skip = true;
|
2018-01-11 02:50:54 +08:00
|
|
|
// This optimization for cases like
|
|
|
|
// some_layer conv
|
|
|
|
// | |
|
|
|
|
// +-- eltwise --+
|
|
|
|
// |
|
|
|
|
// activ
|
|
|
|
// This way all the element-wise computations
|
|
|
|
// (i.e. some_layer+conv or some_layer*conv)
|
|
|
|
// would be done at [conv] layer. So we need to
|
|
|
|
// replace [conv]'s output blob to [eltwise]'s one
|
|
|
|
// considering that [activ] is an in-place layer.
|
|
|
|
// Also we need to move all the consumers' references.
|
|
|
|
// To prevent memory collisions (i.e. when input of
|
|
|
|
// [conv] and output of [eltwise] is the same blob)
|
|
|
|
// we allocate a new blob.
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert_N(ld.outputBlobs.size() == 1, ld.outputBlobsWrappers.size() == 1);
|
2018-01-11 02:50:54 +08:00
|
|
|
ld.outputBlobs[0] = ld.outputBlobs[0].clone();
|
|
|
|
ld.outputBlobsWrappers[0] = wrap(ld.outputBlobs[0]);
|
|
|
|
|
|
|
|
eltwiseData->outputBlobs = ld.outputBlobs;
|
|
|
|
nextData->outputBlobs = ld.outputBlobs;
|
|
|
|
eltwiseData->outputBlobsWrappers = ld.outputBlobsWrappers;
|
|
|
|
nextData->outputBlobsWrappers = ld.outputBlobsWrappers;
|
|
|
|
|
|
|
|
// Move references of [activ] layer consumers to the newly allocated blob.
|
|
|
|
for (int i = 0; i < nextData->consumers.size(); ++i)
|
|
|
|
{
|
|
|
|
LayerData& consumer = layers[nextData->consumers[i].lid];
|
|
|
|
for (int j = 0; j < consumer.inputBlobsId.size(); ++j)
|
|
|
|
{
|
|
|
|
if (consumer.inputBlobsId[j].lid == lpNext.lid)
|
|
|
|
{
|
|
|
|
consumer.inputBlobs[j] = &ld.outputBlobs[0];
|
|
|
|
consumer.inputBlobsWrappers[j] = ld.outputBlobsWrappers[0];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-11-20 11:29:18 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-08-29 15:48:19 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
2017-07-04 22:23:47 +08:00
|
|
|
|
2019-03-29 21:42:58 +08:00
|
|
|
if (preferableBackend != DNN_BACKEND_OPENCV)
|
|
|
|
continue; // Go to the next layer.
|
|
|
|
|
2019-04-08 16:29:10 +08:00
|
|
|
// the optimization #2. if there is concat layer that concatenates channels
|
2017-07-04 22:23:47 +08:00
|
|
|
// from the inputs together (i.e. axis == 1) then we make the inputs of
|
2018-06-03 07:21:08 +08:00
|
|
|
// the concat layer to write to the concatenation output buffer
|
2017-07-04 22:23:47 +08:00
|
|
|
// (and so we eliminate the concatenation layer, because the channels
|
|
|
|
// are concatenated implicitly).
|
|
|
|
Ptr<ConcatLayer> concatLayer = ld.layerInstance.dynamicCast<ConcatLayer>();
|
2017-08-28 22:37:09 +08:00
|
|
|
if( !concatLayer.empty() && concatLayer->axis == 1 && !concatLayer->padding &&
|
2017-07-04 22:23:47 +08:00
|
|
|
ld.outputBlobs.size() == 1 )
|
|
|
|
{
|
|
|
|
Mat& output = ld.outputBlobs[0];
|
2018-07-12 15:16:32 +08:00
|
|
|
UMat umat_output;
|
|
|
|
if (!ld.outputBlobsWrappers.empty() &&
|
|
|
|
(preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget)))
|
|
|
|
{
|
|
|
|
size_t i, ninputs = ld.inputBlobsId.size();
|
|
|
|
bool conv_layer = true;
|
|
|
|
for( i = 0; i < ninputs; i++ )
|
|
|
|
{
|
|
|
|
LayerPin pin = ld.inputBlobsId[i];
|
|
|
|
LayerData* inp_i_data = &layers[pin.lid];
|
|
|
|
while(inp_i_data->skip &&
|
|
|
|
inp_i_data->inputBlobsId.size() == 1 &&
|
|
|
|
inp_i_data->consumers.size() == 1)
|
|
|
|
{
|
|
|
|
pin = inp_i_data->inputBlobsId[0];
|
|
|
|
inp_i_data = &layers[pin.lid];
|
|
|
|
}
|
|
|
|
conv_layer = conv_layer && (inp_i_data->getLayerInstance()->type == "Convolution");
|
|
|
|
}
|
|
|
|
if (!conv_layer)
|
|
|
|
continue;
|
|
|
|
std::vector<UMat> umat_outputBlobs;
|
|
|
|
umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
|
|
|
|
umat_output = umat_outputBlobs[0];
|
|
|
|
}
|
2017-07-04 22:23:47 +08:00
|
|
|
|
|
|
|
// TODO: in general, this optimization can always be done, but
|
|
|
|
// many layers currently check that the input/output blobs are
|
|
|
|
// continuous arrays. Unfortunately, this is not true when
|
|
|
|
// the concatenation optimization is applied with batch_size > 1.
|
|
|
|
// so, for now, we only apply this optimization in the most popular
|
|
|
|
// case batch_size == 1.
|
|
|
|
if( output.dims == 4 && output.size[0] == 1 )
|
|
|
|
{
|
|
|
|
size_t i, ninputs = ld.inputBlobsId.size();
|
|
|
|
std::vector<LayerPin> realinputs(ninputs);
|
|
|
|
for( i = 0; i < ninputs; i++ )
|
|
|
|
{
|
|
|
|
LayerPin pin = ld.inputBlobsId[i];
|
|
|
|
LayerData* inp_i_data = &layers[pin.lid];
|
2018-01-21 02:55:25 +08:00
|
|
|
while(inp_i_data->skip &&
|
2017-12-26 21:49:33 +08:00
|
|
|
inp_i_data->inputBlobsId.size() == 1 &&
|
|
|
|
inp_i_data->consumers.size() == 1)
|
2017-07-04 22:23:47 +08:00
|
|
|
{
|
|
|
|
pin = inp_i_data->inputBlobsId[0];
|
|
|
|
inp_i_data = &layers[pin.lid];
|
|
|
|
}
|
|
|
|
printf_(("\treal input for %s is %s\n",
|
|
|
|
layers[ld.inputBlobsId[i].lid].getLayerInstance()->name.c_str(),
|
|
|
|
inp_i_data->getLayerInstance()->name.c_str()));
|
|
|
|
|
2018-01-21 02:55:25 +08:00
|
|
|
if(inp_i_data->skip || inp_i_data->consumers.size() != 1)
|
2017-07-04 22:23:47 +08:00
|
|
|
break;
|
|
|
|
realinputs[i] = pin;
|
|
|
|
}
|
|
|
|
|
|
|
|
if( i >= ninputs )
|
|
|
|
{
|
2017-12-28 21:04:09 +08:00
|
|
|
// Allocate new memory to prevent collisions during memory
|
|
|
|
// reusing (see https://github.com/opencv/opencv/pull/10456).
|
|
|
|
output = output.clone();
|
2018-07-12 15:16:32 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV &&
|
|
|
|
IS_DNN_OPENCL_TARGET(preferableTarget))
|
|
|
|
{
|
|
|
|
std::vector<UMat> umats(1);
|
|
|
|
umat_output = umat_output.clone();
|
|
|
|
umats[0] = umat_output;
|
|
|
|
OpenCLBackendWrapper::update(ld.outputBlobsWrappers, umats);
|
|
|
|
}
|
2017-07-04 22:23:47 +08:00
|
|
|
Range chrange[] = { Range::all(), Range::all(), Range::all(), Range::all() };
|
|
|
|
int ofs = 0;
|
|
|
|
for( i = 0; i < ninputs; i++ )
|
|
|
|
{
|
|
|
|
LayerPin pin = realinputs[i];
|
|
|
|
LayerData* inp_i_data = &layers[pin.lid];
|
|
|
|
int channels_i = ld.inputBlobs[i]->size[1];
|
|
|
|
chrange[1] = Range(ofs, ofs + channels_i);
|
|
|
|
printf_(("\toutput %s(%d) to channels (%d, %d)\n", inp_i_data->layerInstance->name.c_str(),
|
|
|
|
pin.oid, ofs, ofs + channels_i));
|
|
|
|
ofs += channels_i;
|
|
|
|
Mat output_slice = output(chrange);
|
|
|
|
Mat& curr_output = inp_i_data->outputBlobs[pin.oid];
|
|
|
|
CV_Assert(output_slice.isContinuous() && output_slice.size == curr_output.size);
|
2017-12-26 21:49:33 +08:00
|
|
|
Mat* oldPtr = &curr_output;
|
2017-07-04 22:23:47 +08:00
|
|
|
curr_output = output_slice;
|
2018-07-12 15:16:32 +08:00
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
|
|
|
|
{
|
|
|
|
std::vector<UMat> umats(inp_i_data->outputBlobsWrappers.size());
|
|
|
|
umats[pin.oid] = umat_output(chrange);
|
|
|
|
OpenCLBackendWrapper::update(inp_i_data->outputBlobsWrappers, umats);
|
|
|
|
}
|
2017-12-26 21:49:33 +08:00
|
|
|
// Layers that refer old input Mat will refer to the
|
|
|
|
// new data but the same Mat object.
|
2018-08-15 19:55:47 +08:00
|
|
|
CV_Assert_N(curr_output.data == output_slice.data, oldPtr == &curr_output);
|
2017-07-04 22:23:47 +08:00
|
|
|
}
|
2018-01-21 02:55:25 +08:00
|
|
|
ld.skip = true;
|
2017-07-04 22:23:47 +08:00
|
|
|
printf_(("\toptimized out Concat layer %s\n", concatLayer->name.c_str()));
|
|
|
|
}
|
2017-06-28 16:15:22 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void allocateLayers(const std::vector<LayerPin>& blobsToKeep_)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
MapIdToLayerData::iterator it;
|
|
|
|
for (it = layers.begin(); it != layers.end(); it++)
|
|
|
|
it->second.flag = 0;
|
|
|
|
|
|
|
|
CV_Assert(!layers[0].outputBlobs.empty());
|
|
|
|
ShapesVec inputShapes;
|
|
|
|
for(int i = 0; i < layers[0].outputBlobs.size(); i++)
|
|
|
|
{
|
2018-07-09 19:35:54 +08:00
|
|
|
Mat& inp = layers[0].outputBlobs[i];
|
|
|
|
CV_Assert(inp.total());
|
|
|
|
if (preferableBackend == DNN_BACKEND_OPENCV &&
|
2018-04-26 19:20:16 +08:00
|
|
|
preferableTarget == DNN_TARGET_OPENCL_FP16)
|
|
|
|
{
|
2018-07-09 19:35:54 +08:00
|
|
|
layers[0].outputBlobs[i].create(inp.dims, inp.size, CV_16S);
|
2018-04-26 19:20:16 +08:00
|
|
|
}
|
2018-07-09 19:35:54 +08:00
|
|
|
inputShapes.push_back(shape(inp));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
LayersShapesMap layersShapes;
|
|
|
|
getLayersShapes(inputShapes, layersShapes);
|
|
|
|
|
|
|
|
blobManager.reset();
|
2017-09-06 15:34:07 +08:00
|
|
|
backendWrappers.clear();
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
|
|
|
|
for(auto& layer : layers)
|
|
|
|
{
|
|
|
|
auto& ld = layer.second;
|
|
|
|
ld.inputBlobsWrappers.clear();
|
|
|
|
ld.outputBlobsWrappers.clear();
|
|
|
|
ld.internalBlobsWrappers.clear();
|
|
|
|
}
|
|
|
|
|
2017-11-02 21:21:06 +08:00
|
|
|
// Fake references to input blobs.
|
|
|
|
for (int i = 0; i < layers[0].outputBlobs.size(); ++i)
|
|
|
|
blobManager.addReference(LayerPin(0, i));
|
2017-06-26 18:35:51 +08:00
|
|
|
for (it = layers.begin(); it != layers.end(); ++it)
|
|
|
|
{
|
|
|
|
const LayerData& ld = it->second;
|
|
|
|
blobManager.addReferences(ld.inputBlobsId);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < blobsToKeep_.size(); i++)
|
|
|
|
{
|
|
|
|
blobManager.addReference(blobsToKeep_[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (it = layers.begin(); it != layers.end(); it++)
|
|
|
|
{
|
|
|
|
int lid = it->first;
|
|
|
|
allocateLayer(lid, layersShapes);
|
|
|
|
}
|
|
|
|
|
2017-08-02 22:27:58 +08:00
|
|
|
layersTimings.resize(lastLayerId + 1, 0);
|
2017-06-26 18:35:51 +08:00
|
|
|
fuseLayers(blobsToKeep_);
|
|
|
|
}
|
|
|
|
|
|
|
|
    // Executes a single layer. If a backend node was prepared for the
    // preferable backend, dispatches to it (CUDA / Halide / Inference Engine /
    // Vulkan); otherwise falls back to the default OpenCV implementation,
    // on OpenCL UMats or on host Mats depending on the target.
    // Records per-layer timing in layersTimings and sets ld.flag = 1
    // to mark the layer as processed.
    void forwardLayer(LayerData &ld)
    {
        CV_TRACE_FUNCTION();

        Ptr<Layer> layer = ld.layerInstance;

        TickMeter tm;
        tm.start();

        if( !ld.skip )
        {
            std::map<int, Ptr<BackendNode> >::iterator it = ld.backendNodes.find(preferableBackend);
            if (preferableBackend == DNN_BACKEND_OPENCV || it == ld.backendNodes.end() || it->second.empty())
            {
                // No usable backend node: fall back to the reference (OpenCV)
                // implementation of the layer.
                if (isAsync)
                    CV_Error(Error::StsNotImplemented, "Default implementation fallbacks in asynchronous mode");

                if (!layer->supportBackend(DNN_BACKEND_OPENCV))
                    CV_Error(Error::StsNotImplemented, format("Layer \"%s\" of type \"%s\" unsupported on OpenCV backend",
                                                       ld.name.c_str(), ld.type.c_str()));

                if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
                {
                    // OpenCL path: run the layer on UMat views of the blobs.
                    std::vector<UMat> umat_inputBlobs = OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers);
                    std::vector<UMat> umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
                    std::vector<UMat> umat_internalBlobs = OpenCLBackendWrapper::getUMatVector(ld.internalBlobsWrappers);
                    layer->forward(umat_inputBlobs,
                                   umat_outputBlobs,
                                   umat_internalBlobs);
                    if (DNN_CHECK_NAN_INF)
                    {
                        // Debug diagnostics: scan outputs for NaN / out-of-range
                        // values and, on failure, dump every blob of the layer.
                        bool fail = false;
                        for (size_t i = 0; i < umat_outputBlobs.size(); ++i)
                        {
                            UMat& u = umat_outputBlobs[i];
                            Mat m;
                            if (u.depth() == CV_16S) // FP16
                                convertFp16(u, m);
                            else
                                m = u.getMat(ACCESS_READ);
                            if (!checkRange(m))
                            {
                                std::cerr << "WARNING: NaN detected in layer output: id=" << ld.id << " name=" << layer->name << std::endl;
                                std::cerr << "output id=" << i << " output shape=" << shape(m) << std::endl;
                                fail = true;
                            }
                            else if (!checkRange(m, true, NULL, -1e6, 1e6))
                            {
                                std::cerr << "WARNING: Inf detected in layer output: id=" << ld.id << " name=" << layer->name << std::endl;
                                std::cerr << "output id=" << i << " output shape=" << shape(m) << std::endl;
                                fail = true;
                            }
                        }
                        if (fail)
                        {
                            for (size_t i = 0; i < umat_inputBlobs.size(); ++i)
                            {
                                UMat& u = umat_inputBlobs[i];
                                Mat m;
                                if (u.depth() == CV_16S) // FP16
                                    convertFp16(u, m);
                                else
                                    m = u.getMat(ACCESS_READ);
                                std::cout << "INPUT " << i << " " << cv::typeToString(u.type()) << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << m.reshape(1, 1) << std::endl;
                            }
                            for (size_t i = 0; i < umat_outputBlobs.size(); ++i)
                            {
                                UMat& u = umat_outputBlobs[i];
                                Mat m;
                                if (u.depth() == CV_16S) // FP16
                                    convertFp16(u, m);
                                else
                                    m = u.getMat(ACCESS_READ);
                                std::cout << "OUTPUT " << i << " " << cv::typeToString(u.type()) << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << m.reshape(1, 1) << std::endl;
                            }
                            for (size_t i = 0; i < umat_internalBlobs.size(); ++i)
                            {
                                UMat& u = umat_internalBlobs[i];
                                Mat m;
                                if (u.depth() == CV_16S) // FP16
                                    convertFp16(u, m);
                                else
                                    m = u.getMat(ACCESS_READ);
                                std::cout << "INTERNAL " << i << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << cv::typeToString(u.type()) << " " << m.reshape(1, 1) << std::endl;
                            }
                            if (DNN_CHECK_NAN_INF_RAISE_ERROR)
                                CV_Assert(!fail);
                        }
                    }
                    // Publish the (possibly reallocated) UMats back to wrappers.
                    OpenCLBackendWrapper::update(ld.outputBlobsWrappers, umat_outputBlobs);
                }
                else
                {
                    // CPU path: ensure device-side data is synchronized to the
                    // host before running the reference implementation.
                    for (int i = 0, n = ld.inputBlobsWrappers.size(); i < n; ++i)
                    {
                        if (!ld.inputBlobsWrappers[i].empty())
                            ld.inputBlobsWrappers[i]->copyToHost();
                    }

                    std::vector<Mat> inps(ld.inputBlobs.size());
                    for (int i = 0; i < ld.inputBlobs.size(); ++i)
                    {
                        inps[i] = *ld.inputBlobs[i];
                    }
                    layer->forward(inps, ld.outputBlobs, ld.internals);

                    if (DNN_CHECK_NAN_INF)
                    {
                        // Same NaN/Inf diagnostics as the OpenCL path, on Mats.
                        bool fail = false;
                        for (size_t i = 0; i < ld.outputBlobs.size(); ++i)
                        {
                            const Mat& m = ld.outputBlobs[i];
                            if (!checkRange(m))
                            {
                                std::cerr << "WARNING: NaN detected in layer output: id=" << ld.id << " name=" << layer->name << std::endl;
                                std::cerr << "output id=" << i << " output shape=" << shape(m) << std::endl;
                                fail = true;
                            }
                            else if (!checkRange(m, true, NULL, -1e6, 1e6))
                            {
                                std::cerr << "WARNING: Inf detected in layer output: id=" << ld.id << " name=" << layer->name << std::endl;
                                std::cerr << "output id=" << i << " output shape=" << shape(m) << std::endl;
                                fail = true;
                            }
                        }
                        if (fail)
                        {
                            for (size_t i = 0; i < ld.inputBlobs.size(); ++i)
                            {
                                const Mat* pM = ld.inputBlobs[i];
                                if (!pM)
                                {
                                    std::cout << "INPUT " << i << " is NULL" << std::endl;
                                    continue;
                                }
                                const Mat& m = *pM;
                                std::cout << "INPUT " << i << " " << cv::typeToString(m.type()) << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << m.reshape(1, 1) << std::endl;
                            }
                            for (size_t i = 0; i < ld.outputBlobs.size(); ++i)
                            {
                                const Mat& m = ld.outputBlobs[i];
                                std::cout << "OUTPUT " << i << " " << cv::typeToString(m.type()) << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << m.reshape(1, 1) << std::endl;
                            }
                            for (size_t i = 0; i < ld.internals.size(); ++i)
                            {
                                const Mat& m = ld.internals[i];
                                std::cout << "INTERNAL " << i << " " << cv::typeToString(m.type()) << " " << shape(m) << std::endl;
                                if (DNN_CHECK_NAN_INF_DUMP) std::cout << m.reshape(1, 1) << std::endl;
                            }
                            if (DNN_CHECK_NAN_INF_RAISE_ERROR)
                                CV_Assert(!fail);
                        }
                    }

                    // Host buffers were modified: invalidate device copies.
                    for (int i = 0, n = ld.outputBlobsWrappers.size(); i < n; ++i)
                    {
                        if (!ld.outputBlobsWrappers[i].empty())
                            ld.outputBlobsWrappers[i]->setHostDirty();
                    }
                }
            }
            else
            {
                // A backend node exists for the preferable backend: dispatch.
                Ptr<BackendNode> node = it->second;
                CV_Assert(!node.empty());
                if (preferableBackend == DNN_BACKEND_CUDA)
                {
                    CV_Assert(haveCUDA());

#ifdef HAVE_CUDA
                    Ptr<CUDABackendNode> cudaNode = node.dynamicCast<CUDABackendNode>();
                    CV_Assert(!cudaNode.empty());

                    cudaNode->forward(ld.inputBlobsWrappers, ld.outputBlobsWrappers, cudaInfo->workspace);
#endif
                }
                else if (preferableBackend == DNN_BACKEND_HALIDE)
                {
                    forwardHalide(ld.outputBlobsWrappers, node);
                }
                else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
                {
                    forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
                }
                else if (preferableBackend == DNN_BACKEND_VKCOM)
                {
                    try
                    {
                        forwardVkCom(ld.outputBlobsWrappers, node);
                    }
                    catch (const cv::Exception& e)
                    {
                        // Vulkan execution failed: drop the backend node and
                        // retry this layer via the default implementation.
                        CV_LOG_ERROR(NULL, "forwardVkCom failed, fallback to CPU implementation. " << e.what());
                        it->second = Ptr<BackendNode>();
                        forwardLayer(ld);
                    }
                }
                else
                {
                    CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
                }
            }
        }
        else
            tm.reset(); // skipped (fused) layers report zero execution time

        tm.stop();
        layersTimings[ld.id] = tm.getTimeTicks();

        ld.flag = 1; // mark layer as processed for forwardToLayer()
    }
|
|
|
|
|
|
|
|
void forwardToLayer(LayerData &ld, bool clearFlags = true)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (clearFlags)
|
|
|
|
{
|
|
|
|
MapIdToLayerData::iterator it;
|
|
|
|
for (it = layers.begin(); it != layers.end(); it++)
|
|
|
|
it->second.flag = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
//already was forwarded
|
|
|
|
if (ld.flag)
|
|
|
|
return;
|
|
|
|
|
|
|
|
//forward parents
|
|
|
|
MapIdToLayerData::iterator it;
|
2017-09-05 22:10:16 +08:00
|
|
|
for (it = layers.begin(); it != layers.end() && (it->second.id < ld.id); ++it)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
LayerData &ld = it->second;
|
|
|
|
if (ld.flag)
|
|
|
|
continue;
|
|
|
|
forwardLayer(ld);
|
|
|
|
}
|
|
|
|
|
|
|
|
//forward itself
|
|
|
|
forwardLayer(ld);
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
|
|
|
|
#ifdef HAVE_CUDA
|
|
|
|
if (preferableBackend == DNN_BACKEND_CUDA)
|
|
|
|
cudaInfo->context.stream.synchronize();
|
|
|
|
#endif
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void getLayerShapesRecursively(int id, LayersShapesMap& inOutShapes)
|
|
|
|
{
|
|
|
|
std::vector<LayerPin>& inputLayerIds = layers[id].inputBlobsId;
|
|
|
|
|
2019-09-30 03:02:42 +08:00
|
|
|
if (id == 0 && inOutShapes[id].in[0].empty())
|
2019-08-10 00:51:42 +08:00
|
|
|
{
|
2019-09-30 03:02:42 +08:00
|
|
|
if (!layers[0].outputBlobs.empty())
|
2019-08-10 00:51:42 +08:00
|
|
|
{
|
2019-09-30 03:02:42 +08:00
|
|
|
ShapesVec shapes;
|
|
|
|
for (int i = 0; i < layers[0].outputBlobs.size(); i++)
|
|
|
|
{
|
|
|
|
Mat& inp = layers[0].outputBlobs[i];
|
|
|
|
CV_Assert(inp.total());
|
|
|
|
shapes.push_back(shape(inp));
|
|
|
|
}
|
|
|
|
inOutShapes[0].in = shapes;
|
2019-08-10 00:51:42 +08:00
|
|
|
}
|
2019-09-30 03:02:42 +08:00
|
|
|
else
|
|
|
|
{
|
|
|
|
inOutShapes[0].out.clear();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2019-08-10 00:51:42 +08:00
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (inOutShapes[id].in.empty())
|
|
|
|
{
|
|
|
|
for(int i = 0; i < inputLayerIds.size(); i++)
|
|
|
|
{
|
|
|
|
int layerId = inputLayerIds[i].lid;
|
|
|
|
LayersShapesMap::iterator it =
|
|
|
|
inOutShapes.find(layerId);
|
|
|
|
if(it == inOutShapes.end() ||
|
|
|
|
it->second.out.empty())
|
|
|
|
{
|
|
|
|
getLayerShapesRecursively(layerId, inOutShapes);
|
|
|
|
}
|
|
|
|
const MatShape& shape = inOutShapes[layerId].out[inputLayerIds[i].oid];
|
|
|
|
inOutShapes[id].in.push_back(shape);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
const ShapesVec& is = inOutShapes[id].in;
|
|
|
|
ShapesVec& os = inOutShapes[id].out;
|
|
|
|
ShapesVec& ints = inOutShapes[id].internal;
|
|
|
|
int requiredOutputs = layers[id].requiredOutputs.size();
|
|
|
|
inOutShapes[id].supportInPlace =
|
|
|
|
layers[id].getLayerInstance()->getMemoryShapes(is, requiredOutputs, os, ints);
|
2019-09-25 20:35:04 +08:00
|
|
|
|
|
|
|
for (int i = 0; i < ints.size(); i++)
|
|
|
|
CV_Assert(total(ints[i]) > 0);
|
|
|
|
|
|
|
|
for (int i = 0; i < os.size(); i++)
|
|
|
|
CV_Assert(total(os[i]) > 0);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void getLayersShapes(const ShapesVec& netInputShapes,
|
|
|
|
LayersShapesMap& inOutShapes)
|
|
|
|
{
|
|
|
|
inOutShapes.clear();
|
|
|
|
|
|
|
|
inOutShapes[0].in = netInputShapes; //insert shape for first input layer
|
|
|
|
for (MapIdToLayerData::iterator it = layers.begin();
|
|
|
|
it != layers.end(); it++)
|
|
|
|
{
|
|
|
|
getLayerShapesRecursively(it->first, inOutShapes);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void getLayerShapes(const ShapesVec& netInputShapes,
|
|
|
|
const int layerId,
|
|
|
|
LayerShapes& shapes)
|
|
|
|
{
|
|
|
|
LayersShapesMap inOutShapes;
|
|
|
|
inOutShapes[0].in = netInputShapes; //insert shape for first input layer
|
|
|
|
getLayerShapesRecursively(layerId, inOutShapes);
|
|
|
|
shapes = inOutShapes[layerId];
|
|
|
|
}
|
|
|
|
|
|
|
|
LayerPin getLatestLayerPin(const std::vector<LayerPin>& pins)
|
|
|
|
{
|
|
|
|
return *std::max_element(pins.begin(), pins.end());
|
|
|
|
}
|
|
|
|
|
|
|
|
Mat getBlob(const LayerPin& pin)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (!pin.valid())
|
|
|
|
CV_Error(Error::StsObjectNotFound, "Requested blob not found");
|
|
|
|
|
|
|
|
LayerData &ld = layers[pin.lid];
|
|
|
|
if ((size_t)pin.oid >= ld.outputBlobs.size())
|
|
|
|
{
|
2018-09-19 20:49:59 +08:00
|
|
|
CV_Error(Error::StsOutOfRange, format("Layer \"%s\" produce only %zu outputs, "
|
2018-02-12 20:07:39 +08:00
|
|
|
"the #%d was requested", ld.name.c_str(),
|
2018-01-13 23:17:56 +08:00
|
|
|
ld.outputBlobs.size(), pin.oid));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-01-11 02:50:54 +08:00
|
|
|
if (preferableTarget != DNN_TARGET_CPU)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-01-11 02:50:54 +08:00
|
|
|
CV_Assert(!ld.outputBlobsWrappers.empty() && !ld.outputBlobsWrappers[pin.oid].empty());
|
2017-06-26 18:35:51 +08:00
|
|
|
// Transfer data to CPU if it's require.
|
2017-09-06 15:34:07 +08:00
|
|
|
ld.outputBlobsWrappers[pin.oid]->copyToHost();
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-04-26 19:20:16 +08:00
|
|
|
|
|
|
|
if (ld.outputBlobs[pin.oid].depth() == CV_16S)
|
|
|
|
{
|
|
|
|
convertFp16(ld.outputBlobs[pin.oid], output_blob);
|
|
|
|
return output_blob;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return ld.outputBlobs[pin.oid];
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
Mat getBlob(String outputName)
|
|
|
|
{
|
|
|
|
return getBlob(getPinByAlias(outputName));
|
|
|
|
}
|
2019-04-20 02:01:19 +08:00
|
|
|
|
|
|
|
#ifdef CV_CXX11
|
2019-05-01 19:51:12 +08:00
|
|
|
AsyncArray getBlobAsync(const LayerPin& pin)
|
2019-04-20 02:01:19 +08:00
|
|
|
{
|
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
if (!pin.valid())
|
|
|
|
CV_Error(Error::StsObjectNotFound, "Requested blob not found");
|
|
|
|
|
|
|
|
LayerData &ld = layers[pin.lid];
|
|
|
|
if ((size_t)pin.oid >= ld.outputBlobs.size())
|
|
|
|
{
|
|
|
|
CV_Error(Error::StsOutOfRange, format("Layer \"%s\" produce only %d outputs, "
|
|
|
|
"the #%d was requested", ld.name.c_str(),
|
2019-04-23 00:08:11 +08:00
|
|
|
(int)ld.outputBlobs.size(), (int)pin.oid));
|
2019-04-20 02:01:19 +08:00
|
|
|
}
|
|
|
|
if (preferableTarget != DNN_TARGET_CPU)
|
|
|
|
{
|
|
|
|
CV_Assert(!ld.outputBlobsWrappers.empty() && !ld.outputBlobsWrappers[pin.oid].empty());
|
|
|
|
// Transfer data to CPU if it's require.
|
|
|
|
ld.outputBlobsWrappers[pin.oid]->copyToHost();
|
|
|
|
}
|
|
|
|
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE);
|
|
|
|
|
|
|
|
Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
|
|
|
|
return std::move(wrapper->futureMat);
|
|
|
|
#else
|
|
|
|
CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE backend is required");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2019-05-01 19:51:12 +08:00
|
|
|
AsyncArray getBlobAsync(String outputName)
|
2019-04-20 02:01:19 +08:00
|
|
|
{
|
|
|
|
return getBlobAsync(getPinByAlias(outputName));
|
|
|
|
}
|
|
|
|
#endif // CV_CXX11
|
2017-06-26 18:35:51 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
Net::Net() : impl(new Net::Impl)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-03-17 00:27:04 +08:00
|
|
|
Net Net::readFromModelOptimizer(const String& xml, const String& bin)
|
|
|
|
{
|
|
|
|
#ifndef HAVE_INF_ENGINE
|
2018-04-24 00:02:39 +08:00
|
|
|
CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
|
2018-03-17 00:27:04 +08:00
|
|
|
#else
|
|
|
|
InferenceEngine::CNNNetReader reader;
|
|
|
|
reader.ReadNetwork(xml);
|
|
|
|
reader.ReadWeights(bin);
|
|
|
|
|
|
|
|
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
|
|
|
|
|
|
|
|
std::vector<String> inputsNames;
|
2019-08-10 00:51:42 +08:00
|
|
|
std::vector<MatShape> inp_shapes;
|
2018-03-17 00:27:04 +08:00
|
|
|
for (auto& it : ieNet.getInputsInfo())
|
|
|
|
{
|
|
|
|
inputsNames.push_back(it.first);
|
2019-08-10 00:51:42 +08:00
|
|
|
std::vector<size_t> dims = it.second->getTensorDesc().getDims();
|
|
|
|
inp_shapes.push_back(std::vector<int>(dims.begin(), dims.end()));
|
2018-03-17 00:27:04 +08:00
|
|
|
}
|
|
|
|
|
2018-03-28 21:34:37 +08:00
|
|
|
Net cvNet;
|
2018-03-17 00:27:04 +08:00
|
|
|
cvNet.setInputsNames(inputsNames);
|
|
|
|
|
2019-08-10 00:51:42 +08:00
|
|
|
// set empty input to determine input shapes
|
|
|
|
for (int inp_id = 0; inp_id < inputsNames.size(); ++inp_id)
|
|
|
|
{
|
|
|
|
cvNet.setInput(Mat(inp_shapes[inp_id], CV_32F), inputsNames[inp_id]);
|
|
|
|
}
|
|
|
|
|
2019-01-14 14:55:44 +08:00
|
|
|
Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
|
2018-03-17 00:27:04 +08:00
|
|
|
backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
|
|
|
|
for (auto& it : ieNet.getOutputsInfo())
|
|
|
|
{
|
2019-01-11 01:29:44 +08:00
|
|
|
Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
|
2018-06-28 14:09:11 +08:00
|
|
|
InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
|
|
|
|
CV_Assert(ieLayer);
|
|
|
|
|
2018-03-17 00:27:04 +08:00
|
|
|
LayerParams lp;
|
|
|
|
int lid = cvNet.addLayer(it.first, "", lp);
|
|
|
|
|
|
|
|
LayerData& ld = cvNet.impl->layers[lid];
|
2018-06-28 14:09:11 +08:00
|
|
|
cvLayer->name = it.first;
|
|
|
|
cvLayer->type = ieLayer->type;
|
|
|
|
ld.layerInstance = cvLayer;
|
2018-03-17 00:27:04 +08:00
|
|
|
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE] = backendNode;
|
|
|
|
|
2018-04-11 18:28:07 +08:00
|
|
|
for (int i = 0; i < inputsNames.size(); ++i)
|
|
|
|
cvNet.connect(0, i, lid, i);
|
2018-03-17 00:27:04 +08:00
|
|
|
}
|
|
|
|
cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
|
|
|
|
|
|
|
|
cvNet.impl->skipInfEngineInit = true;
|
|
|
|
return cvNet;
|
2018-03-28 21:34:37 +08:00
|
|
|
#endif // HAVE_INF_ENGINE
|
2018-03-17 00:27:04 +08:00
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
Net::~Net()
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
int Net::addLayer(const String &name, const String &type, LayerParams ¶ms)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
if (impl->getLayerId(name) >= 0)
|
|
|
|
{
|
|
|
|
CV_Error(Error::StsBadArg, "Layer \"" + name + "\" already into net");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int id = ++impl->lastLayerId;
|
|
|
|
impl->layerNameToId.insert(std::make_pair(name, id));
|
|
|
|
impl->layers.insert(std::make_pair(id, LayerData(id, name, type, params)));
|
|
|
|
|
|
|
|
return id;
|
|
|
|
}
|
|
|
|
|
|
|
|
int Net::addLayerToPrev(const String &name, const String &type, LayerParams ¶ms)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
int prvLid = impl->lastLayerId;
|
|
|
|
int newLid = this->addLayer(name, type, params);
|
|
|
|
this->connect(prvLid, 0, newLid, 0);
|
|
|
|
return newLid;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::connect(int outLayerId, int outNum, int inpLayerId, int inpNum)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
impl->connect(outLayerId, outNum, inpLayerId, inpNum);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::connect(String _outPin, String _inPin)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
LayerPin outPin = impl->getPinByAlias(_outPin);
|
|
|
|
LayerPin inpPin = impl->getPinByAlias(_inPin);
|
|
|
|
|
|
|
|
CV_Assert(outPin.valid() && inpPin.valid());
|
|
|
|
|
|
|
|
impl->connect(outPin.lid, outPin.oid, inpPin.lid, inpPin.oid);
|
|
|
|
}
|
|
|
|
|
|
|
|
Mat Net::forward(const String& outputName)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
String layerName = outputName;
|
|
|
|
|
|
|
|
if (layerName.empty())
|
|
|
|
layerName = getLayerNames().back();
|
|
|
|
|
2018-07-04 20:50:39 +08:00
|
|
|
std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
|
|
|
|
impl->setUpNet(pins);
|
2017-06-26 18:35:51 +08:00
|
|
|
impl->forwardToLayer(impl->getLayerData(layerName));
|
|
|
|
|
|
|
|
return impl->getBlob(layerName);
|
|
|
|
}
|
|
|
|
|
2019-05-01 19:51:12 +08:00
|
|
|
AsyncArray Net::forwardAsync(const String& outputName)
|
2019-04-20 02:01:19 +08:00
|
|
|
{
|
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
#ifdef CV_CXX11
|
|
|
|
String layerName = outputName;
|
|
|
|
|
|
|
|
if (layerName.empty())
|
|
|
|
layerName = getLayerNames().back();
|
|
|
|
|
|
|
|
std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
|
|
|
|
impl->setUpNet(pins);
|
|
|
|
|
2019-05-08 20:27:22 +08:00
|
|
|
if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
|
|
|
|
CV_Error(Error::StsNotImplemented, "Asynchronous forward for backend which is different from DNN_BACKEND_INFERENCE_ENGINE");
|
|
|
|
|
2019-04-20 02:01:19 +08:00
|
|
|
impl->isAsync = true;
|
|
|
|
impl->forwardToLayer(impl->getLayerData(layerName));
|
|
|
|
impl->isAsync = false;
|
|
|
|
|
|
|
|
return impl->getBlobAsync(layerName);
|
|
|
|
#else
|
|
|
|
CV_Error(Error::StsNotImplemented, "Asynchronous forward without C++11");
|
|
|
|
#endif // CV_CXX11
|
|
|
|
}
|
|
|
|
|
2017-11-16 11:20:08 +08:00
|
|
|
void Net::forward(OutputArrayOfArrays outputBlobs, const String& outputName)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
String layerName = outputName;
|
|
|
|
|
|
|
|
if (layerName.empty())
|
|
|
|
layerName = getLayerNames().back();
|
|
|
|
|
2018-07-04 20:50:39 +08:00
|
|
|
std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
|
|
|
|
impl->setUpNet(pins);
|
2017-06-26 18:35:51 +08:00
|
|
|
impl->forwardToLayer(impl->getLayerData(layerName));
|
|
|
|
|
|
|
|
LayerPin pin = impl->getPinByAlias(layerName);
|
|
|
|
LayerData &ld = impl->layers[pin.lid];
|
2017-11-09 12:57:37 +08:00
|
|
|
|
2017-11-16 11:20:08 +08:00
|
|
|
if (outputBlobs.isUMat())
|
2017-11-09 12:57:37 +08:00
|
|
|
{
|
2018-10-05 20:10:58 +08:00
|
|
|
impl->getBlob(layerName).copyTo(outputBlobs);
|
2017-11-16 11:20:08 +08:00
|
|
|
}
|
|
|
|
else if (outputBlobs.isMat())
|
|
|
|
{
|
|
|
|
outputBlobs.assign(impl->getBlob(layerName));
|
|
|
|
}
|
|
|
|
else if (outputBlobs.isMatVector())
|
|
|
|
{
|
2018-01-11 02:50:54 +08:00
|
|
|
if (impl->preferableTarget != DNN_TARGET_CPU)
|
2017-11-16 11:20:08 +08:00
|
|
|
{
|
2018-01-11 02:50:54 +08:00
|
|
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
|
|
|
{
|
|
|
|
CV_Assert(!ld.outputBlobsWrappers[i].empty());
|
|
|
|
ld.outputBlobsWrappers[i]->copyToHost();
|
|
|
|
}
|
2017-11-16 11:20:08 +08:00
|
|
|
}
|
2018-04-26 19:20:16 +08:00
|
|
|
if (ld.outputBlobs[0].depth() == CV_32F)
|
|
|
|
{
|
|
|
|
std::vector<Mat> & outputvec = *(std::vector<Mat> *)outputBlobs.getObj();
|
|
|
|
outputvec = ld.outputBlobs;
|
|
|
|
} else {
|
|
|
|
std::vector<Mat> & outputvec = *(std::vector<Mat> *)outputBlobs.getObj();
|
|
|
|
outputvec.resize(ld.outputBlobs.size());
|
|
|
|
for (int i = 0; i < outputvec.size(); i++)
|
|
|
|
convertFp16(ld.outputBlobs[i], outputvec[i]);
|
|
|
|
}
|
2017-11-16 11:20:08 +08:00
|
|
|
}
|
|
|
|
else if (outputBlobs.isUMatVector())
|
|
|
|
{
|
2018-01-11 02:50:54 +08:00
|
|
|
std::vector<UMat> & outputvec = *(std::vector<UMat> *)outputBlobs.getObj();
|
|
|
|
|
2018-06-01 15:54:12 +08:00
|
|
|
if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
|
2018-04-26 19:20:16 +08:00
|
|
|
IS_DNN_OPENCL_TARGET(impl->preferableTarget))
|
2018-01-11 02:50:54 +08:00
|
|
|
{
|
2018-04-26 19:20:16 +08:00
|
|
|
if (impl->preferableTarget == DNN_TARGET_OPENCL)
|
|
|
|
outputvec = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
|
|
|
|
else if (impl->preferableTarget == DNN_TARGET_OPENCL_FP16)
|
|
|
|
{
|
|
|
|
std::vector<UMat> out_vec = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
|
|
|
|
outputvec.resize(out_vec.size());
|
|
|
|
for (int i = 0; i < out_vec.size(); i++)
|
|
|
|
convertFp16(out_vec[i], outputvec[i]);
|
|
|
|
}
|
2018-01-11 02:50:54 +08:00
|
|
|
}
|
|
|
|
else
|
2017-11-16 11:20:08 +08:00
|
|
|
{
|
2018-01-11 02:50:54 +08:00
|
|
|
outputvec.resize(ld.outputBlobs.size());
|
|
|
|
for (int i = 0; i < outputvec.size(); ++i)
|
2018-10-05 20:10:58 +08:00
|
|
|
ld.outputBlobs[i].copyTo(outputvec[i]);
|
2017-11-16 11:20:08 +08:00
|
|
|
}
|
2017-11-09 12:57:37 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
2017-11-16 11:20:08 +08:00
|
|
|
void Net::forward(OutputArrayOfArrays outputBlobs,
|
2017-06-26 18:35:51 +08:00
|
|
|
const std::vector<String>& outBlobNames)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<LayerPin> pins;
|
|
|
|
for (int i = 0; i < outBlobNames.size(); i++)
|
|
|
|
{
|
2017-11-16 11:20:08 +08:00
|
|
|
pins.push_back(impl->getPinByAlias(outBlobNames[i]));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
impl->setUpNet(pins);
|
|
|
|
|
|
|
|
LayerPin out = impl->getLatestLayerPin(pins);
|
|
|
|
|
|
|
|
impl->forwardToLayer(impl->getLayerData(out.lid));
|
|
|
|
|
2017-11-16 11:20:08 +08:00
|
|
|
std::vector<Mat> matvec;
|
2017-06-26 18:35:51 +08:00
|
|
|
for (int i = 0; i < pins.size(); i++)
|
|
|
|
{
|
2017-11-16 11:20:08 +08:00
|
|
|
matvec.push_back(impl->getBlob(pins[i]));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2017-11-16 11:20:08 +08:00
|
|
|
|
|
|
|
std::vector<Mat> & outputvec = *(std::vector<Mat> *)outputBlobs.getObj();
|
|
|
|
outputvec = matvec;
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void Net::forward(std::vector<std::vector<Mat> >& outputBlobs,
|
|
|
|
const std::vector<String>& outBlobNames)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<LayerPin> pins;
|
|
|
|
for (int i = 0; i < outBlobNames.size(); i++)
|
|
|
|
{
|
2019-01-28 23:44:31 +08:00
|
|
|
pins.push_back(impl->getPinByAlias(outBlobNames[i]));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
impl->setUpNet(pins);
|
|
|
|
|
|
|
|
LayerPin out = impl->getLatestLayerPin(pins);
|
|
|
|
|
|
|
|
impl->forwardToLayer(impl->getLayerData(out.lid));
|
|
|
|
|
|
|
|
outputBlobs.resize(outBlobNames.size());
|
|
|
|
for (int i = 0; i < outBlobNames.size(); i++)
|
|
|
|
{
|
|
|
|
std::vector<LayerPin> lp = impl->getLayerOutPins(outBlobNames[i]);
|
2019-01-28 23:44:31 +08:00
|
|
|
outputBlobs[i].resize(lp.size());
|
|
|
|
for (int j = 0; j < lp.size(); j++)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2019-01-28 23:44:31 +08:00
|
|
|
outputBlobs[i][j] = impl->getBlob(lp[j]);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::setPreferableBackend(int backendId)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG(backendId);
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
if( impl->preferableBackend != backendId )
|
|
|
|
{
|
|
|
|
impl->preferableBackend = backendId;
|
|
|
|
impl->netWasAllocated = false;
|
|
|
|
impl->clear();
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void Net::setPreferableTarget(int targetId)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG(targetId);
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
if( impl->preferableTarget != targetId )
|
|
|
|
{
|
|
|
|
impl->preferableTarget = targetId;
|
2018-04-26 19:20:16 +08:00
|
|
|
if (IS_DNN_OPENCL_TARGET(targetId))
|
|
|
|
{
|
|
|
|
#ifndef HAVE_OPENCL
|
2018-06-05 22:18:14 +08:00
|
|
|
#ifdef HAVE_INF_ENGINE
|
|
|
|
if (impl->preferableBackend == DNN_BACKEND_OPENCV)
|
|
|
|
#else
|
|
|
|
if (impl->preferableBackend == DNN_BACKEND_DEFAULT ||
|
|
|
|
impl->preferableBackend == DNN_BACKEND_OPENCV)
|
|
|
|
#endif // HAVE_INF_ENGINE
|
|
|
|
impl->preferableTarget = DNN_TARGET_CPU;
|
2018-04-26 19:20:16 +08:00
|
|
|
#else
|
|
|
|
bool fp16 = ocl::Device::getDefault().isExtensionSupported("cl_khr_fp16");
|
|
|
|
if (!fp16 && targetId == DNN_TARGET_OPENCL_FP16)
|
|
|
|
impl->preferableTarget = DNN_TARGET_OPENCL;
|
|
|
|
#endif
|
|
|
|
}
|
2017-07-04 22:23:47 +08:00
|
|
|
impl->netWasAllocated = false;
|
|
|
|
impl->clear();
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void Net::setInputsNames(const std::vector<String> &inputBlobNames)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
impl->netInputLayer->setNames(inputBlobNames);
|
|
|
|
}
|
|
|
|
|
2018-06-05 04:51:28 +08:00
|
|
|
void Net::setInput(InputArray blob, const String& name, double scalefactor, const Scalar& mean)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
LayerPin pin;
|
|
|
|
pin.lid = 0;
|
|
|
|
pin.oid = impl->resolvePinOutputName(impl->getLayerData(pin.lid), name);
|
|
|
|
|
|
|
|
if (!pin.valid())
|
|
|
|
CV_Error(Error::StsObjectNotFound, "Requested blob \"" + name + "\" not found");
|
|
|
|
|
|
|
|
LayerData &ld = impl->layers[pin.lid];
|
2018-07-09 19:35:54 +08:00
|
|
|
const int numInputs = std::max(pin.oid+1, (int)ld.requiredOutputs.size());
|
|
|
|
ld.outputBlobs.resize(numInputs);
|
|
|
|
ld.outputBlobsWrappers.resize(numInputs);
|
|
|
|
impl->netInputLayer->inputsData.resize(numInputs);
|
2018-06-05 04:51:28 +08:00
|
|
|
impl->netInputLayer->scaleFactors.resize(numInputs);
|
|
|
|
impl->netInputLayer->means.resize(numInputs);
|
2018-07-09 19:35:54 +08:00
|
|
|
|
|
|
|
MatShape prevShape = shape(impl->netInputLayer->inputsData[pin.oid]);
|
|
|
|
Mat blob_ = blob.getMat();
|
2017-06-26 18:35:51 +08:00
|
|
|
bool oldShape = prevShape == shape(blob_);
|
|
|
|
if (oldShape)
|
2017-11-09 12:57:37 +08:00
|
|
|
{
|
2018-07-09 19:35:54 +08:00
|
|
|
blob_.copyTo(impl->netInputLayer->inputsData[pin.oid]);
|
2017-11-09 12:57:37 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
else
|
2017-11-09 12:57:37 +08:00
|
|
|
{
|
2017-06-26 18:35:51 +08:00
|
|
|
ld.outputBlobs[pin.oid] = blob_.clone();
|
2018-07-09 19:35:54 +08:00
|
|
|
impl->netInputLayer->inputsData[pin.oid] = ld.outputBlobs[pin.oid];
|
2017-11-09 12:57:37 +08:00
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2017-09-06 15:34:07 +08:00
|
|
|
if (!ld.outputBlobsWrappers[pin.oid].empty())
|
|
|
|
{
|
|
|
|
ld.outputBlobsWrappers[pin.oid]->setHostDirty();
|
|
|
|
}
|
2018-06-05 04:51:28 +08:00
|
|
|
impl->netInputLayer->scaleFactors[pin.oid] = scalefactor;
|
|
|
|
impl->netInputLayer->means[pin.oid] = mean;
|
2017-06-26 18:35:51 +08:00
|
|
|
impl->netWasAllocated = impl->netWasAllocated && oldShape;
|
|
|
|
}
|
|
|
|
|
|
|
|
Mat Net::getParam(LayerId layer, int numParam)
|
|
|
|
{
|
|
|
|
LayerData &ld = impl->getLayerData(layer);
|
2018-09-04 22:48:52 +08:00
|
|
|
std::vector<Mat> &layerBlobs = ld.getLayerInstance()->blobs;
|
2017-06-26 18:35:51 +08:00
|
|
|
CV_Assert(numParam < (int)layerBlobs.size());
|
|
|
|
return layerBlobs[numParam];
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::setParam(LayerId layer, int numParam, const Mat &blob)
|
|
|
|
{
|
|
|
|
LayerData &ld = impl->getLayerData(layer);
|
|
|
|
|
2018-09-04 22:48:52 +08:00
|
|
|
std::vector<Mat> &layerBlobs = ld.getLayerInstance()->blobs;
|
2017-06-26 18:35:51 +08:00
|
|
|
CV_Assert(numParam < (int)layerBlobs.size());
|
|
|
|
//we don't make strong checks, use this function carefully
|
|
|
|
layerBlobs[numParam] = blob;
|
|
|
|
}
|
|
|
|
|
|
|
|
int Net::getLayerId(const String &layer)
|
|
|
|
{
|
|
|
|
return impl->getLayerId(layer);
|
|
|
|
}
|
|
|
|
|
2019-05-28 00:17:07 +08:00
|
|
|
String parseLayerParams(const String& name, const LayerParams& lp) {
|
|
|
|
DictValue param = lp.get(name);
|
|
|
|
std::ostringstream out;
|
|
|
|
out << name << " ";
|
|
|
|
switch (param.size()) {
|
|
|
|
case 1: out << ": "; break;
|
|
|
|
case 2: out << "(HxW): "; break;
|
|
|
|
case 3: out << "(DxHxW): "; break;
|
|
|
|
default: CV_Error(Error::StsNotImplemented, format("Unsupported %s size = %d", name.c_str(), param.size()));
|
|
|
|
}
|
|
|
|
for (size_t i = 0; i < param.size() - 1; i++) {
|
|
|
|
out << param.get<int>(i) << " x ";
|
|
|
|
}
|
|
|
|
out << param.get<int>(param.size() - 1) << "\\l";
|
|
|
|
return out.str();
|
|
|
|
}
|
|
|
|
|
2019-04-13 00:31:07 +08:00
|
|
|
String Net::dump()
{
    // Renders the network as a GraphViz DOT digraph string: one box per
    // executed layer, fused/skipped layers merged into "record"-shaped
    // cluster nodes, each node colored by the target it runs on.
    CV_Assert(!empty());

    // Targets/backend nodes are only known after inputs are set and the
    // network has been allocated.
    if (impl->netInputLayer->inputsData.empty())
        CV_Error(Error::StsError, "Requested set input");

    if (!impl->netWasAllocated)
        impl->setUpNet();

    std::ostringstream out;
    std::map<int, LayerData>& map = impl->layers;
    int prefBackend = impl->preferableBackend;
    std::vector<std::vector<int> > skippedLayers;  // finished fusion groups (clusters)
    std::vector<int> skipId;                       // group currently being collected
    std::vector<int> allLayers(map.size(), -1);    // layer id -> cluster index, -1 = standalone
    int idPrev = -1;                               // id of the last non-skipped backend layer
    Ptr<BackendNode> prevNode;
    // Pass 1: walk layers in reverse id order and group consecutive layers
    // that were fused into one backend node (or skipped by the OCV backend).
    for (std::map<int, LayerData>::reverse_iterator rit = map.rbegin(); rit != map.rend(); ++rit)
    {
        std::map<int, Ptr<BackendNode> >::iterator itBackend = rit->second.backendNodes.find(prefBackend);
        if (prefBackend == DNN_BACKEND_OPENCV || itBackend == rit->second.backendNodes.end() ||
            itBackend->second.empty())
        {
            if (rit->second.skip)
                skipId.push_back(rit->first);
            else if (!skipId.empty())
            {
                // Close the current group, attaching the layer the skipped
                // ones were fused into (this layer, or the previous backend node's).
                if (prefBackend == DNN_BACKEND_OPENCV || prevNode.empty())
                    skipId.push_back(rit->first);
                else if (idPrev != -1)
                    skipId.push_back(idPrev);

                std::sort(skipId.begin(), skipId.end());
                for (int i = 0; i < skipId.size(); i++) {
                    allLayers[skipId[i]] = skippedLayers.size();
                }
                skippedLayers.push_back(skipId);
                skipId.clear();
            }
        }
        else
        {
            // Same backend node as the previous layer => members of one fused group.
            if (itBackend->second == prevNode)
                skipId.push_back(idPrev);
            else if (!skipId.empty())
            {
                skipId.push_back(idPrev);
                std::sort(skipId.begin(), skipId.end());
                for (int i = 0; i < skipId.size(); i++) {
                    allLayers[skipId[i]] = skippedLayers.size();
                }
                skippedLayers.push_back(skipId);
                skipId.clear();
            }
            idPrev = rit->first;
            prevNode = itBackend->second;
        }
    }
    // Node fill colors, indexed by colorId (chosen per target below).
    String colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462", "#ff4848"};
    String backend;
    switch (prefBackend) {
        case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
        case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
        case DNN_BACKEND_INFERENCE_ENGINE: backend = "DLIE/"; break;
        case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
        case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
    }
    out << "digraph G {" << '\n';
    // Add nodes
    for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
    {
        String name = it->second.params.name;
        if (allLayers[it->first] == -1 && !name.empty()) {
            // Standalone layer: emit a plain box node; skipId holds just it.
            out << " " << "\"" << name << "\"" << " [label=\"";
            skipId.clear();
            skipId.push_back(it->first);
        }
        else if (name.empty() || it->first != skippedLayers[allLayers[it->first]][0])
            continue;  // unnamed layer, or a cluster member that is not the first one
        else { // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
            int cluster = allLayers[it->first];
            out << " " << "\"" << "cluster_" << cluster << "\"" << " [label=\"{";
            skipId = skippedLayers[allLayers[it->first]]; // vertices in current cluster
        }
        // Emit one record section per layer in the group: name, type and the
        // human-readable convolution/pooling hyper-parameters.
        for (int i = 0; i < skipId.size(); i++)
        {
            LayerParams& lp = map[skipId[i]].params;
            if (!lp.name.empty()) {
                if (i > 0) {
                    out << " | ";
                }
                out << lp.name << "\\n" << lp.type << "\\n";
                if (lp.has("kernel_size")) {
                    String kernel = parseLayerParams("kernel_size", lp);
                    out << kernel;
                } else if (lp.has("kernel_h") && lp.has("kernel_w")) {
                    DictValue h = lp.get("kernel_h");
                    DictValue w = lp.get("kernel_w");
                    out << "kernel (HxW): " << h << " x " << w << "\\l";
                }
                if (lp.has("stride")) {
                    String stride = parseLayerParams("stride", lp);
                    out << stride;
                } else if (lp.has("stride_h") && lp.has("stride_w")) {
                    DictValue h = lp.get("stride_h");
                    DictValue w = lp.get("stride_w");
                    out << "stride (HxW): " << h << " x " << w << "\\l";
                }
                if (lp.has("dilation")) {
                    String dilation = parseLayerParams("dilation", lp);
                    out << dilation;
                } else if (lp.has("dilation_h") && lp.has("dilation_w")) {
                    DictValue h = lp.get("dilation_h");
                    DictValue w = lp.get("dilation_w");
                    out << "dilation (HxW): " << h << " x " << w << "\\l";
                }
                if (lp.has("pad")) {
                    DictValue pad = lp.get("pad");
                    out << "pad ";
                    // pad may be scalar, HxW, begin/end pairs, or 3D pairs.
                    switch (pad.size()) {
                        case 1: out << ": " << pad << "\\l"; break;
                        case 2: out << "(HxW): (" << pad.get<int>(0) << " x " << pad.get<int>(1) << ")" << "\\l"; break;
                        case 4: out << "(HxW): (" << pad.get<int>(0) << ", " << pad.get<int>(2) << ") x (" << pad.get<int>(1) << ", " << pad.get<int>(3) << ")" << "\\l"; break;
                        case 6: out << "(DxHxW): (" << pad.get<int>(0) << ", " << pad.get<int>(3) << ") x (" << pad.get<int>(1) << ", " << pad.get<int>(4)
                                << ") x (" << pad.get<int>(2) << ", " << pad.get<int>(5) << ")" << "\\l"; break;
                        default: CV_Error(Error::StsNotImplemented, format("Unsupported pad size = %d", pad.size()));
                    }
                } else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) {
                    DictValue l = lp.get("pad_l");
                    DictValue t = lp.get("pad_t");
                    DictValue r = lp.get("pad_r");
                    DictValue b = lp.get("pad_b");
                    out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")" << "\\l";
                }
                else if (lp.has("pooled_w") || lp.has("pooled_h")) {
                    DictValue h = lp.get("pooled_h");
                    DictValue w = lp.get("pooled_w");
                    out << "pad (HxW): " << h << " x " << w << "\\l";
                }
                if (lp.has("pool")) {
                    out << "pool: " << lp.get("pool") << "\\l";
                }
                if (lp.has("global_pooling")) {
                    out << "global_pooling: " << lp.get("global_pooling") << "\\l";
                }
                if (lp.has("group")) {
                    out << "group: " << lp.get("group") << "\\l";
                }
            }
        }
        if (!it->second.outputBlobs.empty())
            out << "output: " << it->second.outputBlobs[0].size << "\\l";

        // Backend/target tag ("OCV/CPU", "CUDA/...", ...) and node color.
        Ptr<BackendNode> layerBackend = it->second.backendNodes[prefBackend];
        out << (!layerBackend.empty() ? backend : "OCV/");
        int colorId = 0;
        switch (it->second.layerInstance->preferableTarget) {
            case DNN_TARGET_CPU: out << "CPU\\n"; colorId = layerBackend.empty() ? 0 : 5; break;
            case DNN_TARGET_OPENCL: out << "OCL\\n"; colorId = 1; break;
            case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16\\n"; colorId = 2; break;
            case DNN_TARGET_MYRIAD: out << "MYRIAD\\n"; colorId = 3; break;
            case DNN_TARGET_FPGA: out << "FPGA\\n"; colorId = 4; break;
            case DNN_TARGET_CUDA: out << "CUDA\\n"; colorId = 5; break;
            case DNN_TARGET_CUDA_FP16: out << "CUDA_FP16\\n"; colorId = 6; break;
        }
        // Single layer => simple box; fused group => record with "{...}" body.
        out << ((skipId.size() == 1)? "\" " : " }\" ");
        out << "fillcolor=\"" << colors[colorId] << "\" ";
        out << "style=filled ";
        out << "shape=" << ((skipId.size() == 1)? "box" : "record") << "]" << '\n';
    }
    out << '\n';
    // Add edges
    int inputsSize = impl->netInputLayer->outNames.size();
    for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
    {
        if (allLayers[it->first] == -1) // node
        {
            for (int i = 0; i < it->second.consumers.size(); i++)
            {
                int outId = it->second.consumers[i].lid;
                // The input layer fans out one node per named input when the
                // network has several inputs.
                if (it == map.begin() && inputsSize > 1)
                    out << " " << "\"" << it->second.name << "_" << i << "\"" << " -> ";
                else
                    out << " " << "\"" << it->second.name << "\"" << " -> ";
                if (allLayers[outId] == -1) // node
                    out << "\"" << map[outId].name << "\"" << '\n';
                else // cluster
                    out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n';
            }
        }
        else if (it->first == skippedLayers[allLayers[it->first]].back()) // edges from last layer in cluster
        {
            for (int i = 0; i < it->second.consumers.size(); i++)
            {
                int outId = it->second.consumers[i].lid;
                if (allLayers[outId] == -1) { // node
                    out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> ";
                    out << "\"" << map[outId].name << "\"" << '\n';
                }
                else if (allLayers[outId] != allLayers[it->first]) { // another cluster
                    out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> ";
                    out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n';
                }
            }
        }
    }
    out << "}";
    return out.str();
}
|
|
|
|
|
|
|
|
void Net::dumpToFile(const String& path) {
|
|
|
|
std::ofstream file(path.c_str());
|
|
|
|
file << dump();
|
|
|
|
file.close();
|
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
Ptr<Layer> Net::getLayer(LayerId layerId)
{
    // Look the layer up by id/name and return its (lazily created) instance.
    LayerData& data = impl->getLayerData(layerId);
    return data.getLayerInstance();
}
|
|
|
|
|
|
|
|
std::vector<Ptr<Layer> > Net::getLayerInputs(LayerId layerId)
{
    // Collect the layer instances that feed the inputs of the given layer.
    LayerData& ld = impl->getLayerData(layerId);

    std::vector<Ptr<Layer> > inputLayers;
    const size_t numInputs = ld.inputBlobsId.size();
    inputLayers.reserve(numInputs);
    for (size_t i = 0; i < numInputs; ++i)
        inputLayers.push_back(getLayer(ld.inputBlobsId[i].lid));
    return inputLayers;
}
|
|
|
|
|
|
|
|
std::vector<String> Net::getLayerNames() const
|
|
|
|
{
|
|
|
|
std::vector<String> res;
|
|
|
|
res.reserve(impl->layers.size());
|
|
|
|
|
|
|
|
Impl::MapIdToLayerData::iterator it;
|
|
|
|
for (it = impl->layers.begin(); it != impl->layers.end(); it++)
|
|
|
|
{
|
|
|
|
if (it->second.id) //skip Data layer
|
|
|
|
res.push_back(it->second.name);
|
|
|
|
}
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Net::empty() const
|
|
|
|
{
|
|
|
|
return impl->layers.size() <= 1; //first layer is default Data layer
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<int> Net::getUnconnectedOutLayers() const
|
|
|
|
{
|
|
|
|
std::vector<int> layersIds;
|
|
|
|
|
|
|
|
Impl::MapIdToLayerData::iterator it;
|
|
|
|
for (it = impl->layers.begin(); it != impl->layers.end(); it++)
|
|
|
|
{
|
|
|
|
int lid = it->first;
|
|
|
|
LayerData &ld = it->second;
|
|
|
|
|
|
|
|
if (ld.requiredOutputs.size() == 0)
|
|
|
|
layersIds.push_back(lid);
|
|
|
|
}
|
|
|
|
|
|
|
|
return layersIds;
|
|
|
|
}
|
|
|
|
|
2018-09-25 23:10:45 +08:00
|
|
|
std::vector<String> Net::getUnconnectedOutLayersNames() const
|
|
|
|
{
|
|
|
|
std::vector<int> ids = getUnconnectedOutLayers();
|
|
|
|
const size_t n = ids.size();
|
|
|
|
std::vector<String> names(n);
|
|
|
|
for (size_t i = 0; i < n; ++i)
|
|
|
|
{
|
|
|
|
names[i] = impl->layers[ids[i]].name;
|
|
|
|
}
|
|
|
|
return names;
|
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
void Net::getLayersShapes(const ShapesVec& netInputShapes,
|
2017-08-02 22:27:58 +08:00
|
|
|
std::vector<int>& layersIds,
|
|
|
|
std::vector<ShapesVec>& inLayersShapes,
|
|
|
|
std::vector<ShapesVec>& outLayersShapes) const
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-08-02 22:27:58 +08:00
|
|
|
layersIds.clear();
|
|
|
|
inLayersShapes.clear();
|
|
|
|
outLayersShapes.clear();
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
Impl::LayersShapesMap inOutShapes;
|
|
|
|
impl->getLayersShapes(netInputShapes, inOutShapes);
|
|
|
|
|
|
|
|
for(Impl::LayersShapesMap::const_iterator it = inOutShapes.begin();
|
|
|
|
it != inOutShapes.end(); it++)
|
|
|
|
{
|
2017-08-02 22:27:58 +08:00
|
|
|
layersIds.push_back(it->first);
|
|
|
|
inLayersShapes.push_back(it->second.in);
|
|
|
|
outLayersShapes.push_back(it->second.out);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getLayersShapes(const MatShape& netInputShape,
|
2017-08-02 22:27:58 +08:00
|
|
|
std::vector<int>& layerIds,
|
|
|
|
std::vector<ShapesVec>& inLayersShapes,
|
|
|
|
std::vector<ShapesVec>& outLayersShapes) const
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
getLayersShapes(ShapesVec(1, netInputShape),
|
|
|
|
layerIds, inLayersShapes, outLayersShapes);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getLayerShapes(const MatShape& netInputShape,
|
|
|
|
const int layerId,
|
2017-08-02 22:27:58 +08:00
|
|
|
ShapesVec& inLayerShapes,
|
|
|
|
ShapesVec& outLayerShapes) const
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
getLayerShapes(ShapesVec(1, netInputShape),
|
|
|
|
layerId, inLayerShapes, outLayerShapes);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getLayerShapes(const ShapesVec& netInputShapes,
|
|
|
|
const int layerId,
|
2017-08-02 22:27:58 +08:00
|
|
|
ShapesVec& inLayerShapes,
|
|
|
|
ShapesVec& outLayerShapes) const
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
|
|
|
LayerShapes shapes;
|
|
|
|
impl->getLayerShapes(netInputShapes, layerId, shapes);
|
2017-08-02 22:27:58 +08:00
|
|
|
inLayerShapes = shapes.in;
|
|
|
|
outLayerShapes = shapes.out;
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const
{
    CV_TRACE_FUNCTION();

    // Total FLOPS = sum of the per-layer estimates for these input shapes.
    std::vector<int> ids;
    std::vector<std::vector<MatShape> > inShapes, outShapes;
    getLayersShapes(netInputShapes, ids, inShapes, outShapes);
    CV_Assert(inShapes.size() == outShapes.size());
    CV_Assert(inShapes.size() == ids.size());

    int64 flops = 0;
    for (size_t i = 0; i < ids.size(); ++i)
    {
        Ptr<Layer> layer = impl->layers[ids[i]].getLayerInstance();
        flops += layer->getFLOPS(inShapes[i], outShapes[i]);
    }

    return flops;
}
|
|
|
|
|
|
|
|
int64 Net::getFLOPS(const MatShape& netInputShape) const
{
    // Single-input convenience overload.
    const std::vector<MatShape> inputs(1, netInputShape);
    return getFLOPS(inputs);
}
|
|
|
|
|
|
|
|
int64 Net::getFLOPS(const int layerId,
              const std::vector<MatShape>& netInputShapes) const
{
    // FLOPS estimate for one specific layer.
    Impl::MapIdToLayerData::iterator found = impl->layers.find(layerId);
    CV_Assert(found != impl->layers.end());

    // Infer the layer's in/out shapes first, then ask the layer itself.
    LayerShapes shapes;
    impl->getLayerShapes(netInputShapes, layerId, shapes);

    Ptr<Layer> layer = found->second.getLayerInstance();
    return layer->getFLOPS(shapes.in, shapes.out);
}
|
|
|
|
|
|
|
|
int64 Net::getFLOPS(const int layerId,
              const MatShape& netInputShape) const
{
    // Single-input convenience overload.
    const std::vector<MatShape> inputs(1, netInputShape);
    return getFLOPS(layerId, inputs);
}
|
|
|
|
|
|
|
|
void Net::getLayerTypes(std::vector<String>& layersTypes) const
|
|
|
|
{
|
|
|
|
layersTypes.clear();
|
|
|
|
|
|
|
|
std::map<String, int> layers;
|
|
|
|
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
|
|
|
|
it != impl->layers.end(); it++)
|
|
|
|
{
|
|
|
|
if (layers.find(it->second.type) == layers.end())
|
|
|
|
layers[it->second.type] = 0;
|
|
|
|
layers[it->second.type]++;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (std::map<String, int>::iterator it = layers.begin();
|
|
|
|
it != layers.end(); it++)
|
|
|
|
{
|
|
|
|
layersTypes.push_back(it->first);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int Net::getLayersCount(const String& layerType) const
|
|
|
|
{
|
|
|
|
int count = 0;
|
|
|
|
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
|
|
|
|
it != impl->layers.end(); it++)
|
|
|
|
{
|
|
|
|
if (it->second.type == layerType)
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const int layerId,
|
|
|
|
const std::vector<MatShape>& netInputShapes,
|
|
|
|
size_t& weights, size_t& blobs) const
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
|
|
|
|
CV_Assert(layer != impl->layers.end());
|
|
|
|
|
|
|
|
weights = blobs = 0;
|
|
|
|
|
|
|
|
for(int i = 0; i < layer->second.params.blobs.size(); i++)
|
|
|
|
{
|
|
|
|
const Mat& weightsBlob = layer->second.params.blobs[i];
|
|
|
|
weights += weightsBlob.total()*weightsBlob.elemSize();
|
|
|
|
}
|
|
|
|
|
2017-08-02 22:27:58 +08:00
|
|
|
ShapesVec inLayerShapes, outLayerShapes;
|
|
|
|
getLayerShapes(netInputShapes, layerId, inLayerShapes, outLayerShapes);
|
2017-06-26 18:35:51 +08:00
|
|
|
for(int i = 0; i < outLayerShapes.size(); i++)
|
|
|
|
{
|
|
|
|
blobs += total(outLayerShapes[i]) * sizeof(float);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
|
|
|
|
size_t& weights, size_t& blobs) const
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<int> layerIds;
|
|
|
|
std::vector<size_t> w, b;
|
|
|
|
getMemoryConsumption(netInputShapes, layerIds, w, b);
|
|
|
|
|
|
|
|
weights = blobs = 0;
|
|
|
|
for(int i = 0; i < layerIds.size(); i++)
|
|
|
|
{
|
|
|
|
weights += w[i];
|
|
|
|
blobs += b[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const int layerId,
|
|
|
|
const MatShape& netInputShape,
|
|
|
|
size_t& weights, size_t& blobs) const
|
|
|
|
{
|
|
|
|
getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape),
|
|
|
|
weights, blobs);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const MatShape& netInputShape,
|
|
|
|
size_t& weights, size_t& blobs) const
|
|
|
|
{
|
|
|
|
getMemoryConsumption(std::vector<MatShape>(1, netInputShape),
|
|
|
|
weights, blobs);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
|
|
|
|
std::vector<int>& layerIds, std::vector<size_t>& weights,
|
|
|
|
std::vector<size_t>& blobs) const
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
layerIds.clear();
|
|
|
|
weights.clear();
|
|
|
|
blobs.clear();
|
|
|
|
|
2017-08-02 22:27:58 +08:00
|
|
|
std::vector<std::vector<MatShape> > inLayerShapes, outLayerShapes;
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2017-08-02 22:27:58 +08:00
|
|
|
getLayersShapes(netInputShapes, layerIds, inLayerShapes, outLayerShapes);
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
for(int i = 0; i < layerIds.size(); i++)
|
|
|
|
{
|
|
|
|
int w = 0, b = 0;
|
|
|
|
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]);
|
|
|
|
CV_Assert(layer != impl->layers.end());
|
|
|
|
|
|
|
|
for(int j = 0; j < layer->second.params.blobs.size(); j++)
|
|
|
|
{
|
|
|
|
const Mat& weightsBlob = layer->second.params.blobs[j];
|
|
|
|
w += weightsBlob.total()*weightsBlob.elemSize();
|
|
|
|
}
|
|
|
|
|
|
|
|
for(int j = 0; j < outLayerShapes[i].size(); j++)
|
|
|
|
{
|
|
|
|
b += total(outLayerShapes[i][j]) * sizeof(float);
|
|
|
|
}
|
|
|
|
|
|
|
|
weights.push_back(w);
|
|
|
|
blobs.push_back(b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Net::getMemoryConsumption(const MatShape& netInputShape, std::vector<int>& layerIds,
|
|
|
|
std::vector<size_t>& weights, std::vector<size_t>& blobs) const
|
|
|
|
{
|
|
|
|
getMemoryConsumption(std::vector<MatShape>(1, netInputShape), layerIds,
|
|
|
|
weights, blobs);
|
|
|
|
}
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
void Net::enableFusion(bool fusion)
|
|
|
|
{
|
|
|
|
if( impl->fusion != fusion )
|
|
|
|
{
|
|
|
|
impl->fusion = fusion;
|
|
|
|
impl->netWasAllocated = false;
|
|
|
|
impl->clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
// Stores the Halide scheduling configuration for this network; the value
// is kept in impl->halideConfigFile and consulted later by the Halide
// backend. NOTE(review): the name suggests a path to a config file —
// confirm the expected format with the Halide backend code.
void Net::setHalideScheduler(const String& scheduler)
{
    CV_TRACE_FUNCTION();
    CV_TRACE_ARG_VALUE(scheduler, "scheduler", scheduler.c_str());

    impl->halideConfigFile = scheduler;
}
|
|
|
|
|
2017-08-02 22:27:58 +08:00
|
|
|
/** @brief Returns per-layer timings of the last forward pass.
 *  @param timings out: one entry per layer (index 0 of the internal
 *                 timings array is skipped — presumably a reserved slot;
 *                 TODO confirm against Impl::layersTimings usage).
 *  @return sum of all per-layer timings, truncated to int64.
 */
int64 Net::getPerfProfile(std::vector<double>& timings)
{
    // Guard the not-yet-run case: begin() + 1 on an empty vector is
    // undefined behavior.
    if (impl->layersTimings.empty())
    {
        timings.clear();
        return 0;
    }
    timings = std::vector<double>(impl->layersTimings.begin() + 1, impl->layersTimings.end());
    int64 total = (int64)std::accumulate(timings.begin(), timings.end(), 0.0);
    return total;
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
|
Merge pull request #9114 from pengli:dnn_rebase
add libdnn acceleration to dnn module (#9114)
* import libdnn code
Signed-off-by: Li Peng <peng.li@intel.com>
* add convolution layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add pooling layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add softmax layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add lrn layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add innerproduct layer ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* add HAVE_OPENCL macro
Signed-off-by: Li Peng <peng.li@intel.com>
* fix for convolution ocl
Signed-off-by: Li Peng <peng.li@intel.com>
* enable getUMat() for multi-dimension Mat
Signed-off-by: Li Peng <peng.li@intel.com>
* use getUMat for ocl acceleration
Signed-off-by: Li Peng <peng.li@intel.com>
* use CV_OCL_RUN macro
Signed-off-by: Li Peng <peng.li@intel.com>
* set OPENCL target when it is available
and disable fuseLayer for OCL target for the time being
Signed-off-by: Li Peng <peng.li@intel.com>
* fix innerproduct accuracy test
Signed-off-by: Li Peng <peng.li@intel.com>
* remove trailing space
Signed-off-by: Li Peng <peng.li@intel.com>
* Fixed tensorflow demo bug.
Root cause is that tensorflow has different algorithm with libdnn
to calculate convolution output dimension.
libdnn don't calculate output dimension anymore and just use one
passed in by config.
* split gemm ocl file
split it into gemm_buffer.cl and gemm_image.cl
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix compile failure
Signed-off-by: Li Peng <peng.li@intel.com>
* check env flag for auto tuning
Signed-off-by: Li Peng <peng.li@intel.com>
* switch to new ocl kernels for softmax layer
Signed-off-by: Li Peng <peng.li@intel.com>
* update softmax layer
on some platform subgroup extension may not work well,
fallback to non subgroup ocl acceleration.
Signed-off-by: Li Peng <peng.li@intel.com>
* fallback to cpu path for fc layer with multi output
Signed-off-by: Li Peng <peng.li@intel.com>
* update output message
Signed-off-by: Li Peng <peng.li@intel.com>
* update fully connected layer
fallback to gemm API if libdnn return false
Signed-off-by: Li Peng <peng.li@intel.com>
* Add ReLU OCL implementation
* disable layer fusion for now
Signed-off-by: Li Peng <peng.li@intel.com>
* Add OCL implementation for concat layer
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* libdnn: update license and copyrights
Also refine libdnn coding style
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
Signed-off-by: Li Peng <peng.li@intel.com>
* DNN: Don't link OpenCL library explicitly
* DNN: Make default preferableTarget to DNN_TARGET_CPU
User should set it to DNN_TARGET_OPENCL explicitly if want to
use OpenCL acceleration.
Also don't fusion when using DNN_TARGET_OPENCL
* DNN: refine coding style
* Add getOpenCLErrorString
* DNN: Use int32_t/uint32_t instread of alias
* Use namespace ocl4dnn to include libdnn things
* remove extra copyTo in softmax ocl path
Signed-off-by: Li Peng <peng.li@intel.com>
* update ReLU layer ocl path
Signed-off-by: Li Peng <peng.li@intel.com>
* Add prefer target property for layer class
It is used to indicate the target for layer forwarding,
either the default CPU target or OCL target.
Signed-off-by: Li Peng <peng.li@intel.com>
* Add cl_event based timer for cv::ocl
* Rename libdnn to ocl4dnn
Signed-off-by: Li Peng <peng.li@intel.com>
Signed-off-by: wzw <zhiwen.wu@intel.com>
* use UMat for ocl4dnn internal buffer
Remove allocateMemory which use clCreateBuffer directly
Signed-off-by: Li Peng <peng.li@intel.com>
Signed-off-by: wzw <zhiwen.wu@intel.com>
* enable buffer gemm in ocl4dnn innerproduct
Signed-off-by: Li Peng <peng.li@intel.com>
* replace int_tp globally for ocl4dnn kernels.
Signed-off-by: wzw <zhiwen.wu@intel.com>
Signed-off-by: Li Peng <peng.li@intel.com>
* create UMat for layer params
Signed-off-by: Li Peng <peng.li@intel.com>
* update sign ocl kernel
Signed-off-by: Li Peng <peng.li@intel.com>
* update image based gemm of inner product layer
Signed-off-by: Li Peng <peng.li@intel.com>
* remove buffer gemm of inner product layer
call cv::gemm API instead
Signed-off-by: Li Peng <peng.li@intel.com>
* change ocl4dnn forward parameter to UMat
Signed-off-by: Li Peng <peng.li@intel.com>
* Refine auto-tuning mechanism.
- Use OPENCV_OCL4DNN_KERNEL_CONFIG_PATH to set cache directory
for fine-tuned kernel configuration.
e.g. export OPENCV_OCL4DNN_KERNEL_CONFIG_PATH=/home/tmp,
the cache directory will be /home/tmp/spatialkernels/ on Linux.
- Define environment OPENCV_OCL4DNN_ENABLE_AUTO_TUNING to enable
auto-tuning.
- OPENCV_OPENCL_ENABLE_PROFILING is only used to enable profiling
for OpenCL command queue. This fix basic kernel get wrong running
time, i.e. 0ms.
- If creating cache directory failed, disable auto-tuning.
* Detect and create cache dir on windows
Signed-off-by: Li Peng <peng.li@intel.com>
* Refine gemm like convolution kernel.
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix redundant swizzleWeights calling when use cached kernel config.
* Fix "out of resource" bug when auto-tuning too many kernels.
* replace cl_mem with UMat in ocl4dnnConvSpatial class
* OCL4DNN: reduce the tuning kernel candidate.
This patch could reduce 75% of the tuning candidates with less
than 2% performance impact for the final result.
Signed-off-by: Zhigang Gong <zhigang.gong@intel.com>
* replace cl_mem with umat in ocl4dnn convolution
Signed-off-by: Li Peng <peng.li@intel.com>
* remove weight_image_ of ocl4dnn inner product
Actually it is unused in the computation
Signed-off-by: Li Peng <peng.li@intel.com>
* Various fixes for ocl4dnn
1. OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel())
2. Ptr<OCL4DNNInnerProduct<float> > innerProductOp
3. Code comments cleanup
4. ignore check on OCL cpu device
Signed-off-by: Li Peng <peng.li@intel.com>
* add build option for log softmax
Signed-off-by: Li Peng <peng.li@intel.com>
* remove unused ocl kernels in ocl4dnn
Signed-off-by: Li Peng <peng.li@intel.com>
* replace ocl4dnnSet with opencv setTo
Signed-off-by: Li Peng <peng.li@intel.com>
* replace ALIGN with cv::alignSize
Signed-off-by: Li Peng <peng.li@intel.com>
* check kernel build options
Signed-off-by: Li Peng <peng.li@intel.com>
* Handle program compilation fail properly.
* Use std::numeric_limits<float>::infinity() for large float number
* check ocl4dnn kernel compilation result
Signed-off-by: Li Peng <peng.li@intel.com>
* remove unused ctx_id
Signed-off-by: Li Peng <peng.li@intel.com>
* change clEnqueueNDRangeKernel to kernel.run()
Signed-off-by: Li Peng <peng.li@intel.com>
* change cl_mem to UMat in image based gemm
Signed-off-by: Li Peng <peng.li@intel.com>
* check intel subgroup support for lrn and pooling layer
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix convolution bug if group is greater than 1
Signed-off-by: Li Peng <peng.li@intel.com>
* Set default layer preferableTarget to be DNN_TARGET_CPU
Signed-off-by: Li Peng <peng.li@intel.com>
* Add ocl perf test for convolution
Signed-off-by: Li Peng <peng.li@intel.com>
* Add more ocl accuracy test
Signed-off-by: Li Peng <peng.li@intel.com>
* replace cl_image with ocl::Image2D
Signed-off-by: Li Peng <peng.li@intel.com>
* Fix build failure in elementwise layer
Signed-off-by: Li Peng <peng.li@intel.com>
* use getUMat() to get blob data
Signed-off-by: Li Peng <peng.li@intel.com>
* replace cl_mem handle with ocl::KernelArg
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(build): don't use C++11, OPENCL_LIBRARIES fix
* dnn(ocl4dnn): remove unused OpenCL kernels
* dnn(ocl4dnn): extract OpenCL code into .cl files
* dnn(ocl4dnn): refine auto-tuning
Defaultly disable auto-tuning, set OPENCV_OCL4DNN_ENABLE_AUTO_TUNING
environment variable to enable it.
Use a set of pre-tuned configs as default config if auto-tuning is disabled.
These configs are tuned for Intel GPU with 48/72 EUs, and for googlenet,
AlexNet, ResNet-50
If default config is not suitable, use the first available kernel config
from the candidates. Candidate priority from high to low is gemm like kernel,
IDLF kernel, basick kernel.
* dnn(ocl4dnn): pooling doesn't use OpenCL subgroups
* dnn(ocl4dnn): fix perf test
OpenCV has default 3sec time limit for each performance test.
Warmup OpenCL backend outside of perf measurement loop.
* use ocl::KernelArg as much as possible
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): fix bias bug for gemm like kernel
* dnn(ocl4dnn): wrap cl_mem into UMat
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): Refine signature of kernel config
- Use more readable string as signture of kernel config
- Don't count device name and vendor in signature string
- Default kernel configurations are tuned for Intel GPU with
24/48/72 EUs, and for googlenet, AlexNet, ResNet-50 net model.
* dnn(ocl4dnn): swap width/height in configuration
* dnn(ocl4dnn): enable configs for Intel OpenCL runtime only
* core: make configuration helper functions accessible from non-core modules
* dnn(ocl4dnn): update kernel auto-tuning behavior
Avoid unwanted creation of directories
* dnn(ocl4dnn): simplify kernel to workaround OpenCL compiler crash
* dnn(ocl4dnn): remove redundant code
* dnn(ocl4dnn): Add more clear message for simd size dismatch.
* dnn(ocl4dnn): add const to const argument
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): force compiler use a specific SIMD size for IDLF kernel
* dnn(ocl4dnn): drop unused tuneLocalSize()
* dnn(ocl4dnn): specify OpenCL queue for Timer and convolve() method
* dnn(ocl4dnn): sanitize file names used for cache
* dnn(perf): enable Network tests with OpenCL
* dnn(ocl4dnn/conv): drop computeGlobalSize()
* dnn(ocl4dnn/conv): drop unused fields
* dnn(ocl4dnn/conv): simplify ctor
* dnn(ocl4dnn/conv): refactor kernelConfig localSize=NULL
* dnn(ocl4dnn/conv): drop unsupported double / untested half types
* dnn(ocl4dnn/conv): drop unused variable
* dnn(ocl4dnn/conv): alignSize/divUp
* dnn(ocl4dnn/conv): use enum values
* dnn(ocl4dnn): drop unused innerproduct variable
Signed-off-by: Li Peng <peng.li@intel.com>
* dnn(ocl4dnn): add an generic function to check cl option support
* dnn(ocl4dnn): run softmax subgroup version kernel first
Signed-off-by: Li Peng <peng.li@intel.com>
2017-10-02 20:38:00 +08:00
|
|
|
// Default constructor: a layer targets the CPU unless a backend changes it.
Layer::Layer() { preferableTarget = DNN_TARGET_CPU; }
|
2017-06-26 18:35:51 +08:00
|
|
|
|
|
|
|
// Constructs a layer from its parameters: copies the weight blobs, name
// and type, and defaults the preferable target to CPU (backends may
// override it after construction).
Layer::Layer(const LayerParams &params)
    : blobs(params.blobs), name(params.name), type(params.type)
{
    preferableTarget = DNN_TARGET_CPU;
}
|
|
|
|
|
|
|
|
void Layer::setParamsFrom(const LayerParams ¶ms)
|
|
|
|
{
|
|
|
|
blobs = params.blobs;
|
|
|
|
name = params.name;
|
|
|
|
type = params.type;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Base-class default: this layer exposes no named inputs, so every lookup
// reports -1 ("not found"). Subclasses with named inputs override this.
int Layer::inputNameToIndex(String)
{
    return -1;
}
|
|
|
|
|
2018-03-12 23:42:53 +08:00
|
|
|
// Base-class default: any output name maps to index 0. Subclasses with
// multiple named outputs override this.
int Layer::outputNameToIndex(const String&)
{
    return 0;
}
|
|
|
|
|
|
|
|
// Base-class default: only the plain OpenCV backend is supported.
// Backend-aware layer implementations override this with their own check.
bool Layer::supportBackend(int backendId)
{
    return backendId == DNN_BACKEND_OPENCV;
}
|
|
|
|
|
Merge pull request #14827 from YashasSamaga:cuda4dnn-csl-low
CUDA backend for the DNN module
* stub cuda4dnn design
* minor fixes for tests and doxygen
* add csl public api directory to module headers
* add low-level CSL components
* add high-level CSL components
* integrate csl::Tensor into backbone code
* switch to CPU iff unsupported; otherwise, fail on error
* add fully connected layer
* add softmax layer
* add activation layers
* support arbitary rank TensorDescriptor
* pass input wrappers to `initCUDA()`
* add 1d/2d/3d-convolution
* add pooling layer
* reorganize and refactor code
* fixes for gcc, clang and doxygen; remove cxx14/17 code
* add blank_layer
* add LRN layer
* add rounding modes for pooling layer
* split tensor.hpp into tensor.hpp and tensor_ops.hpp
* add concat layer
* add scale layer
* add batch normalization layer
* split math.cu into activations.cu and math.hpp
* add eltwise layer
* add flatten layer
* add tensor transform api
* add asymmetric padding support for convolution layer
* add reshape layer
* fix rebase issues
* add permute layer
* add padding support for concat layer
* refactor and reorganize code
* add normalize layer
* optimize bias addition in scale layer
* add prior box layer
* fix and optimize normalize layer
* add asymmetric padding support for pooling layer
* add event API
* improve pooling performance for some padding scenarios
* avoid over-allocation of compute resources to kernels
* improve prior box performance
* enable layer fusion
* add const layer
* add resize layer
* add slice layer
* add padding layer
* add deconvolution layer
* fix channelwise ReLU initialization
* add vector traits
* add vectorized versions of relu, clipped_relu, power
* add vectorized concat kernels
* improve concat_with_offsets performance
* vectorize scale and bias kernels
* add support for multi-billion element tensors
* vectorize prior box kernels
* fix address alignment check
* improve bias addition performance of conv/deconv/fc layers
* restructure code for supporting multiple targets
* add DNN_TARGET_CUDA_FP64
* add DNN_TARGET_FP16
* improve vectorization
* add region layer
* improve tensor API, add dynamic ranks
1. use ManagedPtr instead of a Tensor in backend wrapper
2. add new methods to tensor classes
- size_range: computes the combined size of for a given axis range
- tensor span/view can be constructed from a raw pointer and shape
3. the tensor classes can change their rank at runtime (previously rank was fixed at compile-time)
4. remove device code from tensor classes (as they are unused)
5. enforce strict conditions on tensor class APIs to improve debugging ability
* fix parametric relu activation
* add squeeze/unsqueeze tensor API
* add reorg layer
* optimize permute and enable 2d permute
* enable 1d and 2d slice
* add split layer
* add shuffle channel layer
* allow tensors of different ranks in reshape primitive
* patch SliceOp to allow Crop Layer
* allow extra shape inputs in reshape layer
* use `std::move_backward` instead of `std::move` for insert in resizable_static_array
* improve workspace management
* add spatial LRN
* add nms (cpu) to region layer
* add max pooling with argmax ( and a fix to limits.hpp)
* add max unpooling layer
* rename DNN_TARGET_CUDA_FP32 to DNN_TARGET_CUDA
* update supportBackend to be more rigorous
* remove stray include from preventing non-cuda build
* include op_cuda.hpp outside condition #if
* refactoring, fixes and many optimizations
* drop DNN_TARGET_CUDA_FP64
* fix gcc errors
* increase max. tensor rank limit to six
* add Interp layer
* drop custom layers; use BackendNode
* vectorize activation kernels
* fixes for gcc
* remove wrong assertion
* fix broken assertion in unpooling primitive
* fix build errors in non-CUDA build
* completely remove workspace from public API
* fix permute layer
* enable accuracy and perf. tests for DNN_TARGET_CUDA
* add asynchronous forward
* vectorize eltwise ops
* vectorize fill kernel
* fixes for gcc
* remove CSL headers from public API
* remove csl header source group from cmake
* update min. cudnn version in cmake
* add numerically stable FP32 log1pexp
* refactor code
* add FP16 specialization to cudnn based tensor addition
* vectorize scale1 and bias1 + minor refactoring
* fix doxygen build
* fix invalid alignment assertion
* clear backend wrappers before allocateLayers
* ignore memory lock failures
* do not allocate internal blobs
* integrate NVTX
* add numerically stable half precision log1pexp
* fix indentation, following coding style, improve docs
* remove accidental modification of IE code
* Revert "add asynchronous forward"
This reverts commit 1154b9da9da07e9b52f8a81bdcea48cf31c56f70.
* [cmake] throw error for unsupported CC versions
* fix rebase issues
* add more docs, refactor code, fix bugs
* minor refactoring and fixes
* resolve warnings/errors from clang
* remove haveCUDA() checks from supportBackend()
* remove NVTX integration
* changes based on review comments
* avoid exception when no CUDA device is present
* add color code for CUDA in Net::dump
2019-10-21 19:28:00 +08:00
|
|
|
// Base-class stub for the CUDA backend: a layer type without a CUDA
// implementation fails loudly here rather than silently producing an
// empty backend node.
Ptr<BackendNode> Layer::initCUDA(
    void*,
    const std::vector<Ptr<BackendWrapper>>&,
    const std::vector<Ptr<BackendWrapper>>&)
{
    CV_Error(Error::StsNotImplemented, "CUDA pipeline of " + type +
                                       " layers is not defined.");
    return Ptr<BackendNode>();  // unreachable: CV_Error throws
}
|
|
|
|
|
Merge pull request #12703 from wzw-intel:vkcom
* dnn: Add a Vulkan based backend
This commit adds a new backend "DNN_BACKEND_VKCOM" and a
new target "DNN_TARGET_VULKAN". VKCOM means vulkan based
computation library.
This backend uses Vulkan API and SPIR-V shaders to do
the inference computation for layers. The layer types
that implemented in DNN_BACKEND_VKCOM include:
Conv, Concat, ReLU, LRN, PriorBox, Softmax, MaxPooling,
AvePooling, Permute
This is just a beginning work for Vulkan in OpenCV DNN,
more layer types will be supported and performance
tuning is on the way.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/vulkan: Add FindVulkan.cmake to detect Vulkan SDK
In order to build dnn with Vulkan support, need installing
Vulkan SDK and setting environment variable "VULKAN_SDK" and
add "-DWITH_VULKAN=ON" to cmake command.
You can download Vulkan SDK from:
https://vulkan.lunarg.com/sdk/home#linux
For how to install, see
https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/windows/getting_started.html
https://vulkan.lunarg.com/doc/sdk/latest/mac/getting_started.html
respectively for linux, windows and mac.
To run the vulkan backend, also need installing mesa driver.
On Ubuntu, use this command 'sudo apt-get install mesa-vulkan-drivers'
To test, use command '$BUILD_DIR/bin/opencv_test_dnn --gtest_filter=*VkCom*'
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: dynamically load Vulkan runtime
No compile-time dependency on Vulkan library.
If Vulkan runtime is unavailable, fallback to CPU path.
Use environment variable "OPENCV_VULKAN_RUNTIME" to specify path to your
own vulkan runtime library.
Signed-off-by: Wu Zhiwen <zhiwen.wu@intel.com>
* dnn/Vulkan: Add a python script to compile GLSL shaders to SPIR-V shaders
The SPIR-V shaders are in format of text-based 32-bit hexadecimal
numbers, and inserted into .cpp files as unsigned int32 array.
* dnn/Vulkan: Put Vulkan headers into 3rdparty directory and some other fixes
Vulkan header files are copied from
https://github.com/KhronosGroup/Vulkan-Docs/tree/master/include/vulkan
to 3rdparty/include
Fix the Copyright declaration issue.
Refine OpenCVDetectVulkan.cmake
* dnn/Vulkan: Add vulkan backend tests into existing ones.
Also fixed some test failures.
- Don't use bool variable as uniform for shader
- Fix dispatched group number beyond max issue
- Bypass "group > 1" convolution. This should be support in future.
* dnn/Vulkan: Fix multiple initialization in one thread.
2018-10-29 22:51:26 +08:00
|
|
|
/**
 * Default Vulkan (VkCom) backend initializer.
 *
 * Overridden by layers that implement a Vulkan pipeline; the base version
 * only reports the lack of one.
 */
Ptr<BackendNode> Layer::initVkCom(const std::vector<Ptr<BackendWrapper> > &)
{
    String msg = "VkCom pipeline of " + type + " layers is not defined.";
    CV_Error(Error::StsNotImplemented, msg);
    return Ptr<BackendNode>();  // not reached
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
/**
 * Default Halide backend initializer.
 *
 * Overridden by layers that implement a Halide pipeline; the base version
 * only reports the lack of one.
 */
Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
{
    String msg = "Halide pipeline of " + type + " layers is not defined.";
    CV_Error(Error::StsNotImplemented, msg);
    return Ptr<BackendNode>();  // not reached
}
|
|
|
|
|
2018-02-06 16:57:35 +08:00
|
|
|
/**
 * Default Inference Engine (DLDT) backend initializer.
 *
 * Overridden by layers that implement an IE pipeline; the base version
 * only reports the lack of one.
 */
Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
{
    String msg = "Inference Engine pipeline of " + type + " layers is not defined.";
    CV_Error(Error::StsNotImplemented, msg);
    return Ptr<BackendNode>();  // not reached
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
/**
 * Apply a generic Halide schedule to the layer's top function.
 *
 * @param node     Halide backend node whose last Func receives the schedule.
 * @param inputs   input blobs (unused by the generic schedule).
 * @param outputs  output blobs; the first one's shape drives the tiling choice.
 * @param targetId DNN_TARGET_CPU or DNN_TARGET_OPENCL; anything else raises
 *                 StsNotImplemented.
 *
 * The split/fuse/vectorize chains below are order-sensitive and are kept
 * exactly as in the reference schedule.
 */
void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*> &inputs,
                                 const std::vector<Mat> &outputs, int targetId) const
{
#ifdef HAVE_HALIDE
    CV_TRACE_FUNCTION();

    if (targetId != DNN_TARGET_CPU && targetId != DNN_TARGET_OPENCL)
        CV_Error(Error::StsNotImplemented, "Unknown target identifier");

    Halide::Var x("x"), y("y"), c("c"), n("n"), co("co"), ci("ci"),
                xo("xo"), xi("xi"), yo("yo"), yi("yi"), tile("tile");
    Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();

    // Canonical NCHW extents of the first output blob.
    int outW, outH, outC, outN;
    getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);

    if (targetId == DNN_TARGET_CPU)
    {
        if (outW == 1 && outH == 1)
        {
            if (outC + outN == 1)
                return;  // a single scalar output: nothing worth scheduling

            if (outC > 8)
                top.split(c, co, ci, 8)
                   .fuse(x, y, tile).fuse(co, tile, tile).fuse(n, tile, tile)
                   .parallel(tile)
                   .vectorize(ci, 8);
            else
                top.fuse(x, y, tile).fuse(c, tile, tile).fuse(n, tile, tile)
                   .parallel(tile);
        }
        else if (outH > 2)
        {
            top.reorder(x, c, y)
               .split(y, yo, yi, 2)
               .fuse(yo, n, tile)
               .parallel(tile)
               .unroll(yi)
               .vectorize(x, outW >= 16 ? 16 : outW);
        }
    }
    else  // DNN_TARGET_OPENCL (the only remaining possibility)
    {
        if (outW == 1 && outH == 1)
        {
            int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : outC;
            top.split(c, co, ci, c_split)
               .fuse(x, y, tile).fuse(co, tile, tile).fuse(n, tile, tile)
               .gpu_blocks(tile)
               .gpu_threads(ci);
        }
        else
        {
            int x_split = outW > 8 ? (outW >= 32 ? 16 : 8) : outW;
            int y_split = outH > 8 ? (outH >= 32 ? 16 : 8) : outH;
            // Supported vectorization widths: 2, 3, 4, 8, 16
            int c_split = outC > 8 ? (outC > 16 ? 8 : 4) : std::min(4, outC);
            top.split(x, xo, xi, x_split).split(y, yo, yi, y_split)
               .split(c, co, ci, c_split)
               .gpu_blocks(xo, yo, co)
               .gpu_threads(xi, yi)
               .reorder(xi, yi, ci, xo, yo, co)
               .vectorize(ci);
        }
    }
#endif  // HAVE_HALIDE
}
|
|
|
|
|
|
|
|
/**
 * Default: this layer cannot be attached on top of a preceding backend node.
 * An empty node means "no fusion performed".
 */
Ptr<BackendNode> Layer::tryAttach(const Ptr<BackendNode>& node)
{
    CV_UNUSED(node);
    return Ptr<BackendNode>();
}
|
|
|
|
|
2017-06-28 16:15:22 +08:00
|
|
|
// Default: fusing an activation into this layer is not supported.
bool Layer::setActivation(const Ptr<ActivationLayer>&) { return false; }
|
2018-02-13 17:07:56 +08:00
|
|
|
// Default: this layer cannot absorb (fuse) a subsequent layer.
bool Layer::tryFuse(Ptr<Layer>&) { return false; }
|
|
|
|
/**
 * Report per-channel scale/shift coefficients for layer fusion.
 * The base implementation returns empty matrices: this layer contributes
 * no scaling parameters.
 */
void Layer::getScaleShift(Mat& scale, Mat& shift) const
{
    shift = Mat();
    scale = Mat();
}
|
|
|
|
|
2017-07-04 22:23:47 +08:00
|
|
|
void Layer::unsetAttached()
|
|
|
|
{
|
|
|
|
setActivation(Ptr<ActivationLayer>());
|
|
|
|
}
|
2017-06-28 16:15:22 +08:00
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
/**
 * Fill @p pv with pointers aliasing the elements of @p v.
 *
 * The pointers remain valid only while @p v is neither resized nor destroyed.
 * const is cast away so legacy mutable-pointer interfaces can be fed from a
 * const vector.
 */
template <typename T>
static void vecToPVec(const std::vector<T> &v, std::vector<T*> &pv)
{
    pv.clear();
    pv.reserve(v.size());
    for (const T& elem : v)
        pv.push_back(const_cast<T*>(&elem));
}
|
|
|
|
|
|
|
|
void Layer::finalize(const std::vector<Mat> &inputs, std::vector<Mat> &outputs)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
2018-09-06 18:26:47 +08:00
|
|
|
this->finalize((InputArrayOfArrays)inputs, (OutputArrayOfArrays)outputs);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Legacy raw-pointer finalize hook. The base implementation has nothing to
 * pre-compute, so both arguments are deliberately ignored.
 */
void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
{
    CV_UNUSED(input);
    CV_UNUSED(output);
}
|
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
/**
 * Main finalize entry point: materializes the wrappers as Mat vectors and
 * dispatches to the legacy raw-pointer overload (which user layers may
 * override).
 */
void Layer::finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr)
{
    CV_TRACE_FUNCTION();

    std::vector<Mat> inputs;
    std::vector<Mat> outputs;
    inputs_arr.getMatVector(inputs);
    outputs_arr.getMatVector(outputs);

    std::vector<Mat*> inputPtrs;
    vecToPVec(inputs, inputPtrs);
    this->finalize(inputPtrs, outputs);
}
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2017-06-26 18:35:51 +08:00
|
|
|
std::vector<Mat> outputs;
|
|
|
|
this->finalize(inputs, outputs);
|
|
|
|
return outputs;
|
|
|
|
}
|
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
/**
 * Legacy raw-pointer forward overload.
 *
 * Intentionally empty in the base class; the framework invokes it only so
 * that user layers overriding this signature keep working.
 */
void Layer::forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
{
    // We kept this method for compatibility. DNN calls it now only to support users' implementations.
    // Silence unused-parameter warnings, consistent with the finalize() stubs.
    CV_UNUSED(input);
    CV_UNUSED(output);
    CV_UNUSED(internals);
}
|
|
|
|
|
|
|
|
/**
 * Default forward implementation: route through the generic Mat-based
 * fallback path. Backend-aware layers override this method.
 */
void Layer::forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
    CV_TRACE_FUNCTION();
    CV_TRACE_ARG_VALUE(name, "name", name.c_str());

    // Qualified call: always the base fallback, never a virtual override.
    Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
|
|
|
|
|
2017-11-09 12:57:37 +08:00
|
|
|
/**
 * Reference forward path used when no backend-specific implementation exists.
 *
 * For the OpenCL FP16 target the blobs arrive as CV_16S-packed half floats:
 * inputs are widened to FP32, the layer runs in FP32, and the outputs are
 * narrowed back to FP16 before being handed to the caller. Otherwise the
 * wrappers are materialized as Mat vectors and dispatched to the legacy
 * `forward(std::vector<Mat*>&, ...)` overload.
 *
 * Fix: loop indices are size_t now (previously `int`, causing
 * signed/unsigned comparison warnings against vector::size()).
 */
void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
    CV_TRACE_FUNCTION();
    CV_TRACE_ARG_VALUE(name, "name", name.c_str());

    if (preferableTarget == DNN_TARGET_OPENCL_FP16 && inputs_arr.depth() == CV_16S)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        std::vector<UMat> internals;

        std::vector<UMat> orig_inputs;
        std::vector<UMat> orig_outputs;
        std::vector<UMat> orig_internals;

        inputs_arr.getUMatVector(orig_inputs);
        outputs_arr.getUMatVector(orig_outputs);
        internals_arr.getUMatVector(orig_internals);

        // Widen FP16 inputs to FP32 for the actual computation.
        inputs.resize(orig_inputs.size());
        for (size_t i = 0; i < orig_inputs.size(); i++)
            convertFp16(orig_inputs[i], inputs[i]);

        outputs.resize(orig_outputs.size());
        for (size_t i = 0; i < orig_outputs.size(); i++)
            outputs[i].create(shape(orig_outputs[i]), CV_32F);

        internals.resize(orig_internals.size());
        for (size_t i = 0; i < orig_internals.size(); i++)
            internals[i].create(shape(orig_internals[i]), CV_32F);

        forward(inputs, outputs, internals);

        // Narrow the FP32 results back to FP16 for the caller.
        for (size_t i = 0; i < outputs.size(); i++)
            convertFp16(outputs[i], orig_outputs[i]);

        // sync results back
        outputs_arr.assign(orig_outputs);
        internals_arr.assign(orig_internals);
        return;
    }
    std::vector<Mat> inpvec;
    std::vector<Mat> outputs;
    std::vector<Mat> internals;

    inputs_arr.getMatVector(inpvec);
    outputs_arr.getMatVector(outputs);
    internals_arr.getMatVector(internals);

    std::vector<Mat*> inputs(inpvec.size());
    for (size_t i = 0; i < inpvec.size(); i++)
        inputs[i] = &inpvec[i];

    this->forward(inputs, outputs, internals);

    // sync results back
    outputs_arr.assign(outputs);
    internals_arr.assign(internals);
}
|
|
|
|
|
|
|
|
void Layer::run(const std::vector<Mat> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
|
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
|
2018-09-06 18:26:47 +08:00
|
|
|
this->finalize(inputs, outputs);
|
|
|
|
this->forward(inputs, outputs, internals);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Out-of-line empty destructor; members clean themselves up.
Layer::~Layer() {}
|
|
|
|
|
|
|
|
bool Layer::getMemoryShapes(const std::vector<MatShape> &inputs,
|
|
|
|
const int requiredOutputs,
|
|
|
|
std::vector<MatShape> &outputs,
|
|
|
|
std::vector<MatShape> &internals) const
|
|
|
|
{
|
|
|
|
CV_Assert(inputs.size());
|
|
|
|
outputs.assign(std::max(requiredOutputs, (int)inputs.size()), inputs[0]);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2017-06-28 01:34:17 +08:00
|
|
|
/**
 * Lazily-created global mutex guarding the layer factory registry.
 *
 * Double-checked locking under the global initialization mutex; the Mutex is
 * intentionally never deleted so it stays valid during static destruction.
 */
static Mutex& getLayerFactoryMutex()
{
    static Mutex* volatile instance = NULL;
    if (instance == NULL)
    {
        cv::AutoLock lock(getInitializationMutex());
        if (instance == NULL)  // re-check under the lock
            instance = new Mutex();
    }
    return *instance;
}
|
|
|
|
|
2018-04-24 19:59:59 +08:00
|
|
|
// Registry type: maps a (lower-cased) layer type name to a stack of
// constructors; the back element is the most recently registered one.
typedef std::map<String, std::vector<LayerFactory::Constructor> > LayerFactory_Impl;
|
2017-06-28 01:34:17 +08:00
|
|
|
|
|
|
|
/**
 * Storage for the layer registry (function-local static singleton).
 * Callers should normally go through getLayerFactoryImpl(), which also
 * triggers one-time registration of the built-in layers.
 */
static LayerFactory_Impl& getLayerFactoryImpl_()
{
    static LayerFactory_Impl impl;
    return impl;
}
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2017-06-28 01:34:17 +08:00
|
|
|
/**
 * Access the layer registry, initializing the built-in layer set exactly
 * once (double-checked locking under the factory mutex).
 */
static LayerFactory_Impl& getLayerFactoryImpl()
{
    static LayerFactory_Impl* volatile instance = NULL;
    if (instance == NULL)
    {
        cv::AutoLock lock(getLayerFactoryMutex());
        if (instance == NULL)  // re-check under the lock
        {
            instance = &getLayerFactoryImpl_();
            initializeLayerFactory();  // register built-in layers once
        }
    }
    return *instance;
}
|
|
|
|
|
2018-04-24 19:59:59 +08:00
|
|
|
void LayerFactory::registerLayer(const String &type, Constructor constructor)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG_VALUE(type, "type", type.c_str());
|
|
|
|
|
2017-06-28 01:34:17 +08:00
|
|
|
cv::AutoLock lock(getLayerFactoryMutex());
|
2018-08-23 23:17:04 +08:00
|
|
|
String type_ = toLowerCase(type);
|
2018-04-24 19:59:59 +08:00
|
|
|
LayerFactory_Impl::iterator it = getLayerFactoryImpl().find(type_);
|
2017-06-26 18:35:51 +08:00
|
|
|
|
2018-04-24 19:59:59 +08:00
|
|
|
if (it != getLayerFactoryImpl().end())
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2018-04-24 19:59:59 +08:00
|
|
|
if (it->second.back() == constructor)
|
|
|
|
CV_Error(cv::Error::StsBadArg, "Layer \"" + type_ + "\" already was registered");
|
|
|
|
it->second.push_back(constructor);
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
2018-04-24 19:59:59 +08:00
|
|
|
getLayerFactoryImpl().insert(std::make_pair(type_, std::vector<Constructor>(1, constructor)));
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
2017-06-28 19:46:58 +08:00
|
|
|
void LayerFactory::unregisterLayer(const String &type)
|
2017-06-26 18:35:51 +08:00
|
|
|
{
|
2017-06-28 19:46:58 +08:00
|
|
|
CV_TRACE_FUNCTION();
|
|
|
|
CV_TRACE_ARG_VALUE(type, "type", type.c_str());
|
|
|
|
|
2017-06-28 01:34:17 +08:00
|
|
|
cv::AutoLock lock(getLayerFactoryMutex());
|
2018-08-23 23:17:04 +08:00
|
|
|
String type_ = toLowerCase(type);
|
2018-04-24 19:59:59 +08:00
|
|
|
|
|
|
|
LayerFactory_Impl::iterator it = getLayerFactoryImpl().find(type_);
|
|
|
|
if (it != getLayerFactoryImpl().end())
|
|
|
|
{
|
|
|
|
if (it->second.size() > 1)
|
|
|
|
it->second.pop_back();
|
|
|
|
else
|
|
|
|
getLayerFactoryImpl().erase(it);
|
|
|
|
}
|
2017-06-26 18:35:51 +08:00
|
|
|
}
|
|
|
|
|
2017-06-28 19:46:58 +08:00
|
|
|
/**
 * Instantiate a layer of the given type (case-insensitive) using the most
 * recently registered constructor. Returns an empty Ptr for unknown types.
 */
Ptr<Layer> LayerFactory::createLayerInstance(const String &type, LayerParams& params)
{
    CV_TRACE_FUNCTION();
    CV_TRACE_ARG_VALUE(type, "type", type.c_str());

    cv::AutoLock lock(getLayerFactoryMutex());
    String type_ = toLowerCase(type);
    LayerFactory_Impl::const_iterator it = getLayerFactoryImpl().find(type_);

    if (it == getLayerFactoryImpl().end())
        return Ptr<Layer>();  // NULL: no such layer type

    CV_Assert(!it->second.empty());
    return it->second.back()(params);  // newest registration wins
}
|
|
|
|
|
|
|
|
// Tags the node with the identifier of the backend that produced it.
BackendNode::BackendNode(int backendId) : backendId(backendId) {}
|
|
|
|
|
|
|
|
// Out-of-line empty destructor (stray trailing ';' removed — it formed an
// empty declaration and triggered -Wpedantic warnings).
BackendNode::~BackendNode() {}
|
|
|
|
|
|
|
|
// Records which backend/target pair this wrapper belongs to.
BackendWrapper::BackendWrapper(int backendId, int targetId)
    : backendId(backendId)
    , targetId(targetId)
{}
|
|
|
|
|
|
|
|
/**
 * Host-Mat constructor stub: concrete backend wrappers must provide this.
 */
BackendWrapper::BackendWrapper(int targetId, const cv::Mat& m)
{
    CV_UNUSED(targetId);
    CV_UNUSED(m);
    CV_Error(Error::StsNotImplemented,
             "Constructor of backend wrapper must be implemented");
}
|
|
|
|
|
|
|
|
/**
 * Reshaping constructor stub: concrete backend wrappers must provide this.
 */
BackendWrapper::BackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape)
{
    CV_UNUSED(base);
    CV_UNUSED(shape);
    CV_Error(Error::StsNotImplemented,
             "Constructor of backend wrapper must be implemented");
}
|
|
|
|
|
|
|
|
// Out-of-line empty destructor; nothing to release at the base level.
BackendWrapper::~BackendWrapper() {}
|
|
|
|
|
2018-03-12 23:42:53 +08:00
|
|
|
/**
 * Load a network, deducing the origin framework from @p _framework or from
 * the file extensions of @p _model / @p _config.
 *
 * Model and config may arrive swapped; each branch normalizes their order
 * before delegating to the framework-specific reader. Note that for a path
 * without a dot, rfind('.') yields npos and npos+1 == 0, so the "extension"
 * degenerates to the whole string — branch order below relies on exact
 * extension matches, which is preserved from the original dispatch.
 *
 * @throws cv::Error::StsError when no framework can be determined.
 */
Net readNet(const String& _model, const String& _config, const String& _framework)
{
    String framework = toLowerCase(_framework);
    String model = _model;
    String config = _config;
    const std::string modelExt = model.substr(model.rfind('.') + 1);
    const std::string configExt = config.substr(config.rfind('.') + 1);

    // Caffe: .caffemodel weights + .prototxt text description.
    if (framework == "caffe" || modelExt == "caffemodel" || configExt == "caffemodel" ||
        modelExt == "prototxt" || configExt == "prototxt")
    {
        if (modelExt == "prototxt" || configExt == "caffemodel")
            std::swap(model, config);
        return readNetFromCaffe(config, model);
    }
    // TensorFlow: .pb graph + optional .pbtxt text graph.
    if (framework == "tensorflow" || modelExt == "pb" || configExt == "pb" ||
        modelExt == "pbtxt" || configExt == "pbtxt")
    {
        if (modelExt == "pbtxt" || configExt == "pb")
            std::swap(model, config);
        return readNetFromTensorflow(model, config);
    }
    // Torch: a single .t7/.net file; accept it in either argument.
    if (framework == "torch" || modelExt == "t7" || modelExt == "net" ||
        configExt == "t7" || configExt == "net")
    {
        return readNetFromTorch(model.empty() ? config : model);
    }
    // Darknet: .weights blob + .cfg text description.
    if (framework == "darknet" || modelExt == "weights" || configExt == "weights" ||
        modelExt == "cfg" || configExt == "cfg")
    {
        if (modelExt == "cfg" || configExt == "weights")
            std::swap(model, config);
        return readNetFromDarknet(config, model);
    }
    // Intel Model Optimizer (DLDT): .bin weights + .xml topology.
    if (framework == "dldt" || modelExt == "bin" || configExt == "bin" ||
        modelExt == "xml" || configExt == "xml")
    {
        if (modelExt == "xml" || configExt == "bin")
            std::swap(model, config);
        return readNetFromModelOptimizer(config, model);
    }
    // ONNX: self-contained .onnx file.
    if (framework == "onnx" || modelExt == "onnx")
    {
        return readNetFromONNX(model);
    }
    CV_Error(Error::StsError, "Cannot determine an origin framework of files: " +
                              model + (config.empty() ? "" : ", " + config));
}
|
|
|
|
|
2018-07-11 17:48:34 +08:00
|
|
|
/**
 * Load a network from in-memory buffers for an explicitly named framework.
 *
 * Torch and DLDT have no buffer-based readers yet and raise
 * StsNotImplemented; an unrecognized name raises StsError.
 */
Net readNet(const String& _framework, const std::vector<uchar>& bufferModel,
            const std::vector<uchar>& bufferConfig)
{
    String framework = toLowerCase(_framework);
    if (framework == "caffe")
        return readNetFromCaffe(bufferConfig, bufferModel);
    if (framework == "tensorflow")
        return readNetFromTensorflow(bufferModel, bufferConfig);
    if (framework == "darknet")
        return readNetFromDarknet(bufferConfig, bufferModel);
    if (framework == "torch")
        CV_Error(Error::StsNotImplemented, "Reading Torch models from buffers");
    if (framework == "dldt")
        CV_Error(Error::StsNotImplemented, "Reading Intel's Model Optimizer models from buffers");
    CV_Error(Error::StsError, "Cannot determine an origin framework with a name " + framework);
}
|
|
|
|
|
2018-03-17 00:27:04 +08:00
|
|
|
/**
 * Thin convenience wrapper over Net::readFromModelOptimizer.
 */
Net readNetFromModelOptimizer(const String &xml, const String &bin)
{
    return Net::readFromModelOptimizer(xml, bin);
}
|
|
|
|
|
2018-09-04 04:20:02 +08:00
|
|
|
CV__DNN_INLINE_NS_END
|
2017-06-29 03:59:02 +08:00
|
|
|
}} // namespace
|