Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Alexander Alekhin 2021-03-24 18:55:24 +00:00
commit b62d015285
12 changed files with 209 additions and 11 deletions

View File

@@ -135,9 +135,9 @@ endif()
if(INF_ENGINE_TARGET)
if(NOT INF_ENGINE_RELEASE)
message(WARNING "InferenceEngine version has not been set, 2021.2 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
message(WARNING "InferenceEngine version has not been set, 2021.3 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
endif()
set(INF_ENGINE_RELEASE "2021020000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set(INF_ENGINE_RELEASE "2021030000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
)
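The cache entry above encodes the release in the YYYYAABBCC layout described in its help string. A minimal sketch of that encoding (illustrative only; the helper name is made up and is not part of the commit):

// YYYYAABBCC: year, then two digits each for major, minor and patch.
static inline long long encodeInfEngineRelease(int year, int major, int minor, int patch)
{
    return 1000000LL * year + 10000 * major + 100 * minor + patch;
}
// encodeInfEngineRelease(2021, 3, 0, 0) == 2021030000  -> the new default above
// encodeInfEngineRelease(2020, 1, 0, 2) == 2020010002  -> the example from the help string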

View File

@@ -235,6 +235,24 @@ Range normalize_axis_range(const Range& r, int axisSize)
return clamped;
}
static inline
bool isAllOnes(const MatShape &inputShape, int startPos, int endPos)
{
CV_Assert(!inputShape.empty());
CV_CheckGE((int) inputShape.size(), startPos, "");
CV_CheckGE(startPos, 0, "");
CV_CheckLE(startPos, endPos, "");
CV_CheckLE((size_t)endPos, inputShape.size(), "");
for (size_t i = startPos; i < endPos; i++)
{
if (inputShape[i] != 1)
return false;
}
return true;
}
CV__DNN_INLINE_NS_END
}
}
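The isAllOnes() helper introduced above returns true when every dimension in the half-open range [startPos, endPos) equals 1, which is how the eltwise changes below detect vector-like operands. A short illustration with hypothetical shapes (not part of the commit):

// MatShape is std::vector<int>; check only the trailing (spatial) dims.
MatShape vec  = {1, 64, 1, 1};   // per-channel "vector" operand
MatShape feat = {1, 64, 16, 16}; // regular feature map
CV_Assert( isAllOnes(vec,  2, (int)vec.size()));  // dims 2..3 are all 1
CV_Assert(!isAllOnes(feat, 2, (int)feat.size())); // 16x16 spatial dims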

View File

@@ -46,6 +46,7 @@
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
@@ -97,6 +98,7 @@ public:
: outputChannels(0)
{
setParamsFrom(params);
hasVecInput = false;
op = SUM;
if (params.has("operation"))
{
@@ -156,6 +158,9 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (hasVecInput && ELTWISE_CHANNNELS_SAME)
return backendId == DNN_BACKEND_OPENCV;
if (backendId == DNN_BACKEND_CUDA)
{
if(channelsModeInput == ELTWISE_CHANNNELS_INPUT_0 || channelsModeInput == ELTWISE_CHANNNELS_INPUT_0_TRUNCATE)
@@ -211,9 +216,6 @@ public:
{
CV_Assert(0 && "Internal error");
}
for (size_t j = 2; j < dims; j++)
CV_Assert(inputs[0][j] == inputs[i][j]);
}
channelsMode = variableChannels ? channelsModeInput : ELTWISE_CHANNNELS_SAME;
@@ -221,9 +223,56 @@ public:
outputs.assign(1, inputs[0]);
outputs[0][1] = numChannels;
if (dims > 2)
{
size_t vecIdx = 0;
bool isVecFound = false;
for (size_t i = 0; i < inputs.size(); i++)
{
bool allOnes = isAllOnes(inputs[i], 2, dims);
if (!allOnes && !isVecFound)
{
vecIdx = i;
isVecFound = true;
}
if (!allOnes && i != vecIdx)
{
for (size_t j = 2; j < dims; j++)
{
CV_Assert(inputs[vecIdx][j] == inputs[i][j]);
}
}
}
if (channelsModeInput == ELTWISE_CHANNNELS_SAME && isVecFound)
{
for (size_t j = 2; j < dims; j++)
{
outputs[0][j] = inputs[vecIdx][j];
}
}
}
return false;
}
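// Worked example (illustrative shapes, not part of the diff): with
// ELTWISE_CHANNNELS_SAME and inputs {1, 64, 32, 32} and {1, 64, 1, 1},
// only the second input is all ones past dim 1, so vecIdx = 0, the
// per-dimension equality assert is skipped for the all-ones operand,
// and outputs[0] takes its trailing dims from inputs[vecIdx], i.e. {1, 64, 32, 32}.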
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape inpShape = shape(inputs[i].size);
if (isAllOnes(inpShape, 2, inputs[i].dims))
{
hasVecInput = true;
return;
}
}
}
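// Note (a reading of the change, not part of the diff): finalize() sets hasVecInput
// as soon as one input has all-ones trailing dims, e.g. {1, 64, 1, 1}; that flag
// later makes forward_ocl() return false (see the TODO below), so the broadcasting
// case currently runs only through the plain CPU path.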
class EltwiseInvoker : public ParallelLoopBody
{
@@ -516,6 +565,9 @@ public:
if ((inputs_.depth() == CV_16S && op != SUM) || (channelsMode != ELTWISE_CHANNNELS_SAME))
return false;
if (hasVecInput)
return false; // TODO not implemented yet: https://github.com/opencv/opencv/pull/19477
inputs_.getUMatVector(inputs);
outputs_.getUMatVector(outputs);
@@ -616,6 +668,47 @@ public:
CV_Assert(outputs.size() == 1);
const int nstripes = getNumThreads();
if (channelsModeInput == ELTWISE_CHANNNELS_SAME && inputs[0].dims > 2)
{
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape inpShape = shape(inputs[i].size);
bool allOnes = isAllOnes(inpShape, 2, inputs[i].dims);
if (allOnes)
{
Mat tmpInput = inputs[i];
MatShape outShape = shape(outputs[0].size);
size_t xSize = outShape[2];
for (size_t j = 3; j < outShape.size(); j++)
xSize *= outShape[j];
int dimVec[3] = {outShape[0], outShape[1], (int) xSize};
std::vector<int> matSizesVec(&dimVec[0], &dimVec[0] + 3);
inputs[i] = Mat(matSizesVec, tmpInput.type());
std::vector<int> idx(outShape.size(), 0);
std::vector<int> outIdx(inpShape.size(), 0);
for (size_t j = 0; j < outShape[0]; j++)
{
outIdx[0] = idx[0] = j;
for(size_t k = 0; k < outShape[1]; k++)
{
outIdx[1] = idx[1] = k;
for (size_t x = 0; x < xSize; x++)
{
outIdx[2] = x;
inputs[i].at<float>(outIdx.data()) = tmpInput.at<float>(idx.data());
}
}
}
inputs[i] = inputs[i].reshape(0, outShape);
}
}
}
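// Worked example (illustrative shapes, not part of the diff): with output shape
// {1, 64, 32, 32} and an all-ones input of shape {1, 64, 1, 1}, xSize = 32 * 32,
// a temporary {1, 64, 1024} Mat is filled by copying the single value stored at
// (n, c) into every spatial position, and the result is reshaped back to
// {1, 64, 32, 32} before EltwiseInvoker::run() consumes it.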
EltwiseInvoker::run(*this,
&inputs[0], (int)inputs.size(), outputs[0],
nstripes);
@@ -795,6 +888,9 @@ public:
}
Ptr<ActivationLayer> activ;
private:
bool hasVecInput;
};
Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)

View File

@@ -29,10 +29,11 @@
#define INF_ENGINE_RELEASE_2020_4 2020040000
#define INF_ENGINE_RELEASE_2021_1 2021010000
#define INF_ENGINE_RELEASE_2021_2 2021020000
#define INF_ENGINE_RELEASE_2021_3 2021030000
#ifndef INF_ENGINE_RELEASE
#warning("IE version have not been provided via command-line. Using 2021.2 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_2
#warning("IE version have not been provided via command-line. Using 2021.3 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_3
#endif
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
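The INF_ENGINE_VER_MAJOR_* comparison macros divide out the four trailing BBCC digits; a quick arithmetic check of the new default (not part of the diff):

// 2021030000 / 10000 == 202103 and 2021020000 / 10000 == 202102, so with the new
// default INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2) is true, and the
// INF_ENGINE_VER_MAJOR_EQ(2021030000) guards used by the tests below match.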

View File

@@ -12,6 +12,7 @@ Implementation of Tensorflow models parser
#include "../precomp.hpp"
#include <opencv2/core/utils/logger.defines.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
#include <opencv2/core/utils/logger.hpp>
@@ -305,7 +306,7 @@ bool hasAllOnes(const Mat &inputs, int startPos, int endPos)
for (int i = startPos; i < endPos; i++)
{
if (inputs.at<int>(i) != 1 || inputs.at<int>(i)!= -1)
if (inputs.at<int>(i) != 1 && inputs.at<int>(i) != -1)
return false;
}
return true;
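The operator change is the actual fix: no integer equals both 1 and -1, so the old condition held for every element and hasAllOnes() rejected any range it inspected. A minimal truth check with hypothetical values (not part of the commit):

//   x = 1:   (x != 1 || x != -1) -> true    (x != 1 && x != -1) -> false
//   x = -1:  (x != 1 || x != -1) -> true    (x != 1 && x != -1) -> false
//   x = 7:   (x != 1 || x != -1) -> true    (x != 1 && x != -1) -> true
// With &&, the loop returns false only for entries that are neither 1 nor -1,
// so dims equal to 1 or to -1 (dynamic) are accepted.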
@@ -1825,6 +1826,7 @@ void TFImporter::parseNode(const tensorflow::NodeDef& layer_)
{
// Check if all the inputs have the same shape.
bool equalInpShapes = true;
bool isShapeOnes = false;
MatShape outShape0;
for (int ii = 0; ii < num_inputs && !netInputShapes.empty(); ii++)
{
@@ -1845,12 +1847,14 @@ void TFImporter::parseNode(const tensorflow::NodeDef& layer_)
else if (outShape != outShape0)
{
equalInpShapes = false;
isShapeOnes = isAllOnes(outShape, 2, outShape.size()) ||
isAllOnes(outShape0, 2, outShape0.size());
break;
}
}
int id;
if (equalInpShapes || netInputShapes.empty())
if (equalInpShapes || netInputShapes.empty() || (!equalInpShapes && isShapeOnes))
{
layerParams.set("operation", type == "RealDiv" ? "div" : "prod");
id = dstNet.addLayer(name, "Eltwise", layerParams);
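// Illustration (hypothetical shapes, not part of the diff): multiplying a
// {1, 64, 32, 32} tensor by a {1, 64, 1, 1} tensor leaves equalInpShapes false,
// but isShapeOnes is true for the second operand, so the Mul/RealDiv node is still
// mapped to an Eltwise layer and relies on the broadcasting support added to the
// eltwise layer earlier in this commit.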

View File

@@ -30,6 +30,7 @@
#define CV_TEST_TAG_DNN_SKIP_IE_2019R1_1 "dnn_skip_ie_2019r1_1"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R2 "dnn_skip_ie_2019r2"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R3 "dnn_skip_ie_2019r3"
#define CV_TEST_TAG_DNN_SKIP_IE_CPU "dnn_skip_ie_cpu"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL "dnn_skip_ie_ocl"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16 "dnn_skip_ie_ocl_fp16"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2 "dnn_skip_ie_myriad2"

View File

@@ -453,13 +453,13 @@ void initDNNTests()
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER,
#endif
""
CV_TEST_TAG_DNN_SKIP_IE_CPU
);
#endif
registerGlobalSkipTag(
// see validateVPUType(): CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16
);
#endif
#ifdef HAVE_VULKAN
registerGlobalSkipTag(
CV_TEST_TAG_DNN_SKIP_VULKAN

View File

@@ -727,6 +727,10 @@ TEST_P(Test_Darknet_layers, shortcut)
TEST_P(Test_Darknet_layers, upsample)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
testDarknetLayer("upsample");
}

View File

@@ -1583,6 +1583,11 @@ TEST_P(Test_Caffe_layers, Interp)
TEST_P(Test_Caffe_layers, DISABLED_Interp) // requires patched protobuf (available in OpenCV source tree only)
#endif
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);

View File

@@ -440,11 +440,19 @@ TEST_P(Test_ONNX_layers, BatchNormalization3D)
TEST_P(Test_ONNX_layers, BatchNormalizationUnfused)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
testONNXModels("frozenBatchNorm2d");
}
TEST_P(Test_ONNX_layers, BatchNormalizationSubgraph)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
testONNXModels("batch_norm_subgraph");
}
@@ -795,6 +803,13 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
TEST_P(Test_ONNX_layers, GatherMultiOutput)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
#if defined(INF_ENGINE_RELEASE)
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);
@@ -891,6 +906,7 @@ TEST_P(Test_ONNX_layers, PoolConv1d)
TEST_P(Test_ONNX_layers, ConvResizePool1d)
{
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -898,7 +914,12 @@ TEST_P(Test_ONNX_layers, ConvResizePool1d)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#if INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
}
#endif
testONNXModels("conv_resize_pool_1d");
}

View File

@@ -205,6 +205,17 @@ TEST_P(Test_TensorFlow_layers, eltwise)
runTensorFlowNet("eltwise_sub");
}
TEST_P(Test_TensorFlow_layers, eltwise_add_vec)
{
runTensorFlowNet("eltwise_add_vec");
}
TEST_P(Test_TensorFlow_layers, eltwise_mul_vec)
{
runTensorFlowNet("eltwise_mul_vec");
}
TEST_P(Test_TensorFlow_layers, channel_broadcast)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -219,6 +230,12 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat)
TEST_P(Test_TensorFlow_layers, concat_axis_1)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
runTensorFlowNet("concat_axis_1");
}
@@ -279,6 +296,10 @@ TEST_P(Test_TensorFlow_layers, batch_norm_10)
}
TEST_P(Test_TensorFlow_layers, batch_norm_11)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // nan
#endif
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
runTensorFlowNet("mvn_batch_norm_1x1");
@@ -488,6 +509,11 @@ TEST_P(Test_TensorFlow_layers, reshape_nchw)
runTensorFlowNet("reshape_nchw");
}
TEST_P(Test_TensorFlow_layers, reshape_conv)
{
runTensorFlowNet("reshape_conv");
}
TEST_P(Test_TensorFlow_layers, leaky_relu)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
@@ -1083,12 +1109,20 @@ TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
// TF case: align_corners=False, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
runTensorFlowNet("resize_bilinear");
}
// TF case: align_corners=True, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear_align_corners)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
runTensorFlowNet("resize_bilinear",
false, 0.0, 0.0, false, // default parameters
"_align_corners");

View File

@@ -282,6 +282,15 @@ TEST_P(Test_Torch_layers, net_padding)
TEST_P(Test_Torch_layers, net_non_spatial)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // crash
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
#endif
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
@@ -592,6 +601,11 @@ private:
TEST_P(Test_Torch_layers, upsampling_nearest)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO
#endif
// Test a custom layer.
CV_DNN_REGISTER_LAYER_CLASS(SpatialUpSamplingNearest, SpatialUpSamplingNearestLayer);
try