Mirror of https://github.com/opencv/opencv.git (synced 2025-07-24 14:06:27 +08:00)
commit 9c0af1f675
parent 483e9f341c

    Enable more deconvolution layer configurations with IE backend
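Note: the padding checks throughout this patch revolve around the standard
transposed-convolution (deconvolution) output-size relation. A minimal sketch of
that arithmetic for one axis, for reference only (the helper name below is
hypothetical and not part of the patch):

    // Output extent of a transposed convolution along one axis, assuming the
    // usual relation  out = stride * (in - 1) + kernel - 2 * pad + adjustPad.
    // adjustPad is the extra output padding and must stay below stride
    // (this mirrors the CV_Assert in BaseConvolutionLayerImpl below).
    static int deconvOutSize(int in, int kernel, int stride, int pad, int adjustPad)
    {
        return stride * (in - 1) + kernel - 2 * pad + adjustPad;
    }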
@@ -213,8 +213,7 @@ PERF_TEST_P_(DNNTestNetwork, EAST_text_detection)
 
 PERF_TEST_P_(DNNTestNetwork, FastNeuralStyle_eccv16)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
+    if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
     processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", "", Mat(cv::Size(320, 240), CV_32FC3));
 }
@@ -122,7 +122,7 @@ public:
         else
         {
             ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = input->dims.size() - 1;
+            ieLayer.getParameters()["axis"] = (size_t)0;
             ieLayer.getParameters()["out_sizes"] = input->dims[0];
         }
         std::vector<size_t> shape(input->dims);
@@ -61,7 +61,7 @@ namespace dnn
 class BaseConvolutionLayerImpl : public ConvolutionLayer
 {
 public:
-    bool newWeightAndBias;
+    bool fusedWeights, fusedBias;
     std::vector<double> weightsMultipliers;
     BaseConvolutionLayerImpl(const LayerParams &params)
     {
@@ -90,7 +90,8 @@ public:
             CV_Assert(adjustPad.width < stride.width &&
                       adjustPad.height < stride.height);
         }
-        newWeightAndBias = false;
+        fusedWeights = false;
+        fusedBias = false;
     }
 
     virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
@@ -133,6 +134,8 @@ public:
             }
             pad = Size(pads_begin[1], pads_begin[0]);
         }
+        fusedWeights = false;
+        fusedBias = false;
     }
 
     bool hasBias() const
@@ -155,6 +158,8 @@ public:
         if (!w.empty() || !b.empty())
         {
             fuseWeights(w, b);
+            fusedWeights = fusedWeights || !w.empty();
+            fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
             return true;
         }
         return false;
@@ -215,7 +220,6 @@ public:
     std::vector<float> biasvec;
    std::vector<float> reluslope;
     Ptr<ActivationLayer> activ;
-    bool fusedBias;
 
 #ifdef HAVE_OPENCL
     Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
@@ -226,7 +230,6 @@ public:
 #endif
     ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
     {
-        fusedBias = false;
 #ifdef HAVE_OPENCL
         newActiv = false;
         activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
@@ -406,9 +409,6 @@ public:
             for (int i = 0; i < outCn; ++i)
                 biasvec[i] += b.at<float>(i);
         }
-
-        newWeightAndBias = !w.empty() || !b.empty();
-        fusedBias = hasBias() || !b.empty();
         biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
     }
 
@@ -475,12 +475,12 @@ public:
                                              InferenceEngine::Layout::NCDHW;
 
         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
-        if (newWeightAndBias)
+        if (fusedWeights)
         {
             if (weightsMat.isContinuous())
             {
-                Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
-                ieWeights = wrapToInfEngineBlob(fusedWeights, layout);
+                Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
+                ieWeights = wrapToInfEngineBlob(cvWeights, layout);
             }
             else
             {
@@ -490,8 +490,8 @@ public:
                 ieWeights->allocate();
 
                 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
-                Mat fusedWeights = weightsMat.colRange(0, newWeights.cols);
-                fusedWeights.copyTo(newWeights);
+                Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
+                cvWeights.copyTo(newWeights);
             }
         }
         InferenceEngine::Blob::Ptr ieBiases;
@@ -1015,17 +1015,18 @@ public:
                 }
             }
 
-            if ( newWeightAndBias )
+            if (fusedWeights)
             {
                 weightsMat.copyTo(umat_blobs[0]);
-                if ( fusedBias )
-                {
-                    if ( umat_blobs.size() < 2 )
-                        umat_blobs.resize(2);
-                    umat_blobs[1] = UMat(biasvec, true);
-                }
-                convolutionOp->setBias(fusedBias || hasBias());
-                newWeightAndBias = false;
+                fusedWeights = false;
+            }
+            if (fusedBias)
+            {
+                if ( umat_blobs.size() < 2 )
+                    umat_blobs.resize(2);
+                umat_blobs[1] = UMat(biasvec, true);
+                convolutionOp->setBias(true);
+                fusedBias = false;
             }
 
             if ( newActiv )
@@ -1070,7 +1071,7 @@ public:
             return convolutionOp->Forward(inpMat,
                                           inputs.size() == 2 ? inputs[1] : UMat(),
                                           umat_blobs[0],
-                                          (hasBias() || fusedBias) ? umat_blobs[1] : UMat(),
+                                          umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
                                           outMat,
                                           batch_size);
         }
@@ -1175,16 +1176,34 @@
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
+        const int group = numOutput / outGroupCn;
+
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
         {
             if (kernel_size.size() == 3)
                 CV_Error(Error::StsNotImplemented, "Unsupported deconvolution3D layer");
 
             if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width))
-                return false;
+            {
+                if (padMode.empty())
+                {
+                    if (preferableTarget != DNN_TARGET_CPU && group != 1)
+                    {
+                        if ((adjustPad.height && pad.height) || (adjustPad.width && pad.width))
+                            return false;
+                    }
+                    return pad.width >= adjustPad.width && pad.height >= adjustPad.height;
+                }
+                else if (padMode == "SAME")
+                {
+                    return kernel.width >= pad.width + 1 + adjustPad.width &&
+                           kernel.height >= pad.height + 1 + adjustPad.height;
+                }
+                else if (padMode == "VALID")
+                    return false;
+            }
 
-            const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
-            const int group = numOutput / outGroupCn;
             if (group != 1)
             {
                 return preferableTarget == DNN_TARGET_CPU;
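Note: the rewritten supportBackend() above accepts an adjusted-padding
deconvolution only when IE can still express it. A compact restatement of the
padding rule, as a sketch only (the helper name is hypothetical, and the extra
restriction on grouped layers for non-CPU targets is omitted for brevity):

    #include <string>
    #include <opencv2/core.hpp>  // assumed available, for cv::Size

    // Explicit padding must absorb the output adjustment; SAME padding needs a
    // large enough kernel; VALID padding with an adjustment stays unsupported.
    static bool ieDeconvPadSupported(const cv::Size& kernel, const cv::Size& pad,
                                     const cv::Size& adjustPad, const std::string& padMode)
    {
        if (padMode.empty())
            return pad.width >= adjustPad.width && pad.height >= adjustPad.height;
        if (padMode == "SAME")
            return kernel.width >= pad.width + 1 + adjustPad.width &&
                   kernel.height >= pad.height + 1 + adjustPad.height;
        return false;
    }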
@@ -1302,8 +1321,6 @@ public:
         {
             cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
         }
-
-        newWeightAndBias = !w.empty() || !b.empty();
     }
 
     class MatMulInvoker : public ParallelLoopBody
@@ -1571,14 +1588,15 @@ public:
 
         if (umat_weights.empty())
         {
-            if (newWeightAndBias)
-            {
+            if (fusedWeights)
                 weightsMat.copyTo(umat_weights);
-                if (fusedBias)
-                    biasesMat.copyTo(umat_biases);
-            }
+            else
+                transpose(blobs[0].reshape(1, inpCn), umat_weights);
+
+            if (fusedBias)
+                biasesMat.copyTo(umat_biases);
             else
             {
-                transpose(blobs[0].reshape(1, inpCn), umat_weights);
                 if (hasBias())
                     blobs[1].reshape(1, outCn).copyTo(umat_biases);
                 else
@@ -1778,6 +1796,19 @@ public:
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
+        auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
+        if (fusedWeights)
+        {
+            ieWeights = InferenceEngine::make_shared_blob<float>(
+                            InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
+                            ieWeights->dims());
+            ieWeights->allocate();
+
+            int inpCn = blobs[0].size[0];
+            Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
+            transpose(weightsMat, newWeights);
+        }
+
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
         const int group = numOutput / outGroupCn;
@@ -1788,14 +1819,23 @@ public:
         ieLayer.setStrides(strides);
         ieLayer.setDilation(dilations);
         ieLayer.setPaddingsBegin(pads_begin);
-        ieLayer.setPaddingsEnd(pads_end);
+
+        if (padMode.empty())
+        {
+            ieLayer.setPaddingsEnd({pads_end[0] - adjust_pads[0], pads_end[1] - adjust_pads[1]});
+        }
+        else if (padMode == "SAME")
+        {
+            ieLayer.setPaddingsEnd({kernel_size[0] - pads_begin[0] - 1 - adjust_pads[0],
+                                    kernel_size[1] - pads_begin[1] - 1 - adjust_pads[1]});
+        }
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)numOutput);
 
         InferenceEngine::Builder::Layer l = ieLayer;
-        addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
+        addConstantData("weights", ieWeights, l);
         if (hasBias())
-            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
+            addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
         return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
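Note: the setPaddingsEnd() values above fold adjust_pads into the builder by
shrinking the end padding, assuming IE's usual transposed-convolution
arithmetic  out = stride * (in - 1) + kernel - pads_begin - pads_end. A quick
numeric self-check under that assumption (illustrative only, not patch code):

    #include <cassert>

    int main()
    {
        const int in = 4, kernel = 3, stride = 2, pad = 1, adjustPad = 1;
        // Standard transposed-convolution output size:
        const int out = stride * (in - 1) + kernel - 2 * pad + adjustPad;        // 8
        // IE builder view with pads_begin = pad, pads_end = pad - adjustPad:
        const int outIE = stride * (in - 1) + kernel - pad - (pad - adjustPad);  // 8
        assert(out == outIE);
        return 0;
    }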
@@ -397,8 +397,9 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
     Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
     // Output image has values in range [-143.526, 148.539].
     float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 4e-5;
-    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.28 : 2e-3;
+    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.45 : 2e-3;
     processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
+    expectNoFallbacksFromIE(net);
 }
 
 INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets(true, true, false));
@@ -159,10 +159,6 @@ TEST_P(Deconvolution, Accuracy)
     Backend backendId = get<0>(get<7>(GetParam()));
     Target targetId = get<1>(get<7>(GetParam()));
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_CPU || targetId == DNN_TARGET_MYRIAD) &&
-        dilation.width == 2 && dilation.height == 2)
-        throw SkipTestException("");
-
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
         && hasBias && group != 1)
@@ -216,7 +212,7 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
 /*in size*/      Values(Size(5, 6)),
 /*kernel*/       Values(Size(3, 1), Size(1, 3)),
 /*pad*/          Values(Size(1, 0), Size(0, 1)),
-/*dilation*/     Values(Size(1, 1), Size(2, 2)),
+/*dilation*/     Values(Size(1, 1)),
 /*stride, adj. pad*/ Values(Vec4i(1,1, 0,0), Vec4i(2,2, 1,0), Vec4i(1,2, 0,1)),
 /*has bias*/ Bool(),
     dnnBackendsAndTargetsWithHalide()
@@ -172,10 +172,6 @@ TEST_P(Test_Torch_layers, run_depth_concat)
 
 TEST_P(Test_Torch_layers, run_deconv)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is disabled for OpenVINO 2018R4");
-#endif
     runTorchNet("net_deconv");
 }
 
@@ -398,10 +394,10 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
 // -model models/instance_norm/feathers.t7
 TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
+#if defined INF_ENGINE_RELEASE
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
+        throw SkipTestException("Test is disabled for MyriadX target");
 #endif
 
     checkBackend();