Merge pull request #13389 from dkurt:dnn_tf_eltwise_sub

This commit is contained in:
Alexander Alekhin 2018-12-07 13:54:09 +00:00
commit 92e86292dd
3 changed files with 14 additions and 33 deletions

View File

@@ -98,7 +98,8 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty()));
(backendId == DNN_BACKEND_INFERENCE_ENGINE &&
(preferableTarget != DNN_TARGET_MYRIAD || coeffs.empty()));
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -427,6 +428,7 @@ public:
lp.type = "Eltwise";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::EltwiseLayer> ieLayer(new InferenceEngine::EltwiseLayer(lp));
ieLayer->coeff = coeffs;
if (op == SUM)
ieLayer->_operation = InferenceEngine::EltwiseLayer::Sum;
else if (op == PROD)

View File

@@ -939,7 +939,7 @@ void TFImporter::populateNet(Net dstNet)
if (getDataLayout(name, data_layouts) == DATA_LAYOUT_UNKNOWN)
data_layouts[name] = DATA_LAYOUT_NHWC;
}
else if (type == "BiasAdd" || type == "Add")
else if (type == "BiasAdd" || type == "Add" || type == "Sub")
{
bool haveConst = false;
for(int ii = 0; !haveConst && ii < layer.input_size(); ++ii)
@@ -953,6 +953,8 @@ void TFImporter::populateNet(Net dstNet)
{
Mat values = getTensorContent(getConstBlob(layer, value_id));
CV_Assert(values.type() == CV_32FC1);
if (type == "Sub")
values *= -1.0f;
int id;
if (values.total() == 1) // is a scalar.
@@ -973,6 +975,12 @@ void TFImporter::populateNet(Net dstNet)
else
{
layerParams.set("operation", "sum");
if (type == "Sub")
{
static float subCoeffs[] = {1.f, -1.f};
layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
}
int id = dstNet.addLayer(name, "Eltwise", layerParams);
layer_id[name] = id;
@@ -985,36 +993,6 @@ void TFImporter::populateNet(Net dstNet)
}
}
}
else if (type == "Sub")
{
bool haveConst = false;
for(int ii = 0; !haveConst && ii < layer.input_size(); ++ii)
{
Pin input = parsePin(layer.input(ii));
haveConst = value_id.find(input.name) != value_id.end();
}
CV_Assert(haveConst);
Mat values = getTensorContent(getConstBlob(layer, value_id));
CV_Assert(values.type() == CV_32FC1);
values *= -1.0f;
int id;
if (values.total() == 1) // is a scalar.
{
layerParams.set("shift", values.at<float>(0));
id = dstNet.addLayer(name, "Power", layerParams);
}
else // is a vector
{
layerParams.blobs.resize(1, values);
id = dstNet.addLayer(name, "Shift", layerParams);
}
layer_id[name] = id;
// one input only
connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
}
else if (type == "MatMul")
{
CV_Assert(layer.input_size() == 2);

View File

@@ -139,9 +139,10 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet("keras_pad_concat");
}
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
TEST_P(Test_TensorFlow_layers, eltwise)
{
runTensorFlowNet("eltwise_add_mul");
runTensorFlowNet("eltwise_sub");
}
TEST_P(Test_TensorFlow_layers, pad_and_concat)