diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 2a440a1284..133a1117ae 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -3230,14 +3230,54 @@ void ONNXImporter::parseQuantDequant(LayerParams& layerParams, const opencv_onnx
     addLayer(layerParams, node_proto);
 }
 
-void ONNXImporter::parseQConv(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
+void ONNXImporter::parseQConv(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto_)
 {
+    opencv_onnx::NodeProto node_proto = node_proto_;
     int ninputs = node_proto.input_size();
     CV_Assert(ninputs == 8 || ninputs == 9);
 
     Mat inp_sc = getBlob(node_proto, 1);
     Mat inp_zp = getBlob(node_proto, 2);
 
+    if (layerParams.has("pad"))
+    {
+        bool asymmetricPadding = false;
+        DictValue pads = layerParams.get("pad");
+        const int dims = pads.size() / 2;
+
+        for (int i = 0; i < dims; ++i)
+        {
+            if (pads.get<int>(i) != pads.get<int>(i + dims))
+            {
+                asymmetricPadding = true;
+                break;
+            }
+        }
+        if (asymmetricPadding && pads.size() == 4)
+        {
+            layerParams.erase("pad");
+            std::vector<int> paddings(4, 0);
+            for (int i = 0; i < dims; ++i)
+            {
+                paddings.push_back(pads.get<int>(i));
+                paddings.push_back(pads.get<int>(dims + i));
+            }
+            LayerParams padLp;
+            padLp.name = layerParams.name + "/pad";
+            padLp.type = "PaddingInt8";
+            padLp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
+            padLp.set("depth", CV_8S);
+            padLp.set("value", inp_zp.at<int8_t>(0));
+
+            opencv_onnx::NodeProto proto;
+            proto.add_input(node_proto.input(0));
+            proto.add_output(padLp.name);
+
+            addLayer(padLp, proto);
+            node_proto.set_input(0, padLp.name);
+        }
+    }
+
     Mat weights = getBlob(node_proto, 3);
     int outCn = weights.size[0];
     Mat w_scale = getBlob(node_proto, 4);
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 8503f55c25..f222f8683d 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -1752,6 +1752,8 @@ TEST_P(Test_ONNX_layers, Quantized_Convolution)
     testONNXModels("quantized_conv_uint8_weights", npy, 0.004, 0.02);
     testONNXModels("quantized_conv_int8_weights", npy, 0.03, 0.5);
    testONNXModels("quantized_conv_per_channel_weights", npy, 0.06, 0.4);
+
+    testONNXModels("quantized_conv_asymmetric_pads_int8_weights");
 }
 
 TEST_P(Test_ONNX_layers, Quantized_MatMul)
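
For context on what the new block computes: ONNX stores `pads` as all begin values followed by all end values (`[top, left, bottom, right]` for a 2D conv), while the `Padding`/`PaddingInt8` layer consumes `(begin, end)` pairs per input dimension, with the batch and channel dimensions left unpadded, which is why `paddings` starts with four zeros. A minimal stand-alone sketch of that re-ordering; `toPaddingPairs` is a hypothetical helper written for illustration, not an OpenCV API:

```cpp
#include <cassert>
#include <iostream>
#include <vector>

// Re-order ONNX-style pads [b0, b1, ..., e0, e1, ...] into the
// (begin, end)-pair layout consumed by the Padding layer, prefixing
// zero pads for the batch and channel dimensions.
std::vector<int> toPaddingPairs(const std::vector<int>& pads)
{
    assert(pads.size() % 2 == 0);
    const int dims = static_cast<int>(pads.size()) / 2;
    std::vector<int> paddings(4, 0); // no padding on N and C
    for (int i = 0; i < dims; ++i)
    {
        paddings.push_back(pads[i]);        // begin of spatial dim i
        paddings.push_back(pads[dims + i]); // end of spatial dim i
    }
    return paddings;
}

int main()
{
    // ONNX pads for a 2D conv: top=1, left=2, bottom=3, right=4.
    for (int v : toPaddingPairs({1, 2, 3, 4}))
        std::cout << v << ' '; // prints: 0 0 0 0 1 3 2 4
    std::cout << '\n';
}
```

The patch only takes this path when the pads are actually asymmetric (some `begin != end`); symmetric pads keep going through the convolution layer's own `pad` parameter, avoiding the extra layer.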
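The pad value matters as well: the inserted `PaddingInt8` layer fills the border with `inp_zp` rather than 0, because under affine quantization `q = round(x / scale) + zp` the real value 0.0 maps to the zero point, so a border of `inp_zp` dequantizes to the same zeros that float-model padding would produce. A toy check of that identity, with made-up scale and zero-point values:

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

// Affine quantization: q = round(x / scale) + zp.
int8_t quantize(float x, float scale, int8_t zp)
{
    return static_cast<int8_t>(std::lround(x / scale) + zp);
}

int main()
{
    const float scale = 0.05f; // hypothetical input scale
    const int8_t zp = -3;      // hypothetical input zero point
    // Real 0.0 quantizes to the zero point, so a border filled with
    // zp is exactly the quantized counterpart of float zero padding.
    std::cout << int(quantize(0.0f, scale, zp)) << '\n'; // prints -3
}
```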