mirror of
https://github.com/opencv/opencv.git
synced 2025-06-07 17:44:04 +08:00
Merge pull request #1165 from fengyuentau:gold_yolo
[BugFix] dnn (ONNX): Force dropping constant inputs in parseClip if they are shared #25319 Resolves https://github.com/opencv/opencv/issues/25278 Merge with https://github.com/opencv/opencv_extra/pull/1165 In Gold-YOLO, `Div` has a constant input `B=6` which is then parsed into a `Const` layer in the ONNX importer, but `Clip` also has the shared constant input `max=6` which is already a `Const` layer and then connected to the `Elementwise` layer. This should not happen because, in the `forward()` of the `Elementwise` layer, the legacy code goes through and applies the activation to each input. More details on https://github.com/opencv/opencv/issues/25278#issuecomment-2032199630. ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable. Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in:
parent
13c95efa74
commit
55d7e3f8cc
@ -91,7 +91,8 @@ class ONNXImporter
|
|||||||
|
|
||||||
void addConstant(const std::string& name, const Mat& blob);
|
void addConstant(const std::string& name, const Mat& blob);
|
||||||
void addLayer(LayerParams& layerParams,
|
void addLayer(LayerParams& layerParams,
|
||||||
const opencv_onnx::NodeProto& node_proto);
|
const opencv_onnx::NodeProto& node_proto,
|
||||||
|
int num_inputs = std::numeric_limits<int>::max());
|
||||||
void setParamsDtype(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
|
void setParamsDtype(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto);
|
||||||
|
|
||||||
void lstm_extractConsts(LayerParams& layerParams, const opencv_onnx::NodeProto& lstm_proto, size_t idx, int* blobShape_, int size);
|
void lstm_extractConsts(LayerParams& layerParams, const opencv_onnx::NodeProto& lstm_proto, size_t idx, int* blobShape_, int size);
|
||||||
@ -617,7 +618,8 @@ ONNXImporter::TensorInfo ONNXImporter::getBlobExtraInfo(const std::string& input
|
|||||||
}
|
}
|
||||||
|
|
||||||
void ONNXImporter::addLayer(LayerParams& layerParams,
|
void ONNXImporter::addLayer(LayerParams& layerParams,
|
||||||
const opencv_onnx::NodeProto& node_proto)
|
const opencv_onnx::NodeProto& node_proto,
|
||||||
|
int num_inputs)
|
||||||
{
|
{
|
||||||
int depth = layerParams.get<int>("depth", CV_32F);
|
int depth = layerParams.get<int>("depth", CV_32F);
|
||||||
int id = dstNet.addLayer(layerParams.name, layerParams.type, depth, layerParams);
|
int id = dstNet.addLayer(layerParams.name, layerParams.type, depth, layerParams);
|
||||||
@ -632,7 +634,8 @@ void ONNXImporter::addLayer(LayerParams& layerParams,
|
|||||||
|
|
||||||
std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
|
std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
|
||||||
int inpNum = 0;
|
int inpNum = 0;
|
||||||
for (int j = 0; j < node_proto.input_size(); j++)
|
num_inputs = std::min(node_proto.input_size(), num_inputs);
|
||||||
|
for (int j = 0; j < num_inputs; j++)
|
||||||
{
|
{
|
||||||
const std::string& input_name = node_proto.input(j);
|
const std::string& input_name = node_proto.input(j);
|
||||||
IterLayerId_t layerId = layer_id.find(input_name);
|
IterLayerId_t layerId = layer_id.find(input_name);
|
||||||
@ -1799,7 +1802,7 @@ void ONNXImporter::parseClip(LayerParams& layerParams, const opencv_onnx::NodePr
|
|||||||
|
|
||||||
layerParams.set("min_value", layerParams.get<float>("min", min_value));
|
layerParams.set("min_value", layerParams.get<float>("min", min_value));
|
||||||
layerParams.set("max_value", layerParams.get<float>("max", max_value));
|
layerParams.set("max_value", layerParams.get<float>("max", max_value));
|
||||||
addLayer(layerParams, node_proto);
|
addLayer(layerParams, node_proto, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ONNXImporter::parseLeakyRelu(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
|
void ONNXImporter::parseLeakyRelu(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
|
||||||
|
@ -3096,6 +3096,10 @@ TEST_P(Test_ONNX_layers, MatMulAddFusion) {
|
|||||||
testONNXModels("biased_matmul", npy, l1, lInf);
|
testONNXModels("biased_matmul", npy, l1, lInf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST_P(Test_ONNX_layers, ClipDivSharedConstant) {
|
||||||
|
testONNXModels("clip_div_shared_constant");
|
||||||
|
}
|
||||||
|
|
||||||
INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());
|
INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());
|
||||||
|
|
||||||
}} // namespace
|
}} // namespace
|
||||||
|
Loading…
Reference in New Issue
Block a user