opencv/modules/dnn/test/test_graph_simplifier.cpp

// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
class Test_Graph_Simplifier : public ::testing::Test {
public:
    bool required;

    Test_Graph_Simplifier() : required(true) {}

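    // Runs `test` on an ONNX conformance node model, stored as
    // "<basename>/model.onnx" under the conformance test data prefix.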
    void test_conformance(const std::string &basename, const std::string &expected_layer) {
        test(basename + std::string("/model"), std::vector<std::string>{expected_layer}, std::string("dnn/onnx/conformance/node/"));
    }

    void test(const std::string &basename, const std::string &expected_layer) {
        test(basename, std::vector<std::string>{expected_layer});
    }

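    // Loads the ONNX model, collects the layer types produced by the importer
    // and checks that, after graph simplification, only the expected fused
    // layer(s) remain.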
    void test(const std::string &basename, const std::vector<std::string> &expected_layers, const std::string &model_path_prefix = std::string("dnn/onnx/models/")) {
        std::string model_path = findDataFile(model_path_prefix + basename + std::string(".onnx"), required);
        auto net = readNet(model_path);
        std::vector<std::string> layers;
        net.getLayerTypes(layers);
        // Remove infrastructure layers: Const, Identity (output layer) and __NetInputLayer__ (input layer).
        layers.erase(std::remove_if(layers.begin(), layers.end(),
                                    [] (const std::string &l) { return l == "Const" || l == "Identity" || l == "__NetInputLayer__"; }),
                     layers.end());
        EXPECT_EQ(layers, expected_layers);
    }
};
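
// A minimal standalone sketch of the check the helper above performs (the
// "gelu.onnx" file name is hypothetical, for illustration only):
//
//     cv::dnn::Net net = cv::dnn::readNet("gelu.onnx");
//     std::vector<std::string> types;
//     net.getLayerTypes(types);
//     // After import-time graph simplification, the expanded arithmetic
//     // subgraph should be reported as a single fused layer type, e.g. "Gelu".
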
TEST_F(Test_Graph_Simplifier, GeluSubGraph) {
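    // Gelu(x) = x * 0.5 * (1 + erf(x / sqrt(2))); the erf-based expanded
    // subgraph should fuse into a single Gelu layer.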
test("gelu", "Gelu");
}
TEST_F(Test_Graph_Simplifier, GeluApproximationSubGraph) {
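    // Tanh approximation: Gelu(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).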
test("gelu_approximation", "GeluApproximation");
}
TEST_F(Test_Graph_Simplifier, LayerNormSubGraph) {
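    // LayerNormalization(x) = (x - mean(x)) / sqrt(var(x) + epsilon) * scale + bias,
    // computed over the trailing (normalized) axes.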
test("layer_norm_expanded", "LayerNormalization");
Merge pull request #24544 from fengyuentau:layernorm_conformance dnn test: move layer norm tests into conformance tests #24544 Merge with https://github.com/opencv/opencv_extra/pull/1122 ## Motivation Some ONNX operators, such as `LayerNormalization`, `BatchNormalization` and so on, produce outputs for training (mean, stdev). So they have reference outputs of conformance tests for those training outputs as well. However, when it comes to inference, we do not need and produce those outputs for training here in dnn. Hence, output size does not match if we use dnn to infer those conformance models. This has become the barrier if we want to test these operators using their conformance tests. <!-- | Operator | Inference needed | Outputs (required - total) | Optional outputs for training? | | ----------------------- | ----------------------------------- | -------------------------- | ------------------------------ | | BatchNormalization | Yes | 1 - 3 | Yes | | Dropout | Maybe, can be eliminated via fusion | 1 - 2 | Yes | | GRU | Yes | 0 - 2 | No | | LSTM | Yes | 0 - 3 | No | | LayerNormalization | Yes | 1 - 3 | Yes | | MaxPool | Yes | 1 - 2 | Yes | | RNN | Yes | 0 - 2 | No | | SoftmaxCrossEntropyLoss | No | 1 - 2 | -- | --> **I checked all ONNX operators with optional outputs. Turns out there are only `BatchNormalization`, `Dropout`, `LayerNormalization` and `MaxPool` has optional outputs for training. All except `LayerNormalization` have models set for training mode and eval mode. Blame ONNX for that.** ## Solution In this pull request, we remove graph outputs if the graph looks like the following: ``` [X] [Scale] [Bias] [X] [Scale] [Bias] \ | / this patch \ | / LayerNormalization -----------> LayerNormalization / | \ | [Y] [Mean] [Stdev] [Y] ``` We can update conformance tests and turn on some cases as well if extending to more layers. Notes: 1. This workaround does not solve expanded function operators if they are fused into a single operator, such as `$onnx/onnx/backend/test/data/node/test_layer_normalization_2d_axis1_expanded`, but they can be run without fusion. Note that either dnn or onnxruntime does not fuse those expanded function operators. ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [x] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake
2023-11-20 16:19:24 +08:00
test("layer_norm_expanded_with_initializers", "LayerNormalization");
}
TEST_F(Test_Graph_Simplifier, ResizeSubgraph) {
    /* Test for 6 subgraphs:
        - GatherCastSubgraph
        - MulCastSubgraph
        - UpsampleSubgraph
        - ResizeSubgraph1
        - ResizeSubgraph2
        - ResizeSubgraph3
    */
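    // These models are ONNX exports of upsample/interpolate from different
    // PyTorch versions and opsets (see the file names); each unfused pattern
    // should collapse into a single Resize layer.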
test("upsample_unfused_torch1.2", std::vector<std::string>{"BatchNorm", "Resize"});
test("resize_nearest_unfused_opset11_torch1.3", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
test("resize_nearest_unfused_opset11_torch1.4", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
test("upsample_unfused_opset9_torch1.4", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
test("two_resizes_with_shared_subgraphs", std::vector<std::string>{"NaryEltwise", "Resize"});
}
TEST_F(Test_Graph_Simplifier, SoftmaxSubgraph) {
    /* Test for 3 subgraphs:
        - SoftMaxSubgraph
        - SoftMaxSubgraph2 (conformance)
        - LogSoftMaxSubgraph (conformance)
    */
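    // Softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)); the expanded
    // models build it from elementwise/reduction ops (e.g. ReduceMax, Sub, Exp,
    // ReduceSum, Div), and LogSoftmax is its logarithm.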
test("softmax_unfused", "Softmax");
test_conformance("test_softmax_example_expanded", "Softmax");
test_conformance("test_softmax_axis_2_expanded", "Softmax");
test_conformance("test_softmax_default_axis_expanded", "Softmax");
test_conformance("test_softmax_axis_0_expanded", "Softmax");
test_conformance("test_softmax_axis_1_expanded", "Softmax");
test_conformance("test_softmax_large_number_expanded", "Softmax");
test_conformance("test_softmax_negative_axis_expanded", "Softmax");
test_conformance("test_logsoftmax_axis_2_expanded", "Softmax");
test_conformance("test_logsoftmax_example_1_expanded", "Softmax");
test_conformance("test_logsoftmax_negative_axis_expanded", "Softmax");
test_conformance("test_logsoftmax_axis_0_expanded", "Softmax");
test_conformance("test_logsoftmax_axis_1_expanded", "Softmax");
test_conformance("test_logsoftmax_large_number_expanded", "Softmax");
test_conformance("test_logsoftmax_default_axis_expanded", "Softmax");
}
TEST_F(Test_Graph_Simplifier, HardSwishSubgraph) {
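    // HardSwish(x) = x * min(max(x + 3, 0), 6) / 6.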
test_conformance("test_hardswish_expanded", "HardSwish");
}
TEST_F(Test_Graph_Simplifier, CeluSubgraph) {
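    // Celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).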
test_conformance("test_celu_expanded", "Celu");
}
TEST_F(Test_Graph_Simplifier, NormalizeSubgraph) {
    /* Test for 6 subgraphs:
        - NormalizeSubgraph1
        - NormalizeSubgraph2
        - NormalizeSubgraph2_2
        - NormalizeSubgraph3
        - NormalizeSubgraph4
        - NormalizeSubgraph5
    */
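    // The fused Normalize layer performs L2 normalization here:
    // y = x / max(||x||_2, epsilon).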
test("reduceL2_subgraph_2", "Normalize");
test("reduceL2_subgraph", "Normalize");
test("normalize_fusion", "Normalize");
}
TEST_F(Test_Graph_Simplifier, BatchNormalizationSubgraph) {
    /* Test for 2 subgraphs:
        - BatchNormalizationSubgraph1
        - BatchNormalizationSubgraph2
    */
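    // Inference-time batch norm uses the stored running statistics:
    // y = scale * (x - running_mean) / sqrt(running_var + epsilon) + bias.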
test("frozenBatchNorm2d", "BatchNorm");
test("batch_norm_subgraph", "BatchNorm");
}
TEST_F(Test_Graph_Simplifier, ExpandSubgraph) {
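    // Expand broadcasts the input to a target shape (numpy-style broadcasting);
    // per its name, this model exercises the pattern with a negative batch dimension.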
test("expand_neg_batch", "Expand");
}
TEST_F(Test_Graph_Simplifier, MishSubgraph) {
    /* Test for 2 subgraphs:
        - SoftplusSubgraph
        - MishSubgraph
    */
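    // Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))).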
test("mish_no_softplus", "Mish");
test("mish", "Mish");
}
}}