// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "test_precomp.hpp"

namespace opencv_test { namespace {

class Test_Graph_Simplifier : public ::testing::Test {
public:
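    // Passed to findDataFile() below; when true, a missing model file makes the
    // lookup throw (the OpenCV test framework then reports the test as skipped)
    // instead of returning a best-guess path.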
    bool required;

    Test_Graph_Simplifier() : required(true) {}
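
    // ONNX conformance cases are stored one per directory, as
    // dnn/onnx/conformance/node/<basename>/model.onnx, hence the "/model" suffix.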
    void test_conformance(const std::string &basename, const std::string &expected_layer) {
        test(basename + std::string("/model"), std::vector<std::string>{expected_layer}, std::string("dnn/onnx/conformance/node/"));
    }

    void test(const std::string &basename, const std::string &expected_layer) {
        test(basename, std::vector<std::string>{expected_layer});
    }

    void test(const std::string &basename, const std::vector<std::string> &expected_layers, const std::string &model_path_prefix = std::string("dnn/onnx/models/")) {
        std::string model_path = findDataFile(model_path_prefix + basename + std::string(".onnx"), required);
        auto net = readNet(model_path);

        std::vector<std::string> layers;
        net.getLayerTypes(layers);
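        // getLayerTypes() collects the distinct layer type names used in the net,
        // so a successfully simplified subgraph shows up as just its fused type.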

        // remove Const, Identity (output layer), __NetInputLayer__ (input layer)
        layers.erase(std::remove_if(layers.begin(), layers.end(), [] (const std::string &l) { return l == "Const" || l == "Identity" || l == "__NetInputLayer__"; }), layers.end());

        // Instead of 'Tile', 'Expand' etc. we may now have 'Tile2', 'Expand2' etc.
        // We should correctly match them with the respective patterns
        for (auto& l : layers) {
            if (!l.empty() && l[l.size()-1] == '2')
                l = l.substr(0, l.size()-1);
        }
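        // e.g. {"Const", "Gelu2", "__NetInputLayer__"} is reduced to {"Gelu"} by the
        // filtering and suffix stripping above before the comparison below.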

        EXPECT_EQ(layers, expected_layers);
    }
};
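
// Each fixture test below follows the same pattern; a hypothetical new case would be:
//     TEST_F(Test_Graph_Simplifier, FooSubGraph) {
//         test("foo", "Foo");  // expects dnn/onnx/models/foo.onnx to fuse into a single 'Foo' layer
//     }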

TEST_F(Test_Graph_Simplifier, GeluSubGraph) {
    test("gelu", "Gelu");
    test("bias_gelu", std::vector<std::string>{"Gelu", "NaryEltwise"});
}

TEST_F(Test_Graph_Simplifier, GeluApproximationSubGraph) {
    test("gelu_approximation", "GeluApproximation");
}

TEST_F(Test_Graph_Simplifier, LayerNormSubGraph) {
    test("layer_norm_expanded", "LayerNormalization");
    test("layer_norm_expanded_with_initializers", "LayerNormalization");
}

TEST_F(Test_Graph_Simplifier, LayerNormNoFusionSubGraph) {
    test("layer_norm_no_fusion", std::vector<std::string>{"NaryEltwise", "Reduce", "Sqrt"});
}

TEST_F(Test_Graph_Simplifier, ResizeSubgraph) {
    /* Test for 6 subgraphs:
        - GatherCastSubgraph
        - MulCastSubgraph
        - UpsampleSubgraph
        - ResizeSubgraph1
        - ResizeSubgraph2
        - ResizeSubgraph3
    */
    test("upsample_unfused_torch1.2", std::vector<std::string>{"BatchNorm", "Resize"});
    test("resize_nearest_unfused_opset11_torch1.3", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
    test("resize_nearest_unfused_opset11_torch1.4", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
    test("upsample_unfused_opset9_torch1.4", std::vector<std::string>{"BatchNorm", "Convolution", "Resize"});
    test("two_resizes_with_shared_subgraphs", std::vector<std::string>{"NaryEltwise", "Resize"});
}

TEST_F(Test_Graph_Simplifier, SoftmaxSubgraph) {
    /* Test for 3 subgraphs:
        - SoftMaxSubgraph
        - SoftMaxSubgraph2 (conformance)
        - LogSoftMaxSubgraph (conformance)
    */
    test("softmax_unfused", "Softmax");
    test_conformance("test_softmax_example_expanded", "Softmax");
    test_conformance("test_softmax_axis_2_expanded", "Softmax");
    test_conformance("test_softmax_default_axis_expanded", "Softmax");
    test_conformance("test_softmax_axis_0_expanded", "Softmax");
    test_conformance("test_softmax_axis_1_expanded", "Softmax");
    test_conformance("test_softmax_large_number_expanded", "Softmax");
    test_conformance("test_softmax_negative_axis_expanded", "Softmax");
    test_conformance("test_logsoftmax_axis_2_expanded", "Softmax");
    test_conformance("test_logsoftmax_example_1_expanded", "Softmax");
    test_conformance("test_logsoftmax_negative_axis_expanded", "Softmax");
    test_conformance("test_logsoftmax_axis_0_expanded", "Softmax");
    test_conformance("test_logsoftmax_axis_1_expanded", "Softmax");
    test_conformance("test_logsoftmax_large_number_expanded", "Softmax");
    test_conformance("test_logsoftmax_default_axis_expanded", "Softmax");
}

TEST_F(Test_Graph_Simplifier, HardSwishSubgraph) {
    test_conformance("test_hardswish_expanded", "HardSwish");
}

TEST_F(Test_Graph_Simplifier, CeluSubgraph) {
    test_conformance("test_celu_expanded", "Celu");
}

TEST_F(Test_Graph_Simplifier, NormalizeSubgraph) {
    /* Test for 6 subgraphs:
        - NormalizeSubgraph1
        - NormalizeSubgraph2
        - NormalizeSubgraph2_2
        - NormalizeSubgraph3
        - NormalizeSubgraph4
        - NormalizeSubgraph5
    */
    test("reduceL2_subgraph_2", "Normalize");
    test("reduceL2_subgraph", "Normalize");
    test("normalize_fusion", "Normalize");
}

TEST_F(Test_Graph_Simplifier, BatchNormalizationSubgraph) {
    /* Test for 2 subgraphs:
        - BatchNormalizationSubgraph1
        - BatchNormalizationSubgraph2
    */
    test("frozenBatchNorm2d", "BatchNorm");
    test("batch_norm_subgraph", "BatchNorm");
}

TEST_F(Test_Graph_Simplifier, ExpandSubgraph) {
    test("expand_neg_batch", "Expand");
}

TEST_F(Test_Graph_Simplifier, MishSubgraph) {
    /* Test for 2 subgraphs:
        - SoftplusSubgraph
        - MishSubgraph
    */
    test("mish_no_softplus", "Mish");
    test("mish", "Mish");
}

TEST_F(Test_Graph_Simplifier, AttentionSubgraph) {
    /* Test for 2 subgraphs:
        - AttentionSubgraph
        - AttentionSingleHeadSubgraph
    */
    test("attention", "Attention");
    test("attention_single_head", "Attention");
}

TEST_F(Test_Graph_Simplifier, BiasedMatMulSubgraph) {
    /* Test for 1 subgraph:
        - BiasedMatMulSubgraph
    */
    test("biased_matmul", "MatMul");
}

}}