Merge pull request #25861 from Abdurrahheem:ash/torch-attention-export-fix-4x
Support for Unflatten operation required by Attention layer - 4.x #25861
### Pull Request Readiness Checklist
All test data and models for this PR are located in [#1190](https://github.com/opencv/opencv_extra/pull/1190).
This PR fixes an issue raised when importing a batched vanilla `Attention` layer from `PyTorch` via ONNX. The batched version of the `Attention` layer in PyTorch currently [has an unflatten operation inside](e3b3431c42/torch/nn/functional.py (L5500C17-L5500C31)). The `unflatten` operation causes an issue in the `reshape` layer (see `Reshape_2` in the graph below) due to incorrect output of the `slice` layer. This PR fixes the `slice` and `concat` layers in particular so that they handle the `unflatten` operation correctly.
<img width="673" alt="image" src="https://github.com/opencv/opencv/assets/44877829/5b612b31-657a-47f1-83a4-0ac35a950abd">
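For context, a minimal sketch of the export path that triggers this failure mode. Module configuration, shapes, and opset below are illustrative assumptions, not taken from the PR; the actual test models live in the opencv_extra PR linked above. A batched `nn.MultiheadAttention` goes through `unflatten` internally, and the ONNX exporter lowers that to a `Slice`/`Concat`/`Reshape` subgraph, which is exactly the pattern OpenCV's importer must handle:

```python
import cv2
import numpy as np
import torch
import torch.nn as nn

class Attn(nn.Module):
    def __init__(self):
        super().__init__()
        # Batched (batch_first) attention takes the code path containing unflatten.
        self.attn = nn.MultiheadAttention(embed_dim=32, num_heads=4, batch_first=True)

    def forward(self, x):
        out, _ = self.attn(x, x, x, need_weights=False)
        return out

x = torch.randn(2, 5, 32)  # (batch, seq, embed): batched input
torch.onnx.export(Attn().eval(), (x,), "pytorch_attention.onnx", opset_version=14)

# Importing with OpenCV DNN exercises the slice/concat path fixed by this PR.
net = cv2.dnn.readNetFromONNX("pytorch_attention.onnx")
net.setInput(x.numpy())
print(net.forward().shape)  # expected: (2, 5, 32)
```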
See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request
- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests, and test data in the opencv_extra repository, if applicable (the patch to opencv_extra has the same branch name)
- [x] The feature is well documented and sample code can be built with the project CMake
commit efbc9f0b66 (parent 9f5139b575)
The concat layer (`modules/dnn/src/layers/concat_layer.cpp`) now skips empty inputs when laying out output ranges:

```diff
@@ -302,6 +302,8 @@ public:
         ranges[cAxis].start = 0;
         for (size_t i = 0; i < inputs.size(); i++)
         {
+            if (inputs[i].empty())
+                continue;
             ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];
             for (int j = 0; j < outMat.dims; ++j)
             {
```
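Why this guard is needed: when `unflatten` is lowered through `Slice`, one of the concatenated inputs can be empty, and the range bookkeeping must not advance past it. A rough Python model of the patched loop (the function below is an illustrative stand-in, not OpenCV's internals):

```python
def concat_ranges(shapes, axis):
    # Empty inputs contribute no elements along the concat axis and are
    # skipped outright, mirroring the new `if (inputs[i].empty()) continue;`.
    start, ranges = 0, []
    for shape in shapes:
        if 0 in shape:              # empty input, e.g. from a zero-length slice
            ranges.append(None)     # occupies no range in the output
            continue
        end = start + shape[axis]
        ranges.append((start, end))
        start = end
    return ranges

# The empty (0, 3) input is skipped; the others get contiguous ranges.
print(concat_ranges([(2, 3), (0, 3), (1, 3)], axis=0))  # [(0, 2), None, (2, 3)]
```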
In the slice layer (`modules/dnn/src/layers/slice_layer.cpp`), `normalizeRange` no longer clamps a start index that equals the axis size, so an empty range produced by `unflatten` survives normalization:

```diff
@@ -69,10 +69,12 @@ Range normalizeRange(const Range& input_range, int n)
 {
     Range range = input_range;
 
-    range.start = std::min(std::max(range.start, -n), n - 1);
-    if (range.start < 0)
-    {
-        range.start += n;
+    if (range.start != n){
+        range.start = std::min(std::max(range.start, -n), n - 1);
+        if (range.start < 0)
+        {
+            range.start += n;
+        }
     }
 
     range.end = std::min(std::max(range.end, -n), n);
```
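The point of the new guard: an ONNX `Slice` emitted for `unflatten` can legitimately describe an empty range whose start equals the axis size `n`. The old clamp to `n - 1` turned that empty range `[n, n)` into a spurious one-element range `[n-1, n)`. A Python transcription of the patched logic (illustrative, covering only the lines shown in the hunk):

```python
def normalize_range(start, end, n):
    # New guard: start == n marks an empty slice and passes through unchanged.
    if start != n:
        start = min(max(start, -n), n - 1)   # clamp into [-n, n-1]
        if start < 0:
            start += n                       # wrap negative indices
    end = min(max(end, -n), n)               # end clamped as before
    return start, end

print(normalize_range(4, 4, 4))   # (4, 4): empty range, preserved by the fix
print(normalize_range(-2, 4, 4))  # (2, 4): negative start still normalized
# Before the patch, normalize_range(4, 4, 4) returned (3, 4): a bogus element.
```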
In the same file, the forward pass copies a slice only when its range is non-empty:

```diff
@@ -610,7 +612,9 @@ public:
         {
             for (size_t i = 0; i < outputs.size(); i++)
             {
-                inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+                if (finalSliceRanges[i][0].start != finalSliceRanges[i][0].end){
+                    inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+                }
             }
         }
         else
```
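With empty ranges now surviving normalization, the forward pass must avoid materializing them, so the guard simply skips the copy. In numpy terms (a stand-in for the idea, not the layer's actual code):

```python
import numpy as np

def slice_forward(inp, final_ranges, outputs):
    # Mirror of the patched loop: an output backed by an empty range
    # (start == stop on the first axis) is left untouched instead of copied into.
    for i, ranges in enumerate(final_ranges):
        if ranges[0].start != ranges[0].stop:
            outputs[i][...] = inp[tuple(ranges)]

inp = np.arange(12, dtype=np.float32).reshape(3, 4)
outs = [np.empty((0, 4), np.float32), np.empty((3, 4), np.float32)]
slice_forward(inp, [[slice(0, 0), slice(0, 4)], [slice(0, 3), slice(0, 4)]], outs)
```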
New regression tests (`modules/dnn/test/test_onnx_importer.cpp`) cover the PyTorch attention export and the standalone `unflatten` pattern:

```diff
@@ -3110,6 +3110,13 @@ TEST_P(Test_ONNX_layers, Attention) {
 TEST_P(Test_ONNX_layers, AttentionSingleHead) {
     testONNXModels("attention_single_head");
 }
+TEST_P(Test_ONNX_layers, PyTorchAttentionSingleHead){
+    testONNXModels("pytorch_attention_single_head");
+}
+
+TEST_P(Test_ONNX_layers, PyTorchUnflatten){
+    testONNXModels("unflatten");
+}
 
 TEST_P(Test_ONNX_nets, ViT_B_32) {
     applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_DEBUG_LONG);
```
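The `testONNXModels` helper pulls each named model and its reference blobs from the opencv_extra test data added in #1190. A rough equivalent via the Python bindings (the path follows the usual opencv_extra testdata convention, and the input shape is a placeholder assumption; the real reference input ships alongside the model):

```python
import cv2
import numpy as np

net = cv2.dnn.readNetFromONNX("testdata/dnn/onnx/models/unflatten.onnx")
inp = np.random.rand(1, 6, 4).astype(np.float32)  # placeholder shape
net.setInput(inp)
print(net.forward().shape)
```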