diff --git a/modules/dnn/src/layers/einsum_layer.cpp b/modules/dnn/src/layers/einsum_layer.cpp
index 6faaae7ae2..7f8eb47112 100644
--- a/modules/dnn/src/layers/einsum_layer.cpp
+++ b/modules/dnn/src/layers/einsum_layer.cpp
@@ -547,7 +547,7 @@ public:
         MatShape realOutputDims = shape(result);
         size_t realProd = std::accumulate(realOutputDims.begin(), realOutputDims.end(), 1, std::multiplies());

-        CV_CheckEQ(reqProd, realProd, "Real output can not be shaped in to requred output");
+        CV_CheckEQ(reqProd, realProd, "Real output cannot be reshaped into the required output");

         // reduce dimentions
         result = result.reshape(1, einsumOutDims.size(), einsumOutDims.data());
@@ -1280,11 +1280,12 @@ Mat LayerEinsumImpl::pairwiseOperandProcess(
                 // Covered by ExplicitEinsumAsTensorContractionReshapeFinal.
                 output = output.reshape(1, reshaped_dims.size(), reshaped_dims.data());
             }
-        } else {
-            output = Transpose(
-                output,
-                outputDims,
-                outputPermutation);
+            else {
+                output = Transpose(
+                    output,
+                    outputDims,
+                    outputPermutation);
+            }
         }
     } else {  // This is the final pair - Transpose directly to the output ordering required and copy the contents to the op's output
         // not sure if this finalize shape is needed at all
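
Note on the second hunk: the patch re-attaches the `else` branch from the outer `if` to the inner one, so the `Transpose` fallback now runs when the inner condition is false (and the outer condition is true), instead of when the outer condition is false. The following is a minimal standalone sketch of that control-flow change; `outerCond`, `innerCond`, `doReshape()` and `doTranspose()` are hypothetical stand-ins, not OpenCV code or the actual conditions used in `pairwiseOperandProcess`.

```cpp
#include <iostream>

static void doReshape()   { std::cout << "reshape\n"; }
static void doTranspose() { std::cout << "transpose\n"; }

// Before the patch: the else binds to the *outer* if, so doTranspose()
// only runs when outerCond is false.
static void before(bool outerCond, bool innerCond)
{
    if (outerCond) {
        if (innerCond) {
            doReshape();
        }
    } else {
        doTranspose();
    }
}

// After the patch: the else binds to the *inner* if, so doTranspose()
// runs when outerCond is true but innerCond is false.
static void after(bool outerCond, bool innerCond)
{
    if (outerCond) {
        if (innerCond) {
            doReshape();
        }
        else {
            doTranspose();
        }
    }
}

int main()
{
    before(true, false); // prints nothing: neither branch is taken
    after(true, false);  // prints "transpose"
    return 0;
}
```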