From 5bdc41964a14d33550c5c382bab979347026743b Mon Sep 17 00:00:00 2001
From: Abduragim Shtanchaev <44877829+Abdurrahheem@users.noreply.github.com>
Date: Wed, 15 May 2024 11:50:03 +0400
Subject: [PATCH] Merge pull request #25487 from Abdurrahheem:ash/01D-additional-fixes

Additional fixes to 0/1D tests #25487

This has additional fixes required for 0/1D tests.

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable.
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
---
 modules/core/src/minmax.dispatch.cpp     |   8 +-
 modules/dnn/src/layers/arg_layer.cpp     |   3 +-
 modules/dnn/src/layers/eltwise_layer.cpp |   8 +-
 modules/dnn/src/layers/gather_layer.cpp  |   6 +-
 modules/dnn/test/test_common.cpp         |   7 +-
 modules/dnn/test/test_layers_1d.cpp      | 215 +++++++++++++----------
 6 files changed, 150 insertions(+), 97 deletions(-)

diff --git a/modules/core/src/minmax.dispatch.cpp b/modules/core/src/minmax.dispatch.cpp
index 529bd80b9f..6c50cb20b5 100644
--- a/modules/core/src/minmax.dispatch.cpp
+++ b/modules/core/src/minmax.dispatch.cpp
@@ -456,12 +456,14 @@ static void reduceMinMax(cv::InputArray src, cv::OutputArray dst, ReduceMode mode, int axis)
     CV_INSTRUMENT_REGION();
 
     cv::Mat srcMat = src.getMat();
-    axis = (axis + srcMat.dims) % srcMat.dims;
-    CV_Assert(srcMat.channels() == 1 && axis >= 0 && axis < srcMat.dims);
+    int dims = std::max(1, srcMat.dims);
+    axis = (axis + dims) % dims;
+    CV_Assert(srcMat.channels() == 1 && axis >= 0 && axis <= srcMat.dims);
 
     std::vector<int> sizes(srcMat.dims);
     std::copy(srcMat.size.p, srcMat.size.p + srcMat.dims, sizes.begin());
-    sizes[axis] = 1;
+    if (!sizes.empty())
+        sizes[axis] = 1;
 
     dst.create(srcMat.dims, sizes.data(), CV_32SC1); // indices
     cv::Mat dstMat = dst.getMat();
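Note on the `reduceMinMax` change: a 0-d `Mat` reports `dims == 0`, so the old `(axis + srcMat.dims) % srcMat.dims` divided by zero. Clamping the rank to 1 keeps the normalization well defined. A minimal sketch of the same logic in isolation (the helper name is hypothetical, not part of the patch):

```cpp
#include <opencv2/core.hpp>
#include <algorithm>

// Normalize a possibly negative reduction axis, treating a 0-d Mat as rank 1.
int normalizeReduceAxis(const cv::Mat& srcMat, int axis)
{
    int dims = std::max(1, srcMat.dims);  // 0-d Mat: dims == 0 -> clamp to 1
    return (axis + dims) % dims;          // e.g. axis = -1 maps to the last dim
}
```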
diff --git a/modules/dnn/src/layers/arg_layer.cpp b/modules/dnn/src/layers/arg_layer.cpp
index edb9ddbe8d..059c93cdae 100644
--- a/modules/dnn/src/layers/arg_layer.cpp
+++ b/modules/dnn/src/layers/arg_layer.cpp
@@ -66,7 +66,8 @@ public:
 
         MatShape inpShape = inputs[0];
         const int axis_ = normalize_axis(axis, inpShape);
-        handleKeepDims(inpShape, axis_);
+        if (!inpShape.empty())
+            handleKeepDims(inpShape, axis_);
         outputs.assign(1, inpShape);
 
         return false;
diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp
index f82c11c849..9219ca6f52 100644
--- a/modules/dnn/src/layers/eltwise_layer.cpp
+++ b/modules/dnn/src/layers/eltwise_layer.cpp
@@ -191,6 +191,10 @@ public:
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
         CV_Assert(inputs.size() >= 2);
+        if (inputs[0].size() == 0) {
+            outputs.assign(1, inputs[0]);
+            return false;
+        }
         CV_Assert(inputs[0].size() >= 1);
         CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
         CV_Assert(op == SUM || coeffs.size() == 0);
@@ -310,13 +314,13 @@ public:
                     int nstripes)
     {
         const EltwiseOp op = self.op;
-        CV_Check(dst.dims, 1 <= dst.dims && dst.dims <= 5, "");
+        CV_Check(dst.dims, 0 <= dst.dims && dst.dims <= 5, "");
         CV_CheckTypeEQ(dst.type(), CV_32FC1, "");
         CV_Assert(dst.isContinuous());
         CV_Assert(self.coeffs.empty() || self.coeffs.size() == (size_t)nsrcs);
         CV_CheckGE(nsrcs, 2, "");
 
-        if (dst.dims != 1)
+        if (dst.dims > 1)
             CV_Assert(self.outputChannels == dst.size[1]);
 
         EltwiseInvoker p(self);
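The same 0-d guard shows up in the shape-inference paths: a rank-0 tensor arrives as an empty `MatShape`, and the layer forwards it instead of indexing into a zero-length shape vector. A stripped-down sketch of the pattern (illustrative only; the broadcasting logic is elided and the function name is hypothetical):

```cpp
#include <opencv2/dnn.hpp>

// Shape-inference guard for 0-d inputs, as used in the eltwise/gather changes.
bool inferShapesSketch(const std::vector<cv::dnn::MatShape>& inputs,
                       std::vector<cv::dnn::MatShape>& outputs)
{
    CV_Assert(inputs.size() >= 2);
    if (inputs[0].empty()) {          // rank 0: empty shape vector
        outputs.assign(1, inputs[0]); // output is 0-d as well
        return false;                 // same return convention as the patch
    }
    // ... regular broadcasting/validation would follow here ...
    outputs.assign(1, inputs[0]);
    return false;
}
```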
diff --git a/modules/dnn/src/layers/gather_layer.cpp b/modules/dnn/src/layers/gather_layer.cpp
index 06ca8fcd66..3117b618e9 100644
--- a/modules/dnn/src/layers/gather_layer.cpp
+++ b/modules/dnn/src/layers/gather_layer.cpp
@@ -30,12 +30,16 @@ public:
     {
         CV_CheckEQ(inputs.size(), 2ull, "");
         MatShape inpShape = inputs[0];
+        if (inpShape.size() == 0) {
+            outputs.assign(1, inpShape);
+            return false;
+        }
+
         const int axis = normalize_axis(m_axis, inpShape);
 
         inpShape.erase(inpShape.begin() + axis);
         auto end = m_real_ndims == -1 ? inputs[1].end() : inputs[1].begin() + m_real_ndims;
         inpShape.insert(inpShape.begin() + axis, inputs[1].begin(), end);
-
         outputs.assign(1, inpShape);
         return false;
     }
diff --git a/modules/dnn/test/test_common.cpp b/modules/dnn/test/test_common.cpp
index 7afa2a18e0..4a9b9c4147 100644
--- a/modules/dnn/test/test_common.cpp
+++ b/modules/dnn/test/test_common.cpp
@@ -12,21 +12,24 @@ void runLayer(cv::Ptr<cv::dnn::Layer> layer, std::vector<cv::Mat> &inpBlobs, std::vector<cv::Mat> &outBlobs)
     size_t ninputs = inpBlobs.size();
     std::vector<cv::Mat> inp(ninputs), outp, intp;
    std::vector<cv::dnn::MatShape> inputs, outputs, internals;
+    std::vector<cv::dnn::MatType> inputs_types, outputs_types, internals_types;
 
     for (size_t i = 0; i < ninputs; i++)
     {
         inp[i] = inpBlobs[i].clone();
         inputs.push_back(cv::dnn::shape(inp[i]));
+        inputs_types.push_back(cv::dnn::MatType(inp[i].type()));
     }
 
     layer->getMemoryShapes(inputs, 0, outputs, internals);
+    layer->getTypes(inputs_types, outputs.size(), internals.size(), outputs_types, internals_types);
     for (size_t i = 0; i < outputs.size(); i++)
     {
-        outp.push_back(cv::Mat(outputs[i], CV_32F));
+        outp.push_back(cv::Mat(outputs[i], outputs_types[i]));
     }
     for (size_t i = 0; i < internals.size(); i++)
     {
-        intp.push_back(cv::Mat(internals[i], CV_32F));
+        intp.push_back(cv::Mat(internals[i], internals_types[i]));
     }
 
     layer->finalize(inp, outp);
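The `runLayer` change matters because output buffers were previously always allocated as `CV_32F`, while e.g. Arg layers produce integer indices. A condensed, self-contained rendering of the new flow (the helper name `allocateOutputs` is hypothetical; the calls mirror the diff above on this branch):

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
using namespace cv;
using namespace cv::dnn;

// Condensed from the runLayer() change above: allocate outputs with the
// types the layer reports instead of assuming CV_32F everywhere.
static void allocateOutputs(Ptr<Layer>& layer,
                            const std::vector<Mat>& inp,
                            std::vector<Mat>& outp)
{
    std::vector<MatShape> inputs, outputs, internals;
    std::vector<MatType> in_types, out_types, internal_types;
    for (const Mat& m : inp) {
        inputs.push_back(shape(m));
        in_types.push_back(MatType(m.type()));
    }
    layer->getMemoryShapes(inputs, 0, outputs, internals);
    layer->getTypes(in_types, (int)outputs.size(), (int)internals.size(),
                    out_types, internal_types);
    for (size_t i = 0; i < outputs.size(); i++)
        outp.push_back(Mat(outputs[i], out_types[i])); // e.g. CV_64SC1 for Arg
}
```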
diff --git a/modules/dnn/test/test_layers_1d.cpp b/modules/dnn/test/test_layers_1d.cpp
index 1a46221e3c..b07a1c5a48 100644
--- a/modules/dnn/test/test_layers_1d.cpp
+++ b/modules/dnn/test/test_layers_1d.cpp
@@ -42,64 +42,82 @@ TEST_P(Layer_1d_Test, Scale)
     cv::Mat output_ref = input.mul(weight);
 
     runLayer(layer, inputs, outputs);
-
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
 
-typedef testing::TestWithParam<tuple<int, int>> Layer_Gather_1d_Test;
+typedef testing::TestWithParam<tuple<std::vector<int>, int>> Layer_Gather_1d_Test;
 TEST_P(Layer_Gather_1d_Test, Accuracy) {
-    int batch_size = get<0>(GetParam());
+    std::vector<int> input_shape = get<0>(GetParam());
     int axis = get<1>(GetParam());
 
+    // skip cases where axis exceeds the input rank
+    if (axis > input_shape.size())
+        return;
+
     LayerParams lp;
     lp.type = "Gather";
-    lp.name = "gatherLayer";
+    lp.name = "GatherLayer";
     lp.set("axis", axis);
     lp.set("real_ndims", 1);
 
     Ptr<GatherLayer> layer = GatherLayer::create(lp);
 
-    std::vector<int> input_shape = {batch_size, 1};
-    std::vector<int> indices_shape = {1, 1};
-    std::vector<int> output_shape = {batch_size, 1};
-
-    if (batch_size == 0){
-        input_shape.erase(input_shape.begin());
-        indices_shape.erase(indices_shape.begin());
-        output_shape.erase(output_shape.begin());
-    } else if (axis == 0) {
-        output_shape[0] = 1;
-    }
-
-    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::Mat input(input_shape.size(), input_shape.data(), CV_32F);
     cv::randu(input, 0.0, 1.0);
-    cv::Mat indices = cv::Mat(indices_shape, CV_32S, 0.0);
-    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, input(cv::Range::all(), cv::Range(0, 1)).data);
+
+    std::vector<int> indices_shape = {1};
+    cv::Mat indices = cv::Mat(indices_shape.size(), indices_shape.data(), CV_32S, 0.0);
+
+    cv::Mat output_ref;
+    if (input_shape.size() == 0 || input_shape.size() == 1) {
+        output_ref = input;
+    } else if (axis == 0) {
+        output_ref = input.row(0);
+    } else if (axis == 1) {
+        output_ref = input.col(0);
+    }
 
     std::vector<Mat> inputs{input, indices};
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Gather_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/        Values(0, 1)
+/*input blob shape*/ testing::Values(
+                         std::vector<int>({}),
+                         std::vector<int>({1}),
+                         std::vector<int>({1, 4}),
+                         std::vector<int>({4, 4})
+                     ),
+/*axis*/             testing::Values(0, 1)
 ));
 
-typedef testing::TestWithParam<tuple<int, int, std::string>> Layer_Arg_1d_Test;
-TEST_P(Layer_Arg_1d_Test, Accuracy) {
-
-    int batch_size = get<0>(GetParam());
-    int axis = get<1>(GetParam());
-    std::string operation = get<2>(GetParam());
+template<typename T>
+int arg_op(const std::vector<T>& vec, const std::string& operation) {
+    CV_Assert(!vec.empty());
+    if (operation == "max") {
+        return static_cast<int>(std::distance(vec.begin(), std::max_element(vec.begin(), vec.end())));
+    } else if (operation == "min") {
+        return static_cast<int>(std::distance(vec.begin(), std::min_element(vec.begin(), vec.end())));
+    } else {
+        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
+    }
+}
 
+// Test for ArgLayer is disabled because there is a problem in the runLayer function related to type assignment
+typedef testing::TestWithParam<tuple<std::vector<int>, std::string>> Layer_Arg_1d_Test;
+TEST_P(Layer_Arg_1d_Test, Accuracy_01D) {
+    std::vector<int> input_shape = get<0>(GetParam());
+    std::string operation = get<1>(GetParam());
 
     LayerParams lp;
     lp.type = "Arg";
     lp.name = "arg" + operation + "_Layer";
 
+    int axis = (input_shape.size() == 0 || input_shape.size() == 1) ? 0 : 1;
     lp.set("op", operation);
     lp.set("axis", axis);
     lp.set("keepdims", 1);
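Throughout these tests, the parametrized shape vector is turned into a `Mat` via the `(ndims, sizes, type)` constructor, so an empty vector yields a genuine 0-d `Mat` rather than a 1x1. A small illustration (assuming the 0/1-d `Mat` semantics this series targets, where a 0-d `Mat` still holds a single scalar element):

```cpp
std::vector<int> s0 = {};   // 0-d case from the Values(...) lists
std::vector<int> s1 = {4};  // 1-d case

cv::Mat m0((int)s0.size(), s0.data(), CV_32F);  // expected: dims == 0, total() == 1
cv::Mat m1((int)s1.size(), s1.data(), CV_32F);  // expected: dims == 1, total() == 4
```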
@@ -107,42 +125,57 @@ TEST_P(Layer_Arg_1d_Test, Accuracy) {
 
     Ptr<ArgLayer> layer = ArgLayer::create(lp);
 
-    std::vector<int> input_shape = {batch_size, 1};
-    std::vector<int> output_shape = {1, 1};
-
-    if (batch_size == 0){
-        input_shape.erase(input_shape.begin());
-        output_shape.erase(output_shape.begin());
+    cv::Mat input = cv::Mat(input_shape.size(), input_shape.data(), CV_32F);
+    for (int i = 0; i < input.total(); i++) {
+        input.at<float>(i) = i;
     }
 
-    if (axis != 0 && batch_size != 0){
-        output_shape[0] = batch_size;
+    // create reference output with the required shape and values
+    int index;
+    cv::Mat output_ref;
+    std::vector<int> ref_output;
+    if (input_shape.size() == 2) {
+        int rows = input_shape[0];
+        int cols = input_shape[1];
+        ref_output.resize(rows);
+        for (int i = 0; i < rows; i++) {
+            std::vector<float> row_vec(cols);
+            for (int j = 0; j < cols; j++) {
+                row_vec[j] = input.at<float>(i, j);
+            }
+            ref_output[i] = (int) arg_op(row_vec, operation);
+        }
+        output_ref = cv::Mat(rows, (axis == 1) ? 1 : cols, CV_32S, ref_output.data());
+    } else if (input_shape.size() <= 1) {
+        index = arg_op(std::vector<float>(input.begin<float>(), input.end<float>()), operation);
+        output_ref = cv::Mat(input_shape.size(), input_shape.data(), CV_32FC1, &index);
     }
 
-    cv::Mat input = cv::Mat(input_shape, CV_32F, 1);
-    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, 0);
-
-    for (int i = 0; i < batch_size; ++i)
-        input.at<float>(i, 0) = static_cast<float>(i + 1);
-
     std::vector<Mat> inputs{input};
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    // convert output_ref to int64 to match the layer's output type
+    output_ref.convertTo(output_ref, CV_64SC1);
     normAssert(output_ref, outputs[0]);
 }
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Arg_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/        Values(0, 1),
+/*input blob shape*/ testing::Values(
+                         std::vector<int>({}),
+                         std::vector<int>({1}),
+                         std::vector<int>({1, 4}),
+                         std::vector<int>({4, 4})
+                     ),
 /*operation*/        Values("max", "min")
 ));
 
-typedef testing::TestWithParam<tuple<int, std::string>> Layer_NaryElemwise_1d_Test;
+typedef testing::TestWithParam<tuple<std::vector<int>, std::string>> Layer_NaryElemwise_1d_Test;
 TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
 
-    int batch_size = get<0>(GetParam());
+    std::vector<int> input_shape = get<0>(GetParam());
     std::string operation = get<1>(GetParam());
 
     LayerParams lp;
@@ -151,12 +184,8 @@ TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
     lp.set("operation", operation);
     Ptr<NaryEltwiseLayer> layer = NaryEltwiseLayer::create(lp);
 
-    std::vector<int> input_shape = {batch_size, 1};
-    if (batch_size == 0)
-        input_shape.erase(input_shape.begin());
-
-    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 0.0);
-    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 0.0);
+    cv::Mat input1 = cv::Mat(input_shape.size(), input_shape.data(), CV_32F);
+    cv::Mat input2 = cv::Mat(input_shape.size(), input_shape.data(), CV_32F);
     cv::randu(input1, 0.0, 1.0);
     cv::randu(input2, 0.0, 1.0);
 
@@ -177,22 +206,26 @@ TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
     runLayer(layer, inputs, outputs);
     if (!output_ref.empty()) {
+        ASSERT_EQ(1, outputs.size());
         ASSERT_EQ(shape(output_ref), shape(outputs[0]));
         normAssert(output_ref, outputs[0]);
     } else {
         CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
     }
 }
-
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_NaryElemwise_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1),
-/*operation*/        Values("div", "mul", "sum", "sub")
+/*input blob shape*/ testing::Values(
+                         std::vector<int>({}),
+                         std::vector<int>({1}),
+                         std::vector<int>({1, 4}),
+                         std::vector<int>({4, 1})),
+/*operation*/        testing::Values("div", "mul", "sum", "sub")
 ));
 
-typedef testing::TestWithParam<tuple<int, std::string>> Layer_Elemwise_1d_Test;
-TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
+typedef testing::TestWithParam<tuple<std::vector<int>, std::string>> Layer_Elemwise_1d_Test;
+TEST_P(Layer_Elemwise_1d_Test, Accuracy_01D) {
 
-    int batch_size = get<0>(GetParam());
+    std::vector<int> input_shape = get<0>(GetParam());
     std::string operation = get<1>(GetParam());
 
     LayerParams lp;
@@ -201,12 +234,8 @@ TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
     lp.set("operation", operation);
     Ptr<EltwiseLayer> layer = EltwiseLayer::create(lp);
 
-    std::vector<int> input_shape = {batch_size, 1};
-    if (batch_size == 0)
-        input_shape.erase(input_shape.begin());
-
-    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 1.0);
-    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::Mat input1(input_shape.size(), input_shape.data(), CV_32F);
+    cv::Mat input2(input_shape.size(), input_shape.data(), CV_32F);
     cv::randu(input1, 0.0, 1.0);
     cv::randu(input2, 0.0, 1.0);
 
@@ -226,26 +255,29 @@ TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
         output_ref = cv::Mat();
     }
 
-
     std::vector<Mat> inputs{input1, input2};
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
-
     if (!output_ref.empty()) {
+        ASSERT_EQ(1, outputs.size());
         ASSERT_EQ(shape(output_ref), shape(outputs[0]));
         normAssert(output_ref, outputs[0]);
     } else {
         CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
     }
 }
-
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Elemwise_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/        Values("div", "prod", "max", "min", "sum")
+/*input blob shape*/ testing::Values(
+                         std::vector<int>({}),
+                         std::vector<int>({1}),
+                         std::vector<int>({4}),
+                         std::vector<int>({1, 4}),
+                         std::vector<int>({4, 1})),
+/*operation*/        testing::Values("div", "prod", "max", "min", "sum")
 ));
 
-TEST(Layer_Reshape_Test, Accuracy)
+TEST(Layer_Reshape_Test, Accuracy_1D)
 {
     LayerParams lp;
     lp.type = "Reshape";
@@ -267,6 +299,7 @@ TEST(Layer_Reshape_Test, Accuracy)
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
@@ -291,9 +324,10 @@ TEST_P(Layer_Split_Test, Accuracy_01D)
     std::vector<Mat> inputs{input};
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(outputs.size(), top_count);
     for (int i = 0; i < top_count; i++)
     {
-        ASSERT_EQ(shape(output_ref), shape(outputs[i]));
+        ASSERT_EQ(shape(outputs[i]), shape(output_ref));
         normAssert(output_ref, outputs[i]);
     }
 }
@@ -330,7 +364,7 @@ TEST_P(Layer_Expand_Test, Accuracy_ND) {
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
@@ -373,6 +407,7 @@ TEST_P(Layer_Concat_Test, Accuracy_01D)
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
@@ -412,7 +447,7 @@ TEST_P(Layer_Softmax_Test, Accuracy_01D) {
     std::vector<Mat> inputs{input};
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
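Several hunks above and below only swap assertion operands: the `outputs.size()` checks are normalized to put the expected literal first. With gtest's traditional `(expected, actual)` reading of the two-argument comparison macros, the failure message then labels the values the right way around:

```cpp
ASSERT_EQ(1, outputs.size());                     // expected count first
ASSERT_EQ(shape(output_ref), shape(outputs[0]));  // reference shape first
```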
@@ -430,16 +465,17 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Softmax_Test, Combine(
         testing::Values(0, 1)
 ));
 
-typedef testing::TestWithParam<tuple<std::vector<int>, std::string>> Layer_Scatter_Test;
+typedef testing::TestWithParam<tuple<std::tuple<int, std::vector<int>>, std::string>> Layer_Scatter_Test;
 TEST_P(Layer_Scatter_Test, Accuracy1D) {
-
-    std::vector<int> input_shape = get<0>(GetParam());
+    auto tup = get<0>(GetParam());
+    int axis = get<0>(tup);
+    std::vector<int> input_shape = get<1>(tup);
     std::string opr = get<1>(GetParam());
 
     LayerParams lp;
     lp.type = "Scatter";
     lp.name = "addLayer";
-    lp.set("axis", 0);
+    lp.set("axis", axis);
     lp.set("reduction", opr);
     Ptr<ScatterLayer> layer = ScatterLayer::create(lp);
 
@@ -452,7 +488,7 @@ TEST_P(Layer_Scatter_Test, Accuracy1D) {
 
     // create reference output
     cv::Mat output_ref(input_shape, CV_32F, 0.0);
-    for (int i = 0; i < input_shape[0]; i++){
+    for (int i = 0; i < ((input_shape.size() == 1) ? input_shape[0] : input_shape[1]); i++) {
         output_ref.at<float>(indices[i]) = input.at<float>(i);
     }
 
@@ -469,13 +505,14 @@ TEST_P(Layer_Scatter_Test, Accuracy1D) {
     std::vector<Mat> inputs{output, indices_mat, input};
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    normAssert(output_ref, outputs[0]);
 }
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Scatter_Test, Combine(
-/*input blob shape*/ testing::Values(std::vector<int>{4},
-                                     std::vector<int>{1, 4}),
-/*reduce*/           Values("none", "add", "mul", "max", "min")
+/*input blob shape*/ testing::Values(std::make_tuple(0, std::vector<int>{4}),
+                                     std::make_tuple(1, std::vector<int>{1, 4})),
+/*reduce*/           testing::Values("none", "add", "mul", "max", "min")
 ));
 
@@ -501,7 +538,7 @@ TEST_P(Layer_Permute_Test, Accuracy_01D)
     std::vector<Mat> outputs;
 
     runLayer(layer, inputs, outputs);
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
@@ -555,9 +592,9 @@ TEST_P(Layer_Slice_Test, Accuracy_1D){
     std::vector<Mat> inputs{input};
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
-
+    ASSERT_EQ(outputs.size(), splits);
     for (int i = 0; i < splits; ++i){
-        ASSERT_EQ(shape(output_refs[i]), shape(outputs[i]));
+        ASSERT_EQ(shape(outputs[i]), shape(output_refs[i]));
         normAssert(output_refs[i], outputs[i]);
     }
 }
@@ -650,11 +687,13 @@ TEST_P(Layer_FullyConnected_Test, Accuracy_01D)
     Mat input(input_shape.size(), input_shape.data(), CV_32F);
     randn(input, 0, 1);
     Mat output_ref = input.reshape(1, 1) * weights;
-    output_ref.dims = 1;
+    output_ref.dims = input_shape.size();
 
     std::vector<Mat> inputs{input};
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
+    ASSERT_EQ(1, outputs.size());
+    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_FullyConnected_Test,
@@ -699,7 +738,7 @@ TEST_P(Layer_BatchNorm_Test, Accuracy_01D)
     cv::sqrt(varMat + 1e-5, varMat);
     output_ref = (output_ref - meanMat) / varMat;
 
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 
@@ -732,7 +771,7 @@ TEST_P(Layer_Const_Test, Accuracy_01D)
     std::vector<Mat> inputs; // No inputs are needed for a ConstLayer
     std::vector<Mat> outputs;
     runLayer(layer, inputs, outputs);
-    ASSERT_EQ(outputs.size(), 1);
+    ASSERT_EQ(1, outputs.size());
     ASSERT_EQ(shape(output_ref), shape(outputs[0]));
     normAssert(output_ref, outputs[0]);
 }
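As a quick end-to-end sanity check that the core `reduceMinMax` fix is observable through the public API, something like the following can be compiled against this branch (illustrative; `cv::reduceArgMin` is the public entry point that dispatches into the patched code and produces `CV_32S` indices):

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    float data[4] = {3.f, 1.f, 2.f, 0.f};
    cv::Mat src(1, 4, CV_32F, data);  // a 1x4 row vector
    cv::Mat idx;
    cv::reduceArgMin(src, idx, 1);    // argmin along axis 1
    assert(idx.at<int>(0, 0) == 3);   // the minimum (0.f) sits at column 3
    return 0;
}
```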