diff --git a/modules/dnn/test/test_common.cpp b/modules/dnn/test/test_common.cpp
index a2ea6ead53..7afa2a18e0 100644
--- a/modules/dnn/test/test_common.cpp
+++ b/modules/dnn/test/test_common.cpp
@@ -4,3 +4,38 @@
 
 #include "test_precomp.hpp"
 #include "test_common.impl.hpp"  // shared with perf tests
+#include <opencv2/dnn/shape_utils.hpp>
+
+namespace opencv_test {
+void runLayer(cv::Ptr<cv::dnn::Layer> layer, std::vector<cv::Mat> &inpBlobs, std::vector<cv::Mat> &outBlobs)
+{
+    size_t ninputs = inpBlobs.size();
+    std::vector<cv::Mat> inp(ninputs), outp, intp;
+    std::vector<cv::dnn::MatShape> inputs, outputs, internals;
+
+    for (size_t i = 0; i < ninputs; i++)
+    {
+        inp[i] = inpBlobs[i].clone();
+        inputs.push_back(cv::dnn::shape(inp[i]));
+    }
+
+    layer->getMemoryShapes(inputs, 0, outputs, internals);
+    for (size_t i = 0; i < outputs.size(); i++)
+    {
+        outp.push_back(cv::Mat(outputs[i], CV_32F));
+    }
+    for (size_t i = 0; i < internals.size(); i++)
+    {
+        intp.push_back(cv::Mat(internals[i], CV_32F));
+    }
+
+    layer->finalize(inp, outp);
+    layer->forward(inp, outp, intp);
+
+    size_t noutputs = outp.size();
+    outBlobs.resize(noutputs);
+    for (size_t i = 0; i < noutputs; i++)
+        outBlobs[i] = outp[i];
+}
+
+}
diff --git a/modules/dnn/test/test_common.hpp b/modules/dnn/test/test_common.hpp
index 57b174e16c..9b1b56fcc9 100644
--- a/modules/dnn/test/test_common.hpp
+++ b/modules/dnn/test/test_common.hpp
@@ -238,7 +238,8 @@ protected:
     }
 };
 
+void runLayer(cv::Ptr<cv::dnn::Layer> layer, std::vector<cv::Mat> &inpBlobs, std::vector<cv::Mat> &outBlobs);
+
 } // namespace
-
 #endif
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 066e5ec136..020a22199f 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -62,37 +62,6 @@ static String _tf(TString filename)
     return (basetestdir + "dnn/layers/") + filename;
 }
 
-void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &outBlobs)
-{
-    size_t ninputs = inpBlobs.size();
-    std::vector<Mat> inp(ninputs), outp, intp;
-    std::vector<MatShape> inputs, outputs, internals;
-
-    for (size_t i = 0; i < ninputs; i++)
-    {
-        inp[i] = inpBlobs[i].clone();
-        inputs.push_back(shape(inp[i]));
-    }
-
-    layer->getMemoryShapes(inputs, 0, outputs, internals);
-    for (size_t i = 0; i < outputs.size(); i++)
-    {
-        outp.push_back(Mat(outputs[i], CV_32F));
-    }
-    for (size_t i = 0; i < internals.size(); i++)
-    {
-        intp.push_back(Mat(internals[i], CV_32F));
-    }
-
-    layer->finalize(inp, outp);
-    layer->forward(inp, outp, intp);
-
-    size_t noutputs = outp.size();
-    outBlobs.resize(noutputs);
-    for (size_t i = 0; i < noutputs; i++)
-        outBlobs[i] = outp[i];
-}
-
 class Test_Caffe_layers : public DNNTestLayer
 {
 public:
@@ -618,242 +587,6 @@ TEST(Layer_LSTM_Test_Accuracy_with_, HiddenParams)
     normAssert(h_t_reference, outputs[0]);
 }
 
-typedef testing::TestWithParam<tuple<int>> Layer_Scale_1d_Test;
-TEST_P(Layer_Scale_1d_Test, Accuracy)
-{
-    int batch_size = get<0>(GetParam());
-
-    LayerParams lp;
-    lp.type = "Scale";
-    lp.name = "scaleLayer";
-    lp.set("axis", 0);
-    lp.set("mode", "scale");
-    lp.set("bias_term", false);
-    Ptr<ScaleLayer> layer = ScaleLayer::create(lp);
-
-    std::vector<int> input_shape = {batch_size, 3};
-    std::vector<int> output_shape = {batch_size, 3};
-
-    if (batch_size == 0){
-        input_shape.erase(input_shape.begin());
-        output_shape.erase(output_shape.begin());
-    }
-
-    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
-    cv::randn(input, 0.0, 1.0);
-    cv::Mat weight = cv::Mat(output_shape, CV_32F, 2.0);
-
-    std::vector<Mat> inputs{input, weight};
-    std::vector<Mat> outputs;
-
-    cv::Mat output_ref = input.mul(weight);
-    runLayer(layer, inputs, outputs);
-
-    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
-    normAssert(output_ref, outputs[0]);
-}
-INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Scale_1d_Test,
-/*operation*/ Values(0, 1));
-
-
-typedef testing::TestWithParam<tuple<int, int>> Layer_Gather_1d_Test;
-TEST_P(Layer_Gather_1d_Test, Accuracy) {
-
-    int batch_size = get<0>(GetParam());
-    int axis = get<1>(GetParam());
-
-    LayerParams lp;
-    lp.type = "Gather";
-    lp.name = "gatherLayer";
-    lp.set("axis", axis);
-    lp.set("real_ndims", 1);
-
-    Ptr<GatherLayer> layer = GatherLayer::create(lp);
-
-    std::vector<int> input_shape = {batch_size, 1};
-    std::vector<int> indices_shape = {1, 1};
-    std::vector<int> output_shape = {batch_size, 1};
-
-    if (batch_size == 0){
-        input_shape.erase(input_shape.begin());
-        indices_shape.erase(indices_shape.begin());
-        output_shape.erase(output_shape.begin());
-    } else if (axis == 0) {
-        output_shape[0] = 1;
-    }
-
-    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
-    cv::randu(input, 0.0, 1.0);
-    cv::Mat indices = cv::Mat(indices_shape, CV_32F, 0.0);
-    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, input(cv::Range::all(), cv::Range(0, 1)).data);
-
-    std::vector<Mat> inputs{input, indices};
-    std::vector<Mat> outputs;
-
-    runLayer(layer, inputs, outputs);
-    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
-    normAssert(output_ref, outputs[0]);
-}
-INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Gather_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/ Values(0, 1)
-));
-
-typedef testing::TestWithParam<tuple<int, int, std::string>> Layer_Arg_1d_Test;
-TEST_P(Layer_Arg_1d_Test, Accuracy) {
-
-    int batch_size = get<0>(GetParam());
-    int axis = get<1>(GetParam());
-    std::string operation = get<2>(GetParam());
-
-    LayerParams lp;
-    lp.type = "Arg";
-    lp.name = "arg" + operation + "_Layer";
-    lp.set("op", operation);
-    lp.set("axis", axis);
-    lp.set("keepdims", 1);
-    lp.set("select_last_index", 0);
-
-    Ptr<ArgLayer> layer = ArgLayer::create(lp);
-
-    std::vector<int> input_shape = {batch_size, 1};
-    std::vector<int> output_shape = {1, 1};
-
-    if (batch_size == 0){
-        input_shape.erase(input_shape.begin());
-        output_shape.erase(output_shape.begin());
-    }
-
-    if (axis != 0 && batch_size != 0){
-        output_shape[0] = batch_size;
-    }
-
-    cv::Mat input = cv::Mat(input_shape, CV_32F, 1);
-    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, 0);
-
-    for (int i = 0; i < batch_size; ++i)
-        input.at<float>(i, 0) = static_cast<float>(i + 1);
-
-    std::vector<Mat> inputs{input};
-    std::vector<Mat> outputs;
-
-    runLayer(layer, inputs, outputs);
-    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
-    normAssert(output_ref, outputs[0]);
-}
-
-INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Arg_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/ Values(0, 1),
-/*operation*/ Values( "max", "min")
-));
-
-typedef testing::TestWithParam<tuple<int, std::string>> Layer_NaryElemwise_1d_Test;
-TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
-
-    int batch_size = get<0>(GetParam());
-    std::string operation = get<1>(GetParam());
-
-    LayerParams lp;
-    lp.type = "Eltwise";
-    lp.name = operation + "_Layer";
-    lp.set("operation", operation);
-    Ptr<NaryEltwiseLayer> layer = NaryEltwiseLayer::create(lp);
-
-    std::vector<int> input_shape = {batch_size, 1};
-    if (batch_size == 0)
-        input_shape.erase(input_shape.begin());
-
-    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 0.0);
-    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 0.0);
-    cv::randu(input1, 0.0, 1.0);
-    cv::randu(input2, 0.0, 1.0);
-
-    cv::Mat output_ref;
-    if (operation == "sum") {
-        output_ref = input1 + input2;
-    } else if (operation == "mul") {
-        output_ref = input1.mul(input2);
-    } else if (operation == "div") {
-        output_ref = input1 / input2;
-    } else if (operation == "sub") {
-        output_ref = input1 - input2;
-    } else {
-        output_ref = cv::Mat();
-    }
-    std::vector<Mat> inputs{input1, input2};
-    std::vector<Mat> outputs;
-
-    runLayer(layer, inputs, outputs);
-    if (!output_ref.empty()) {
-        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
-        normAssert(output_ref, outputs[0]);
-    } else {
-        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
-    }
-}
-
-INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_NaryElemwise_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1),
-/*operation*/ Values("div", "mul", "sum", "sub")
-));
-
-typedef testing::TestWithParam<tuple<int, std::string>> Layer_Elemwise_1d_Test;
-TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
-
-    int batch_size = get<0>(GetParam());
-    std::string operation = get<1>(GetParam());
-
-    LayerParams lp;
-    lp.type = "Eltwise";
-    lp.name = operation + "_Layer";
-    lp.set("operation", operation);
-    Ptr<EltwiseLayer> layer = EltwiseLayer::create(lp);
-
-    std::vector<int> input_shape = {batch_size, 1};
-    if (batch_size == 0)
-        input_shape.erase(input_shape.begin());
-
-    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 1.0);
-    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 1.0);
-    cv::randu(input1, 0.0, 1.0);
-    cv::randu(input2, 0.0, 1.0);
-
-    // Dynamically select the operation
-    cv::Mat output_ref;
-    if (operation == "sum") {
-        output_ref = input1 + input2;
-    } else if (operation == "max") {
-        output_ref = cv::max(input1, input2);
-    } else if (operation == "min") {
-        output_ref = cv::min(input1, input2);
-    } else if (operation == "prod") {
-        output_ref = input1.mul(input2);
-    } else if (operation == "div") {
-        output_ref = input1 / input2;
-    } else {
-        output_ref = cv::Mat();
-    }
-
-
-    std::vector<Mat> inputs{input1, input2};
-    std::vector<Mat> outputs;
-
-    runLayer(layer, inputs, outputs);
-
-    if (!output_ref.empty()) {
-        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
-        normAssert(output_ref, outputs[0]);
-    } else {
-        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
-    }
-}
-
-INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Elemwise_1d_Test, Combine(
-/*input blob shape*/ Values(0, 1, 2, 3),
-/*operation*/ Values("div", "prod", "max", "min", "sum")
-));
-
 TEST(Layer_GRU_Test_Accuracy_with_, Pytorch)
 {
     Mat Wx = blobFromNPY(_tf("gru.W.npy"));
diff --git a/modules/dnn/test/test_layers_1d.cpp b/modules/dnn/test/test_layers_1d.cpp
new file mode 100644
index 0000000000..ccf11e20d1
--- /dev/null
+++ b/modules/dnn/test/test_layers_1d.cpp
@@ -0,0 +1,248 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2024, OpenCV Team, all rights reserved.
+// Third party copyrights are property of their respective owners.
+
+#include "test_precomp.hpp"
+#include <opencv2/dnn/shape_utils.hpp>
+#include <opencv2/dnn/all_layers.hpp>
+#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS
+
+namespace opencv_test { namespace {
+
+typedef testing::TestWithParam<tuple<int>> Layer_1d_Test;
+TEST_P(Layer_1d_Test, Scale)
+{
+    int batch_size = get<0>(GetParam());
+
+    LayerParams lp;
+    lp.type = "Scale";
+    lp.name = "scaleLayer";
+    lp.set("axis", 0);
+    lp.set("mode", "scale");
+    lp.set("bias_term", false);
+    Ptr<ScaleLayer> layer = ScaleLayer::create(lp);
+
+    std::vector<int> input_shape = {batch_size, 3};
+    std::vector<int> output_shape = {batch_size, 3};
+
+    if (batch_size == 0){
+        input_shape.erase(input_shape.begin());
+        output_shape.erase(output_shape.begin());
+    }
+
+    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::randn(input, 0.0, 1.0);
+    cv::Mat weight = cv::Mat(output_shape, CV_32F, 2.0);
+
+    std::vector<Mat> inputs{input, weight};
+    std::vector<Mat> outputs;
+
+    cv::Mat output_ref = input.mul(weight);
+    runLayer(layer, inputs, outputs);
+
+    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    normAssert(output_ref, outputs[0]);
+}
+
+typedef testing::TestWithParam<tuple<int, int>> Layer_Gather_1d_Test;
+TEST_P(Layer_Gather_1d_Test, Accuracy) {
+
+    int batch_size = get<0>(GetParam());
+    int axis = get<1>(GetParam());
+
+    LayerParams lp;
+    lp.type = "Gather";
+    lp.name = "gatherLayer";
+    lp.set("axis", axis);
+    lp.set("real_ndims", 1);
+
+    Ptr<GatherLayer> layer = GatherLayer::create(lp);
+
+    std::vector<int> input_shape = {batch_size, 1};
+    std::vector<int> indices_shape = {1, 1};
+    std::vector<int> output_shape = {batch_size, 1};
+
+    if (batch_size == 0){
+        input_shape.erase(input_shape.begin());
+        indices_shape.erase(indices_shape.begin());
+        output_shape.erase(output_shape.begin());
+    } else if (axis == 0) {
+        output_shape[0] = 1;
+    }
+
+    cv::Mat input = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::randu(input, 0.0, 1.0);
+    cv::Mat indices = cv::Mat(indices_shape, CV_32F, 0.0);
+    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, input(cv::Range::all(), cv::Range(0, 1)).data);
+
+    std::vector<Mat> inputs{input, indices};
+    std::vector<Mat> outputs;
+
+    runLayer(layer, inputs, outputs);
+    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    normAssert(output_ref, outputs[0]);
+}
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Gather_1d_Test, Combine(
+/*input blob shape*/ Values(0, 1, 2, 3),
+/*axis*/ Values(0, 1)
+));
+
+typedef testing::TestWithParam<tuple<int, int, std::string>> Layer_Arg_1d_Test;
+TEST_P(Layer_Arg_1d_Test, Accuracy) {
+
+    int batch_size = get<0>(GetParam());
+    int axis = get<1>(GetParam());
+    std::string operation = get<2>(GetParam());
+
+    LayerParams lp;
+    lp.type = "Arg";
+    lp.name = "arg" + operation + "_Layer";
+    lp.set("op", operation);
+    lp.set("axis", axis);
+    lp.set("keepdims", 1);
+    lp.set("select_last_index", 0);
+
+    Ptr<ArgLayer> layer = ArgLayer::create(lp);
+
+    std::vector<int> input_shape = {batch_size, 1};
+    std::vector<int> output_shape = {1, 1};
+
+    if (batch_size == 0){
+        input_shape.erase(input_shape.begin());
+        output_shape.erase(output_shape.begin());
+    }
+
+    if (axis != 0 && batch_size != 0){
+        output_shape[0] = batch_size;
+    }
+
+    cv::Mat input = cv::Mat(input_shape, CV_32F, 1);
+    cv::Mat output_ref = cv::Mat(output_shape, CV_32F, 0);
+
+    for (int i = 0; i < batch_size; ++i)
+        input.at<float>(i, 0) = static_cast<float>(i + 1);
+
+    std::vector<Mat> inputs{input};
+    std::vector<Mat> outputs;
+
+    runLayer(layer, inputs, outputs);
+    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+    normAssert(output_ref, outputs[0]);
+}
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Arg_1d_Test, Combine(
+/*input blob shape*/ Values(0, 1, 2, 3),
+/*axis*/ Values(0, 1),
+/*operation*/ Values("max", "min")
+));
+
+typedef testing::TestWithParam<tuple<int, std::string>> Layer_NaryElemwise_1d_Test;
+TEST_P(Layer_NaryElemwise_1d_Test, Accuracy) {
+
+    int batch_size = get<0>(GetParam());
+    std::string operation = get<1>(GetParam());
+
+    LayerParams lp;
+    lp.type = "Eltwise";
+    lp.name = operation + "_Layer";
+    lp.set("operation", operation);
+    Ptr<NaryEltwiseLayer> layer = NaryEltwiseLayer::create(lp);
+
+    std::vector<int> input_shape = {batch_size, 1};
+    if (batch_size == 0)
+        input_shape.erase(input_shape.begin());
+
+    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 0.0);
+    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 0.0);
+    cv::randu(input1, 0.0, 1.0);
+    cv::randu(input2, 0.0, 1.0);
+
+    cv::Mat output_ref;
+    if (operation == "sum") {
+        output_ref = input1 + input2;
+    } else if (operation == "mul") {
+        output_ref = input1.mul(input2);
+    } else if (operation == "div") {
+        output_ref = input1 / input2;
+    } else if (operation == "sub") {
+        output_ref = input1 - input2;
+    } else {
+        output_ref = cv::Mat();
+    }
+    std::vector<Mat> inputs{input1, input2};
+    std::vector<Mat> outputs;
+
+    runLayer(layer, inputs, outputs);
+    if (!output_ref.empty()) {
+        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+        normAssert(output_ref, outputs[0]);
+    } else {
+        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_NaryElemwise_1d_Test, Combine(
+/*input blob shape*/ Values(0, 1),
+/*operation*/ Values("div", "mul", "sum", "sub")
+));
+
+typedef testing::TestWithParam<tuple<int, std::string>> Layer_Elemwise_1d_Test;
+TEST_P(Layer_Elemwise_1d_Test, Accuracy) {
+
+    int batch_size = get<0>(GetParam());
+    std::string operation = get<1>(GetParam());
+
+    LayerParams lp;
+    lp.type = "Eltwise";
+    lp.name = operation + "_Layer";
+    lp.set("operation", operation);
+    Ptr<EltwiseLayer> layer = EltwiseLayer::create(lp);
+
+    std::vector<int> input_shape = {batch_size, 1};
+    if (batch_size == 0)
+        input_shape.erase(input_shape.begin());
+
+    cv::Mat input1 = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::Mat input2 = cv::Mat(input_shape, CV_32F, 1.0);
+    cv::randu(input1, 0.0, 1.0);
+    cv::randu(input2, 0.0, 1.0);
+
+    // Dynamically select the operation
+    cv::Mat output_ref;
+    if (operation == "sum") {
+        output_ref = input1 + input2;
+    } else if (operation == "max") {
+        output_ref = cv::max(input1, input2);
+    } else if (operation == "min") {
+        output_ref = cv::min(input1, input2);
+    } else if (operation == "prod") {
+        output_ref = input1.mul(input2);
+    } else if (operation == "div") {
+        output_ref = input1 / input2;
+    } else {
+        output_ref = cv::Mat();
+    }
+
+
+    std::vector<Mat> inputs{input1, input2};
+    std::vector<Mat> outputs;
+
+    runLayer(layer, inputs, outputs);
+
+    if (!output_ref.empty()) {
+        ASSERT_EQ(shape(output_ref), shape(outputs[0]));
+        normAssert(output_ref, outputs[0]);
+    } else {
+        CV_Error(Error::StsAssert, "Provided operation: " + operation + " is not supported. Please check the test instantiation.");
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Elemwise_1d_Test, Combine(
+/*input blob shape*/ Values(0, 1, 2, 3),
+/*operation*/ Values("div", "prod", "max", "min", "sum")
+));
+
+}}
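
With runLayer() now declared in test_common.hpp, any dnn test translation unit can exercise a single layer directly, without building a full Net. Below is a minimal usage sketch of the calling pattern this patch enables; the test name, layer choice, and values are hypothetical and not part of the patch, and it assumes the usual opencv_test setup pulled in by test_precomp.hpp:

// Hypothetical usage sketch of the shared runLayer() helper -- not part of the patch above.
TEST(Layer_Test_SharedRunLayer, EltwiseSumExample)
{
    LayerParams lp;
    lp.type = "Eltwise";
    lp.name = "sumLayer";
    lp.set("operation", "sum");
    Ptr<EltwiseLayer> layer = EltwiseLayer::create(lp);

    // Two 2x3 blobs filled with constants, so the expected sum is uniform.
    cv::Mat a(std::vector<int>{2, 3}, CV_32F, 1.0);
    cv::Mat b(std::vector<int>{2, 3}, CV_32F, 2.0);

    std::vector<Mat> inputs{a, b};
    std::vector<Mat> outputs;
    runLayer(layer, inputs, outputs);  // clones inputs, allocates outputs via getMemoryShapes()

    cv::Mat output_ref = a + b;  // every element should equal 3.0f
    ASSERT_EQ(shape(output_ref), shape(outputs[0]));
    normAssert(output_ref, outputs[0]);
}

Note that runLayer() clones its inputs and allocates CV_32F output and internal blobs from getMemoryShapes() before calling finalize() and forward(), so a caller only supplies input blobs and a reference result.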