// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

/*
Test for TFLite models loading
*/

#include "test_precomp.hpp"
#include "npy_blob.hpp"

#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
#include <opencv2/dnn/utils/debug_utils.hpp>
#include <opencv2/dnn/shape_utils.hpp>

#ifdef OPENCV_TEST_DNN_TFLITE

namespace opencv_test { namespace {

using namespace cv;
using namespace cv::dnn;

class Test_TFLite : public DNNTestLayer {
public:
    void testModel(Net& net, const std::string& modelName, const Mat& input, double l1 = 0, double lInf = 0);
    void testModel(const std::string& modelName, const Mat& input, double l1 = 0, double lInf = 0);
    void testModel(const std::string& modelName, const Size& inpSize, double l1 = 0, double lInf = 0);
    void testLayer(const std::string& modelName, double l1 = 0, double lInf = 0);
};

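// Verifies that the input shapes reported by the importer match the blobs the test feeds in.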
void testInputShapes(const Net& net, const std::vector<Mat>& inps) {
    std::vector<MatShape> inLayerShapes;
    std::vector<MatShape> outLayerShapes;
    net.getLayerShapes(MatShape(), CV_32F, 0, inLayerShapes, outLayerShapes);
    ASSERT_EQ(inLayerShapes.size(), inps.size());
    for (int i = 0; i < inps.size(); ++i) {
        ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
    }
}

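// Runs the network on the given input and compares every unconnected output against the
// reference blob dnn/tflite/<modelName>_out_<outputName>.npy.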
void Test_TFLite::testModel(Net& net, const std::string& modelName, const Mat& input, double l1, double lInf)
{
    l1 = l1 ? l1 : default_l1;
    lInf = lInf ? lInf : default_lInf;

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    testInputShapes(net, {input});
    net.setInput(input);

    std::vector<String> outNames = net.getUnconnectedOutLayersNames();

    std::vector<Mat> outs;
    net.forward(outs, outNames);

    ASSERT_EQ(outs.size(), outNames.size());
    for (int i = 0; i < outNames.size(); ++i) {
        Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str())));
        // A workaround for the following cases due to inconsistent shape definitions.
        // For details, see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369
        if (modelName == "face_landmark" || modelName == "selfie_segmentation") {
            ref = ref.reshape(1, 1);
            outs[i] = outs[i].reshape(1, 1);
        }
        normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf);
    }
}

void Test_TFLite::testModel(const std::string& modelName, const Mat& input, double l1, double lInf)
{
    Net net = readNet(findDataFile("dnn/tflite/" + modelName + ".tflite", false));
    testModel(net, modelName, input, l1, lInf);
}

void Test_TFLite::testModel(const std::string& modelName, const Size& inpSize, double l1, double lInf)
{
    Mat input = imread(findDataFile("cv/shared/lena.png"));
    input = blobFromImage(input, 1.0 / 255, inpSize, 0, true);
    testModel(modelName, input, l1, lInf);
}

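// Used by the per-layer tests: loads dnn/tflite/<modelName>.tflite, feeds dnn/tflite/<modelName>_inp.npy
// and checks the outputs via testModel() against the corresponding reference .npy files.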
void Test_TFLite::testLayer(const std::string& modelName, double l1, double lInf)
{
    Mat inp = blobFromNPY(findDataFile("dnn/tflite/" + modelName + "_inp.npy"));
    Net net = readNet(findDataFile("dnn/tflite/" + modelName + ".tflite"));
    testModel(net, modelName, inp, l1, lInf);
}

// https://google.github.io/mediapipe/solutions/face_mesh
TEST_P(Test_TFLite, face_landmark)
{
    if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
    double l1 = 2e-5, lInf = 2e-4;
    if (target == DNN_TARGET_CPU_FP16 || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD ||
        (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL))
    {
        l1 = 0.15;
        lInf = 0.82;
    }
    testModel("face_landmark", Size(192, 192), l1, lInf);
}

// https://google.github.io/mediapipe/solutions/face_detection
TEST_P(Test_TFLite, face_detection_short_range)
{
    double l1 = 0, lInf = 2e-4;
    if (target == DNN_TARGET_CPU_FP16 || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD ||
        (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL))
    {
        l1 = 0.04;
        lInf = 0.8;
    }
    testModel("face_detection_short_range", Size(128, 128), l1, lInf);
}

// https://google.github.io/mediapipe/solutions/selfie_segmentation
TEST_P(Test_TFLite, selfie_segmentation)
{
    double l1 = 0, lInf = 0;
    if (target == DNN_TARGET_CPU_FP16 || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD ||
        (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL))
    {
        l1 = 0.01;
        lInf = 0.48;
    }
    testModel("selfie_segmentation", Size(256, 256), l1, lInf);
}

TEST_P(Test_TFLite, max_unpooling)
{
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2022010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU) {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }

    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

    // Because Max Unpooling is a numerically unstable operation and small differences between
    // frameworks might lead to positional differences of the maximal elements in the tensor,
    // this test checks the behavior of the Max Unpooling layer only.
Net net = readNet(findDataFile("dnn/tflite/hair_segmentation.tflite", false));
|
2023-08-04 16:28:51 +08:00
|
|
|
net.setPreferableBackend(backend);
|
|
|
|
net.setPreferableTarget(target);
|
2023-02-13 22:00:20 +08:00
|
|
|
|
    if (net.getMainGraph())
        throw SkipTestException("The new dnn engine doesn't support forward to specified layers"); // https://github.com/opencv/opencv/issues/26349

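    // Convert to RGBA and zero out the alpha channel before building a 4-channel input blob.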
    Mat input = imread(findDataFile("cv/shared/lena.png"));
    cvtColor(input, input, COLOR_BGR2RGBA);
    input = input.mul(Scalar(1, 1, 1, 0));
    input = blobFromImage(input, 1.0 / 255);
    testInputShapes(net, {input});
    net.setInput(input);

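    // Request the intermediate outputs used below as the pooling input, the pooling values
    // and argmax indices, the unpooling input, and the unpooling output.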
    std::vector<std::vector<Mat> > outs;
    net.forward(outs, {"p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});

    ASSERT_EQ(outs.size(), 4);
    ASSERT_EQ(outs[0].size(), 1);
    ASSERT_EQ(outs[1].size(), 2);
    ASSERT_EQ(outs[2].size(), 1);
    ASSERT_EQ(outs[3].size(), 1);
    Mat poolInp = outs[0][0];
    Mat poolOut = outs[1][0];
    Mat poolIds = outs[1][1];
    Mat unpoolInp = outs[2][0];
    Mat unpoolOut = outs[3][0];

    ASSERT_EQ(poolInp.size, unpoolOut.size);
    ASSERT_EQ(poolOut.size, poolIds.size);
    ASSERT_EQ(poolOut.size, unpoolInp.size);

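    // Make sure the pooling input contains no zeros; presumably this guarantees that zeros
    // in the unpooled output can only correspond to non-maximal positions.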
    ASSERT_EQ(countNonZero(poolInp), poolInp.total());

    for (int c = 0; c < 32; ++c) {
        float *poolInpData = poolInp.ptr<float>(0, c);
        float *poolOutData = poolOut.ptr<float>(0, c);
        int64_t *poolIdsData = poolIds.ptr<int64_t>(0, c);
        float *unpoolInpData = unpoolInp.ptr<float>(0, c);
        float *unpoolOutData = unpoolOut.ptr<float>(0, c);
        for (int y = 0; y < 64; ++y) {
            for (int x = 0; x < 64; ++x) {
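                // Each 64x64 pooled map comes from a 128x128 map with 2x2 windows and stride 2:
                // the top-left element of the window for (y, x) has flat index (y * 128 + x) * 2,
                // and the remaining window elements sit at offsets +1, +128 and +129.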
                int maxIdx = (y * 128 + x) * 2;
                std::vector<int> indices{maxIdx + 1, maxIdx + 128, maxIdx + 129};
                std::string errMsg = format("Channel %d, y: %d, x: %d", c, y, x);
                for (int idx : indices) {
                    if (poolInpData[idx] > poolInpData[maxIdx]) {
                        EXPECT_EQ(unpoolOutData[maxIdx], 0.0f) << errMsg;
                        maxIdx = idx;
                    }
                }
                EXPECT_EQ(poolInpData[maxIdx], poolOutData[y * 64 + x]) << errMsg;
                if (backend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
                    EXPECT_EQ(poolIdsData[y * 64 + x], (int64_t)maxIdx) << errMsg;
                }
                EXPECT_EQ(unpoolOutData[maxIdx], unpoolInpData[y * 64 + x]) << errMsg;
            }
        }
    }
}

TEST_P(Test_TFLite, EfficientDet_int8) {
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO

    if (target != DNN_TARGET_CPU || (backend != DNN_BACKEND_OPENCV &&
        backend != DNN_BACKEND_TIMVX && backend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) {
        throw SkipTestException("Only OpenCV, TimVX and OpenVINO backends support INT8 on CPU");
    }
    Net net = readNet(findDataFile("dnn/tflite/coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite", false));
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat img = imread(findDataFile("dnn/dog416.png"));
    Mat blob = blobFromImage(img, 1.0, Size(320, 320));

    net.setInput(blob);
    Mat out = net.forward();

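    // Reference detections; each row is presumably [batch_id, class_id, score, left, top, right, bottom]
    // with box coordinates normalized to [0, 1].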
    Mat_<float> ref({3, 7}, {
        0, 7, 0.62890625, 0.6014542579650879, 0.13300055265426636, 0.8977657556533813, 0.292389452457428,
        0, 17, 0.56640625, 0.15983937680721283, 0.35905322432518005, 0.5155506730079651, 0.9409466981887817,
        0, 1, 0.5, 0.14357104897499084, 0.2240825891494751, 0.7183101177215576, 0.9140362739562988
    });
    normAssertDetections(ref, out, "", 0.5, 0.05, 0.1);
}

TEST_P(Test_TFLite, replicate_by_pack) {
    double l1 = 0, lInf = 0;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
    {
        l1 = 4e-4;
        lInf = 2e-3;
    }
    testLayer("replicate_by_pack", l1, lInf);
}

TEST_P(Test_TFLite, split) {
    testLayer("split");
}

TEST_P(Test_TFLite, fully_connected) {
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);
    testLayer("fully_connected");
}

TEST_P(Test_TFLite, permute) {
    testLayer("permutation_3d");
    // Temporarily disabled as TFLiteConverter produces an incorrect graph in this case
    //testLayer("permutation_4d_0123");
    testLayer("permutation_4d_0132");
    testLayer("permutation_4d_0213");
    testLayer("permutation_4d_0231");
}

TEST_P(Test_TFLite, global_average_pooling_2d) {
    testLayer("global_average_pooling_2d");
}

TEST_P(Test_TFLite, global_max_pooling_2d) {
    testLayer("global_max_pooling_2d");
}

TEST_P(Test_TFLite, leakyRelu) {
    testLayer("leakyRelu");
}

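// Instantiate the whole suite for all tested DNN backend/target combinations.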
INSTANTIATE_TEST_CASE_P(/**/, Test_TFLite, dnnBackendsAndTargets());

}} // namespace

#endif // OPENCV_TEST_DNN_TFLITE