diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
index 091d2d4ae9..730c08b25c 100644
--- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
+++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp
@@ -798,11 +798,67 @@ Mat getMatFromTensor(const opencv_onnx::TensorProto& tensor_proto)
             Mat(sizes, CV_32FC1, val).copyTo(blob);
         }
     }
+    else if (datatype == opencv_onnx::TensorProto_DataType_FLOAT16)
+    {
+        // FIXME, for now, we only load FP16 Tensor as FP32 Mat, full support for FP16 is required in the future.
+        CV_LOG_ONCE_WARNING(NULL, "DNN: load FP16 model as FP32 model, and it takes twice the FP16 RAM requirement.");
+
+        // ONNX saves float 16 data in two format: int32 and raw_data.
+        // Link: https://github.com/onnx/onnx/issues/4460#issuecomment-1224373746
+        if (!tensor_proto.int32_data().empty())
+        {
+            int offset = 0;
+#ifdef WORDS_BIGENDIAN
+            offset = 1;
+#endif
+            const ::google::protobuf::RepeatedField<int32_t> field = tensor_proto.int32_data();
+
+            AutoBuffer<float16_t> aligned_val;
+            size_t sz = tensor_proto.int32_data().size();
+            aligned_val.allocate(sz);
+            float16_t* bufPtr = aligned_val.data();
+
+            float16_t *fp16Ptr = (float16_t *)field.data();
+            for (int i = 0; i < sz; i++)
+            {
+                bufPtr[i] = fp16Ptr[i*2 + offset];
+            }
+            Mat(sizes, CV_16FC1, bufPtr).convertTo(blob, CV_32FC1);
+        }
+        else
+        {
+            char* val = const_cast<char*>(tensor_proto.raw_data().c_str());
+#if CV_STRONG_ALIGNMENT
+            // Aligned pointer is required.
+            AutoBuffer<float16_t> aligned_val;
+            if (!isAligned<sizeof(float16_t)>(val))
+            {
+                size_t sz = tensor_proto.raw_data().size();
+                aligned_val.allocate(divUp(sz, sizeof(float16_t)));
+                memcpy(aligned_val.data(), val, sz);
+                val = (char*)aligned_val.data();
+            }
+#endif
+            Mat(sizes, CV_16FC1, val).convertTo(blob, CV_32FC1);
+        }
+    }
     else if (datatype == opencv_onnx::TensorProto_DataType_DOUBLE)
     {
         const ::google::protobuf::RepeatedField<double> field = tensor_proto.double_data();
         CV_Assert(!field.empty());
-        Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1);
+        char* val = (char *)field.data();
+#if CV_STRONG_ALIGNMENT
+        // Aligned pointer is required.
+        AutoBuffer<double> aligned_val;
+        if (!isAligned<sizeof(double)>(val))
+        {
+            size_t sz = tensor_proto.raw_data().size();
+            aligned_val.allocate(divUp(sz, sizeof(double)));
+            memcpy(aligned_val.data(), val, sz);
+            val = (char*)aligned_val.data();
+        }
+#endif
+        Mat(sizes, CV_64FC1, val).convertTo(blob, CV_32FC1);
     }
     else if (datatype == opencv_onnx::TensorProto_DataType_INT32)
     {
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index cbeb059ea7..85f24a36b1 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -2102,6 +2102,11 @@ TEST_P(Test_ONNX_nets, MobileNet_v2)
 {
     testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
 }
+TEST_P(Test_ONNX_nets, MobileNet_v2_FP16)
+{
+    testONNXModels("mobilenetv2_fp16", npy, default_l1, default_lInf, true);
+}
+
 TEST_P(Test_ONNX_nets, LResNet100E_IR)
 {
     applyTestTag(