diff --git a/modules/dnn/src/layers/const_layer.cpp b/modules/dnn/src/layers/const_layer.cpp
index 5b76e4dd4c..2a1e27db56 100644
--- a/modules/dnn/src/layers/const_layer.cpp
+++ b/modules/dnn/src/layers/const_layer.cpp
@@ -62,10 +62,15 @@ public:
     {
         std::vector<UMat> outputs;
         outs.getUMatVector(outputs);
-        if (outs.depth() == CV_16S)
-            convertFp16(blobs[0], outputs[0]);
+        if (outs.depth() == CV_16S) {
+            auto blob = blobs[0];
+            if (blob.type() != CV_32F) {
+                blob.convertTo(blob, CV_32F);
+            }
+            convertFp16(blob, outputs[0]);
+        }
         else
-            blobs[0].copyTo(outputs[0]);
+            blobs[0].convertTo(outputs[0], outputs[0].type());
         return true;
     }
 #endif
@@ -80,7 +85,7 @@ public:
         std::vector<Mat> outputs;
         outputs_arr.getMatVector(outputs);
 
-        blobs[0].copyTo(outputs[0]);
+        blobs[0].convertTo(outputs[0], outputs[0].type());
     }
 
 #ifdef HAVE_CANN
@@ -126,6 +131,8 @@ public:
         ngraph::element::Type dType;
         if (blobs[0].depth() == CV_32F) {
             dType = ngraph::element::f32;
+        } else if (blobs[0].depth() == CV_32S) {
+            dType = ngraph::element::i32;
         } else if (blobs[0].depth() == CV_8S) {
             dType = ngraph::element::i8;
         } else {
@@ -163,7 +170,11 @@ public:
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         CV_Assert(blobs.size() == 1);
-        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
+        Mat blob = blobs[0];
+        if (blob.type() != CV_32F) {
+            blob.convertTo(blob, CV_32F);
+        }
+        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blob);
     }
 #endif
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 8cad34695f..d1bf278133 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -383,7 +383,7 @@ void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
     {
         inpShapes[i] = shape(inputs[i]);
         if (i > 0 && ddepth != inputs[i].depth())
-            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
+            CV_Error(Error::StsNotImplemented, cv::format("Mixed input data types. Required type: %d, actual type: %d", ddepth, inputs[i].depth()));
 
         // Quantize and Dequantize layer have different output type than input.
         if (params.type != "Quantize" && params.type != "Dequantize")
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 6aa6dc672e..f984d8d84f 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -675,6 +675,9 @@ TEST_P(Test_ONNX_layers, Compare_GT)
     testONNXModels("greater");
 }
 
+TEST_P(Test_ONNX_layers, Greater_input_dtype_int64) {
+    testONNXModels("greater_input_dtype_int64");
+}
 
 TEST_P(Test_ONNX_layers, Compare_LT)
 {
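
For reviewers, a minimal standalone sketch (not part of the patch; assumes only `opencv2/core.hpp`) of why the CPU path swaps `copyTo` for `convertTo`: `Mat::copyTo` re-creates the destination with the *source* type, so an int32 constant blob would silently turn a CV_32F output buffer into CV_32S, whereas `convertTo` keeps the requested output depth and converts the element values.

```cpp
// Sketch only, not part of the patch. Assumes OpenCV core (opencv2/core.hpp).
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // A constant blob stored as CV_32S, e.g. an ONNX int64 initializer
    // narrowed to int32 on import.
    cv::Mat blob = (cv::Mat_<int>(1, 3) << 1, 2, 3);

    // The network allocated a CV_32F output buffer for this layer.
    cv::Mat out(1, 3, CV_32F);

    // Old behavior: copyTo() calls out.create(size, blob.type()), so the
    // output buffer silently becomes CV_32S.
    blob.copyTo(out);
    std::cout << "copyTo:    out is CV_32S -> " << (out.depth() == CV_32S) << "\n";

    // New behavior: convertTo() preserves the requested output type and
    // converts the element values instead.
    out = cv::Mat(1, 3, CV_32F);
    blob.convertTo(out, out.type());
    std::cout << "convertTo: out is CV_32F -> " << (out.depth() == CV_32F) << "\n";
    return 0;
}
```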
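
Likewise, a sketch of the pre-conversion the OpenCL fp16 branch (and, analogously, the CUDA path) now performs: `convertFp16` only encodes to half precision from CV_32F input, so non-float blobs are widened first. Since `blob` is a shallow header copy and `convertTo` allocates a fresh buffer when the type changes, `blobs[0]` itself is left untouched.

```cpp
// Sketch only, mirroring the patched fp16 branch. Assumes OpenCV core.
#include <opencv2/core.hpp>

int main()
{
    cv::Mat blobs0 = (cv::Mat_<int>(1, 4) << 10, 20, 30, 40); // CV_32S constant

    // Shallow header copy: converting `blob` below does not modify blobs0.
    cv::Mat blob = blobs0;
    if (blob.type() != CV_32F)
        blob.convertTo(blob, CV_32F); // convertFp16 requires CV_32F input here

    cv::Mat half;
    cv::convertFp16(blob, half); // fp16 values stored in a 16-bit (CV_16S) buffer

    CV_Assert(half.depth() == CV_16S);
    CV_Assert(blobs0.depth() == CV_32S); // original constant blob unchanged
    return 0;
}
```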