Mirror of https://github.com/opencv/opencv.git
1d1faaabef
Added int32, int64 support and type inference to dnn #24411

**Added type inference to dnn, similar to the existing shape inference, and added int32 and int64 support.**

- Added a getTypes method for layers that calculates layer output and internal types from input types (similar to getMemoryShapes). By default, output and internal types = input[0] type.
- Added a type inference pipeline similar to the shape inference pipeline. The LayersShapes struct (used in the shape inference pipeline) now contains both shapes and types.
- All layer output blobs are now allocated using the types calculated by the type inference.
- Inputs and constants with int32 and int64 types are no longer automatically converted into float32 (a small sketch of this follows the description).
- Added int32 and int64 support for all the layers with indexing and for all the layers required in tests.

Added int32 and int64 support for CUDA:
- Added host<->device data movement for int32 and int64.
- Added int32 and int64 support for several layers (slightly modified CUDA C++ templates).

Passed all the accuracy tests on CPU, OCL, OCL_FP16, CUDA and CUDA_FP16 (except the RAFT model).

**CURRENT PROBLEMS**:
- The ONNX parser always converts int64 constants and layer attributes to int32, so some models with int64 constants don't work (e.g. RAFT). The solution is to disable the int64->int32 conversion and fix attribute reading in many of the ONNX layer parsers (https://github.com/opencv/opencv/issues/25102).
- Type inference and int support have not been added to the Vulkan backend, so it does not work at all right now.
- Some layers don't support int yet, so some untested models may not work.

**CURRENT WORKAROUNDS**:
- CPU arg_layer indices are computed in int32 followed by an int32->int64 conversion (the master branch has the same workaround, with an int32->float conversion).
- CPU and OCL pooling_layer indices are computed in float followed by a float->int64 conversion.
- CPU gather_layer indices are computed in int32, so int64 indices are converted to int32 (the master branch has the same workaround, with a float->int32 conversion).

**DISABLED TESTS**:
- RAFT model

**REMOVED TESTS**:
- Greater_input_dtype_int64 (it doesn't follow the ONNX rules; the whole test just compares a float tensor with an int constant).

**TODO IN NEXT PULL REQUESTS**:
- Add int64 support to the ONNX parser.
- Add int support for more layers.
- Add int support for OCL (currently int layers just run on CPU).
- Add int tests.
- Add int support for other backends.
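Below is a minimal, hypothetical Java sketch (not taken from this PR) illustrating the user-visible side of the change through the Java bindings: an int32 blob fed to a network is expected to keep its CV_32S type instead of being promoted to float32. The model file name and input name are made up for the example.

```java
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.dnn.Dnn;
import org.opencv.dnn.Net;

public class Int32InputSketch {
    public static void main(String[] args) {
        // Hypothetical ONNX model with an integer "indices" input (e.g. feeding a Gather layer).
        Net net = Dnn.readNetFromONNX("model_with_int_input.onnx");

        // A 1x4 int32 tensor; with this change it is expected to stay CV_32S inside
        // the network instead of being silently converted to CV_32F.
        Mat indices = new Mat(1, 4, CvType.CV_32S);
        indices.put(0, 0, new int[]{0, 2, 5, 7});

        net.setInput(indices, "indices");
        Mat out = net.forward();
        System.out.println("output type: " + CvType.typeToString(out.type()));
    }
}
```

A similar path is intended for int64 data once the ONNX parser stops down-casting int64 constants (see CURRENT PROBLEMS above).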
122 lines · 3.8 KiB · Java
package org.opencv.test.dnn;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfInt;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfByte;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.dnn.DictValue;
import org.opencv.dnn.Dnn;
import org.opencv.dnn.Layer;
import org.opencv.dnn.Net;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.test.OpenCVTestCase;

/*
 * regression test for #12324,
 * testing various java.util.List invocations,
 * which use the LIST_GET macro
 */

public class DnnListRegressionTest extends OpenCVTestCase {

    private final static String ENV_OPENCV_DNN_TEST_DATA_PATH = "OPENCV_DNN_TEST_DATA_PATH";

    private final static String ENV_OPENCV_TEST_DATA_PATH = "OPENCV_TEST_DATA_PATH";

    String modelFileName = "";
    String sourceImageFile = "";

    Net net;

    @Override
    protected void setUp() throws Exception {
        super.setUp();

        String envDnnTestDataPath = System.getenv(ENV_OPENCV_DNN_TEST_DATA_PATH);

        if(envDnnTestDataPath == null){
            isTestCaseEnabled = false;
            return;
        }

        File dnnTestDataPath = new File(envDnnTestDataPath);
        modelFileName = new File(dnnTestDataPath, "dnn/tensorflow_inception_graph.pb").toString();

        String envTestDataPath = System.getenv(ENV_OPENCV_TEST_DATA_PATH);

        if(envTestDataPath == null) throw new Exception(ENV_OPENCV_TEST_DATA_PATH + " has to be defined!");

        File testDataPath = new File(envTestDataPath);

        File f = new File(testDataPath, "dnn/grace_hopper_227.png");
        sourceImageFile = f.toString();
        if(!f.exists()) throw new Exception("Test image is missing: " + sourceImageFile);

        net = Dnn.readNetFromTensorflow(modelFileName);

        Mat image = Imgcodecs.imread(sourceImageFile);
        assertNotNull("Loading image from file failed!", image);

        Mat inputBlob = Dnn.blobFromImage(image, 1.0, new Size(224, 224), new Scalar(0), true, true);
        assertNotNull("Converting image to blob failed!", inputBlob);

        net.setInput(inputBlob, "input");
    }

    public void testSetInputsNames() {
        List<String> inputs = new ArrayList<>();
        inputs.add("input");
        try {
            net.setInputsNames(inputs);
        } catch(Exception e) {
            fail("Net setInputsNames failed: " + e.getMessage());
        }
    }

    public void testForward() {
        List<Mat> outs = new ArrayList<>();
        List<String> outNames = new ArrayList<>();
        outNames.add("softmax2");
        try {
            net.forward(outs, outNames);
        } catch(Exception e) {
            fail("Net forward failed: " + e.getMessage());
        }
    }

    public void testGetMemoryConsumption() {
        int layerId = 1;
        List<MatOfInt> netInputShapes = new ArrayList<>();
        netInputShapes.add(new MatOfInt(1, 3, 224, 224));
        MatOfInt netInputTypes = new MatOfInt(5); // 5 == CvType.CV_32F, the network input depth
        long[] weights = null;
        long[] blobs = null;
        try {
            net.getMemoryConsumption(layerId, netInputShapes, netInputTypes, weights, blobs);
        } catch(Exception e) {
            fail("Net getMemoryConsumption failed: " + e.getMessage());
        }
    }

    public void testGetFLOPS() {
        int layerId = 1;
        List<MatOfInt> netInputShapes = new ArrayList<>();
        netInputShapes.add(new MatOfInt(1, 3, 224, 224));
        MatOfInt netInputTypes = new MatOfInt(5); // 5 == CvType.CV_32F
        try {
            net.getFLOPS(layerId, netInputShapes, netInputTypes);
        } catch(Exception e) {
            fail("Net getFLOPS failed: " + e.getMessage());
        }
    }
}
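The two benchmark-style tests above pass the network input type to getMemoryConsumption and getFLOPS as a bare integer (new MatOfInt(5)). A minimal sketch of the same query, assuming the Net and method signatures used in the tests, that spells the type out with the named CvType constant; the layer id and input shape are illustrative only:

```java
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.CvType;
import org.opencv.core.MatOfInt;
import org.opencv.dnn.Net;

public class FlopsQuerySketch {
    // Returns the estimated FLOPS of a single layer for a 1x3x224x224 float32 input.
    // CvType.CV_32F has the integer value 5, matching new MatOfInt(5) in the tests above.
    static long layerFlops(Net net, int layerId) {
        List<MatOfInt> netInputShapes = new ArrayList<>();
        netInputShapes.add(new MatOfInt(1, 3, 224, 224)); // NCHW input shape
        MatOfInt netInputTypes = new MatOfInt(CvType.CV_32F);
        return net.getFLOPS(layerId, netInputShapes, netInputTypes);
    }
}
```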