Mirror of https://github.com/opencv/opencv.git
dnn: improve debug messages, add ONNX opset version
parent: 19ac54277b
commit: cc02fcd889
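Note (illustration, not part of the commit): the debug messages added below go through OpenCV's logging framework, so they only appear when the log level is raised to DEBUG. A minimal sketch of how one might surface them; the model path and input shape are placeholders:

#include <opencv2/core/utils/logger.hpp>
#include <opencv2/dnn.hpp>
#include <vector>

int main()
{
    // Same effect as running with OPENCV_LOG_LEVEL=DEBUG set in the environment.
    cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_DEBUG);

    // ONNX import logs (opset version, per-node messages) are emitted while loading.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");  // placeholder path

    std::vector<int> inputShape = {1, 3, 224, 224};             // placeholder shape
    cv::Mat input(inputShape, CV_32F, cv::Scalar(0));
    net.setInput(input);
    net.forward();  // shape-inference and layer logs are emitted at DEBUG level here
    return 0;
}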
@@ -247,8 +247,6 @@ std::vector<Target> getAvailableTargets(Backend be)
 
 namespace
 {
-typedef std::vector<MatShape> ShapesVec;
-
 struct LayerShapes
 {
     ShapesVec in, out, internal;
@@ -2981,20 +2979,24 @@ struct Net::Impl : public detail::NetImplBase
 
     void getLayerShapesRecursively(int id, LayersShapesMap& inOutShapes)
     {
-        std::vector<LayerPin>& inputLayerIds = layers[id].inputBlobsId;
+        CV_CheckGE(id, 0, "");
+        CV_CheckLT(id, (int)layers.size(), "");
+        LayerData& layerData = layers[id];
+        std::vector<LayerPin>& inputLayerIds = layerData.inputBlobsId;
+        LayerShapes& layerShapes = inOutShapes[id];
 
-        if (id == 0 && inOutShapes[id].in[0].empty())
+        if (id == 0 && layerShapes.in[0].empty())
         {
-            if (!layers[0].outputBlobs.empty())
+            if (!layerData.outputBlobs.empty())
             {
                 ShapesVec shapes;
-                for (int i = 0; i < layers[0].outputBlobs.size(); i++)
+                for (int i = 0; i < layerData.outputBlobs.size(); i++)
                 {
-                    Mat& inp = layers[0].outputBlobs[i];
-                    CV_Assert(inp.total());
+                    Mat& inp = layerData.outputBlobs[i];
+                    CV_Assert(!inp.empty());
                     shapes.push_back(shape(inp));
                 }
-                inOutShapes[0].in = shapes;
+                layerShapes.in = shapes;
             }
             else
             {
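Note (illustration, not part of the commit): the hunk above replaces bare CV_Assert calls with the CV_Check* macros from <opencv2/core/check.hpp>, which embed the checked values in the exception text and make the failure messages easier to act on. A standalone sketch:

#include <opencv2/core.hpp>
#include <opencv2/core/check.hpp>
#include <iostream>

int main()
{
    int id = -1;
    try
    {
        // Fails and reports both the value of 'id' and the bound it was checked against.
        CV_CheckGE(id, 0, "layer id must be non-negative");
    }
    catch (const cv::Exception& e)
    {
        std::cout << e.what() << std::endl;
    }
    return 0;
}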
@@ -3010,17 +3012,17 @@ struct Net::Impl : public detail::NetImplBase
                 }
                 if (none)
                 {
-                    inOutShapes[0].out.clear();
+                    layerShapes.out.clear();
                     return;
                 }
                 else
                 {
-                    inOutShapes[0].in = inputShapes;
+                    layerShapes.in = inputShapes;
                 }
             }
         }
 
-        if (inOutShapes[id].in.empty())
+        if (layerShapes.in.empty())
         {
             for(int i = 0; i < inputLayerIds.size(); i++)
             {
@@ -3033,14 +3035,14 @@ struct Net::Impl : public detail::NetImplBase
                     getLayerShapesRecursively(layerId, inOutShapes);
                 }
                 const MatShape& shape = inOutShapes[layerId].out[inputLayerIds[i].oid];
-                inOutShapes[id].in.push_back(shape);
+                layerShapes.in.push_back(shape);
             }
         }
-        const ShapesVec& is = inOutShapes[id].in;
-        ShapesVec& os = inOutShapes[id].out;
-        ShapesVec& ints = inOutShapes[id].internal;
-        int requiredOutputs = layers[id].requiredOutputs.size();
-        Ptr<Layer> l = layers[id].getLayerInstance();
+        const ShapesVec& is = layerShapes.in;
+        ShapesVec& os = layerShapes.out;
+        ShapesVec& ints = layerShapes.internal;
+        int requiredOutputs = layerData.requiredOutputs.size();
+        Ptr<Layer> l = layerData.getLayerInstance();
         CV_Assert(l);
         bool layerSupportInPlace = false;
         try
@@ -3068,13 +3070,38 @@ struct Net::Impl : public detail::NetImplBase
             CV_LOG_ERROR(NULL, "Exception message: " << e.what());
             throw;
         }
-        inOutShapes[id].supportInPlace = layerSupportInPlace;
+        layerShapes.supportInPlace = layerSupportInPlace;
 
-        for (int i = 0; i < ints.size(); i++)
-            CV_Assert(total(ints[i]) > 0);
+        try
+        {
+            for (int i = 0; i < ints.size(); i++)
+                CV_CheckGT(total(ints[i]), 0, "");
 
-        for (int i = 0; i < os.size(); i++)
-            CV_Assert(total(os[i]) > 0);
+            for (int i = 0; i < os.size(); i++)
+                CV_CheckGT(total(os[i]), 0, "");
+        }
+        catch (const cv::Exception& e)
+        {
+            CV_LOG_ERROR(NULL, "OPENCV/DNN: [" << l->type << "]:(" << l->name << "): getMemoryShapes() post validation failed." <<
+                    " inputs=" << is.size() <<
+                    " outputs=" << os.size() << "/" << requiredOutputs <<
+                    " blobs=" << l->blobs.size() <<
+                    " inplace=" << layerSupportInPlace);
+            for (size_t i = 0; i < is.size(); ++i)
+            {
+                CV_LOG_ERROR(NULL, " input[" << i << "] = " << toString(is[i]));
+            }
+            for (size_t i = 0; i < os.size(); ++i)
+            {
+                CV_LOG_ERROR(NULL, " output[" << i << "] = " << toString(os[i]));
+            }
+            for (size_t i = 0; i < l->blobs.size(); ++i)
+            {
+                CV_LOG_ERROR(NULL, " blobs[" << i << "] = " << typeToString(l->blobs[i].type()) << " " << toString(shape(l->blobs[i])));
+            }
+            CV_LOG_ERROR(NULL, "Exception message: " << e.what());
+            throw;
+        }
     }
 
     void getLayersShapes(const ShapesVec& netInputShapes,
@@ -3102,42 +3129,57 @@ struct Net::Impl : public detail::NetImplBase
 
     void updateLayersShapes()
     {
-        CV_Assert(!layers[0].outputBlobs.empty());
+        CV_LOG_DEBUG(NULL, "updateLayersShapes() with layers.size=" << layers.size());
+        CV_Assert(netInputLayer);
+        DataLayer& inputLayer = *netInputLayer;
+        LayerData& inputLayerData = layers[0];
+        CV_Assert(inputLayerData.layerInstance.get() == &inputLayer);
+        CV_Assert(!inputLayerData.outputBlobs.empty());
         ShapesVec inputShapes;
-        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
+        for(int i = 0; i < inputLayerData.outputBlobs.size(); i++)
         {
-            Mat& inp = layers[0].outputBlobs[i];
-            CV_Assert(inp.total());
-            if (preferableBackend == DNN_BACKEND_OPENCV &&
+            Mat& inp = inputLayerData.outputBlobs[i];
+            CV_Assert(!inp.empty());
+            if (preferableBackend == DNN_BACKEND_OPENCV &&  // FIXIT: wrong place for output allocation
                 preferableTarget == DNN_TARGET_OPENCL_FP16)
             {
-                layers[0].outputBlobs[i].create(inp.dims, inp.size, CV_16S);
+                inp.create(inp.dims, inp.size, CV_16S);
            }
             inputShapes.push_back(shape(inp));
         }
+        CV_LOG_DEBUG(NULL, toString(inputShapes, "Network input shapes"));
         LayersShapesMap layersShapes;
         layersShapes[0].in = inputShapes;
         for (MapIdToLayerData::iterator it = layers.begin();
              it != layers.end(); it++)
         {
             int layerId = it->first;
-            std::vector<LayerPin>& inputLayerIds = it->second.inputBlobsId;
-            if (layersShapes[layerId].in.empty())
+            LayerData& layerData = it->second;
+            std::vector<LayerPin>& inputLayerIds = layerData.inputBlobsId;
+            LayerShapes& layerShapes = layersShapes[layerId];
+            CV_LOG_DEBUG(NULL, "layer " << layerId << ": [" << layerData.type << "]:(" << layerData.name << ") with inputs.size=" << inputLayerIds.size());
+            if (layerShapes.in.empty())
             {
                 for(int i = 0; i < inputLayerIds.size(); i++)
                 {
-                    int inputLayerId = inputLayerIds[i].lid;
+                    const LayerPin& inputPin = inputLayerIds[i];
+                    int inputLayerId = inputPin.lid;
+                    CV_LOG_DEBUG(NULL, " input[" << i << "] " << inputLayerId << ":" << inputPin.oid << " as [" << layers[inputLayerId].type << "]:(" << layers[inputLayerId].name << ")");
                     LayersShapesMap::iterator inputIt = layersShapes.find(inputLayerId);
-                    if(inputIt == layersShapes.end() || inputIt->second.out.empty())
+                    if (inputIt == layersShapes.end() || inputIt->second.out.empty())
                     {
                         getLayerShapesRecursively(inputLayerId, layersShapes);
                     }
-                    const MatShape& shape = layersShapes[inputLayerId].out[inputLayerIds[i].oid];
-                    layersShapes[layerId].in.push_back(shape);
+                    const MatShape& shape = layersShapes[inputLayerId].out[inputPin.oid];
+                    layerShapes.in.push_back(shape);
                 }
-                it->second.layerInstance->updateMemoryShapes(layersShapes[layerId].in);
+                layerData.layerInstance->updateMemoryShapes(layerShapes.in);
             }
+            CV_LOG_DEBUG(NULL, "Layer " << layerId << ": " << toString(layerShapes.in, "input shapes"));
+            CV_LOG_IF_DEBUG(NULL, !layerShapes.out.empty(), "Layer " << layerId << ": " << toString(layerShapes.out, "output shapes"));
+            CV_LOG_IF_DEBUG(NULL, !layerShapes.internal.empty(), "Layer " << layerId << ": " << toString(layerShapes.internal, "internal shapes"));
         }
+        CV_LOG_DEBUG(NULL, "updateLayersShapes() - DONE");
     }
 
     LayerPin getLatestLayerPin(const std::vector<LayerPin>& pins)
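Note (illustration, not part of the commit): the shape propagation reworked above (getLayerShapesRecursively / updateLayersShapes) also backs the public Net::getLayersShapes() query, so the new logging can be cross-checked from user code. A sketch, with the model path and input shape as placeholders:

#include <opencv2/dnn.hpp>
#include <iostream>
#include <vector>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");  // placeholder path

    std::vector<cv::dnn::MatShape> netInputShapes;
    netInputShapes.push_back(cv::dnn::MatShape{1, 3, 224, 224});  // placeholder shape

    std::vector<int> layerIds;
    std::vector<std::vector<cv::dnn::MatShape> > inShapes, outShapes;
    net.getLayersShapes(netInputShapes, layerIds, inShapes, outShapes);

    // Print how many output blobs the shape inference predicts per layer.
    for (size_t i = 0; i < layerIds.size(); ++i)
        std::cout << "layer " << layerIds[i] << ": outputs=" << outShapes[i].size() << std::endl;
    return 0;
}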
@@ -29,6 +29,43 @@ struct NetImplBase
 
 } // namespace detail
 
 
+typedef std::vector<MatShape> ShapesVec;
+
+static inline std::string toString(const ShapesVec& shapes, const std::string& name = std::string())
+{
+    std::ostringstream ss;
+    if (!name.empty())
+        ss << name << ' ';
+    ss << '[';
+    for(size_t i = 0, n = shapes.size(); i < n; ++i)
+        ss << ' ' << toString(shapes[i]);
+    ss << " ]";
+    return ss.str();
+}
+
+static inline std::string toString(const Mat& blob, const std::string& name = std::string())
+{
+    std::ostringstream ss;
+    if (!name.empty())
+        ss << name << ' ';
+    if (blob.empty())
+    {
+        ss << "<empty>";
+    }
+    else if (blob.dims == 1)
+    {
+        Mat blob_ = blob;
+        blob_.dims = 2;  // hack
+        ss << blob_.t();
+    }
+    else
+    {
+        ss << blob.reshape(1, 1);
+    }
+    return ss.str();
+}
+
 CV__DNN_EXPERIMENTAL_NS_END
 }} // namespace
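Note (illustration, not part of the commit): a rough picture of the text produced by the new toString(ShapesVec) helper above. shapeToString() below is a local stand-in for the existing per-shape formatter, not OpenCV API:

#include <sstream>
#include <string>
#include <vector>

typedef std::vector<int> MatShape;
typedef std::vector<MatShape> ShapesVec;

// Local stand-in for the per-shape formatter used by the real helper.
static std::string shapeToString(const MatShape& s)
{
    std::ostringstream ss;
    ss << '[';
    for (size_t i = 0; i < s.size(); ++i)
        ss << (i ? " " : "") << s[i];
    ss << ']';
    return ss.str();
}

// Same formatting pattern as the new toString(ShapesVec, name) helper.
static std::string shapesToString(const ShapesVec& shapes, const std::string& name = std::string())
{
    std::ostringstream ss;
    if (!name.empty())
        ss << name << ' ';
    ss << '[';
    for (size_t i = 0; i < shapes.size(); ++i)
        ss << ' ' << shapeToString(shapes[i]);
    ss << " ]";
    return ss.str();
}

// shapesToString({{1, 3, 224, 224}, {1, 10}}, "Network input shapes")
// -> "Network input shapes [ [1 3 224 224] [1 10] ]"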
@@ -8,8 +8,6 @@
 #ifndef __OPENCV_DNN_ONNX_SIMPLIFIER_HPP__
 #define __OPENCV_DNN_ONNX_SIMPLIFIER_HPP__
 
-#include "../precomp.hpp"
-
 #if defined(__GNUC__) && __GNUC__ >= 5
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wsuggest-override"
@@ -65,6 +65,7 @@ public:
 
     ONNXImporter(Net& net, const char *onnxFile)
         : dstNet(net), dispatch(buildDispatchMap())
+        , onnx_opset(0)
     {
         hasDynamicShapes = false;
         CV_Assert(onnxFile);
@@ -86,6 +87,7 @@ public:
 
     ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
         : dstNet(net), dispatch(buildDispatchMap())
+        , onnx_opset(0)
     {
         hasDynamicShapes = false;
         CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");
@@ -178,6 +180,9 @@ private:
 
     const DispatchMap dispatch;
     static const DispatchMap buildDispatchMap();
+
+    int onnx_opset;  // OperatorSetIdProto for 'onnx' domain
+    void parseOperatorSet();
 };
 
 inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
@@ -489,10 +494,45 @@ void ONNXImporter::addNegation(const LayerParams& layerParams, opencv_onnx::Node
 
 void ONNXImporter::addConstant(const std::string& name, const Mat& blob)
 {
+    CV_LOG_DEBUG(NULL, "DNN/ONNX: add constant '" << name << "' shape=" << toString(shape(blob)) << ": " << toString(blob));
     constBlobs.insert(std::make_pair(name, blob));
     outShapes.insert(std::make_pair(name, shape(blob)));
 }
 
+void ONNXImporter::parseOperatorSet()
+{
+    int ir_version = model_proto.has_ir_version() ? static_cast<int>(model_proto.ir_version()) : -1;
+    if (ir_version < 3)
+        return;
+
+    int opset_size = model_proto.opset_import_size();
+    if (opset_size <= 0)
+    {
+        CV_LOG_INFO(NULL, "DNN/ONNX: missing opset information")
+        return;
+    }
+
+    for (int i = 0; i < opset_size; ++i)
+    {
+        const ::opencv_onnx::OperatorSetIdProto& opset_entry = model_proto.opset_import(i);
+        const std::string& domain = opset_entry.has_domain() ? opset_entry.domain() : std::string();
+        int version = opset_entry.has_version() ? opset_entry.version() : -1;
+        if (domain.empty() || domain == "ai.onnx")
+        {
+            // ONNX opset covered by specification: https://github.com/onnx/onnx/blob/master/docs/Operators.md
+            onnx_opset = std::max(onnx_opset, version);
+        }
+        else
+        {
+            // OpenCV doesn't know other opsets
+            // will fail later on unsupported node processing
+            CV_LOG_WARNING(NULL, "DNN/ONNX: unsupported opset[" << i << "]: domain='" << domain << "' version=" << version);
+        }
+    }
+
+    CV_LOG_INFO(NULL, "DNN/ONNX: ONNX opset version = " << onnx_opset);
+}
+
 void ONNXImporter::populateNet()
 {
     CV_Assert(model_proto.has_graph());
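Note (illustration, not part of the commit): the rule implemented by parseOperatorSet() above, reduced to a standalone function over (domain, version) pairs. Entries for the default ONNX domain (empty or "ai.onnx") contribute their maximum version; anything else is ignored here and left to fail later on node processing. Names below are local, not OpenCV API:

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

// Returns the highest version declared for the default ONNX operator set.
static int selectOnnxOpset(const std::vector<std::pair<std::string, int> >& opsetImports)
{
    int onnxOpset = 0;
    for (size_t i = 0; i < opsetImports.size(); ++i)
    {
        const std::string& domain = opsetImports[i].first;
        int version = opsetImports[i].second;
        if (domain.empty() || domain == "ai.onnx")
            onnxOpset = std::max(onnxOpset, version);
    }
    return onnxOpset;
}

// selectOnnxOpset({{"", 13}, {"ai.onnx.ml", 2}}) == 13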
@@ -513,6 +553,8 @@ void ONNXImporter::populateNet()
                << ", outputs = " << graph_proto.output_size()
                );
 
+    parseOperatorSet();
+
     simplifySubgraphs(graph_proto);
 
     const int layersSize = graph_proto.node_size();
@@ -539,7 +581,8 @@ void ONNXImporter::populateNet()
             if (!tensorShape.dim(j).dim_param().empty() && !(j == 0 && inpShape.size() >= 3))
                 hasDynamicShapes = true;
         }
-        if (!inpShape.empty() && !hasDynamicShapes)
+        CV_LOG_DEBUG(NULL, "DNN/ONNX: input[" << i << "] shape=" << toString(inpShape));
+        if (!inpShape.empty() && !hasDynamicShapes)  // FIXIT result is not reliable for models with multiple inputs
         {
             inpShape[0] = std::max(inpShape[0], 1);  // It's OK to have undetermined batch size
         }
@@ -573,6 +616,15 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto)
     CV_Assert(node_proto.output_size() >= 1);
     std::string name = node_proto.output(0);
     const std::string& layer_type = node_proto.op_type();
+    const std::string& layer_type_domain = node_proto.has_domain() ? node_proto.domain() : std::string();
+    if (!layer_type_domain.empty() && layer_type_domain != "ai.onnx")
+    {
+        CV_LOG_WARNING(NULL, "DNN/ONNX: can't handle node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
+                << cv::format("[%s@%s]:(%s)", layer_type.c_str(), layer_type_domain.c_str(), name.c_str())
+        );
+        CV_Error(Error::StsNotImplemented, cv::format("ONNX: unsupported domain: %s", layer_type_domain.c_str()));
+    }
+
     CV_LOG_DEBUG(NULL, "DNN/ONNX: processing node with " << node_proto.input_size() << " inputs and " << node_proto.output_size() << " outputs: "
             << cv::format("[%s]:(%s)", layer_type.c_str(), name.c_str())
     );
@@ -60,5 +60,6 @@
 #include <opencv2/core/utils/trace.hpp>
 #include <opencv2/dnn.hpp>
 #include <opencv2/dnn/all_layers.hpp>
 #include <opencv2/dnn/shape_utils.hpp>
 
+#include "dnn_common.hpp"
@@ -8,8 +8,6 @@
 #ifndef __OPENCV_DNN_TF_SIMPLIFIER_HPP__
 #define __OPENCV_DNN_TF_SIMPLIFIER_HPP__
 
-#include "../precomp.hpp"
-
 #ifdef HAVE_PROTOBUF
 
 #include "tf_io.hpp"