Merge pull request #13841 from dkurt:dnn_ie_future_3
commit 8cedc052ca
@@ -730,9 +730,9 @@ struct DataLayer : public Layer
         biases->set(biasesVec);
 
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-        ieLayer.setWeights(weights);
-        ieLayer.setBiases(biases);
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
+        addConstantData("weights", weights, ieLayer);
+        addConstantData("biases", biases, ieLayer);
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
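The same mechanical change repeats across the layer files below (batch norm, convolution, deconvolution, PReLU, fully connected, scale): a typed builder wrapper is converted to a generic InferenceEngine::Builder::Layer, and setWeights/setBiases calls become calls to the addConstantData helper this patch adds to op_inf_engine.cpp. A minimal sketch of the pattern, not a line of the patch; the header name and surrounding function are assumptions:

#include <ie_builders.hpp>  // assumed header for the (deprecated) Builder API

// Helper added by this patch (defined in op_inf_engine.cpp, declared in op_inf_engine.hpp).
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
                     InferenceEngine::Builder::Layer& l);

InferenceEngine::Builder::Layer makeScaleShift(const std::string& name,
                                               InferenceEngine::Blob::Ptr weights,
                                               InferenceEngine::Blob::Ptr biases)
{
    // Generic Builder::Layer instead of the typed ScaleShiftLayer wrapper:
    // the generic object exposes getParameters(), which the 2019R1+ path of
    // addConstantData uses to attach blobs by name.
    InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
    addConstantData("weights", weights, l);
    addConstantData("biases", biases, l);
    return l;
}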
@@ -1638,25 +1638,15 @@ struct Net::Impl
                     preferableTarget == DNN_TARGET_FPGA) && !fused)
                 {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-                    bool hasWeights = false;
                     for (const std::string& name : {"weights", "biases"})
                     {
                         auto it = ieNode->layer.getParameters().find(name);
                         if (it != ieNode->layer.getParameters().end())
                         {
-                            InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
-                            it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-                            hasWeights = true;
+                            InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
+                            it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
                         }
                     }
-                    if (!hasWeights)
-                    {
-                        InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
-                                                              InferenceEngine::Precision::FP16,
-                                                              InferenceEngine::Layout::C, {1});
-                        blob->allocate();
-                        ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
-                    }
 #else
                     auto& blobs = ieNode->layer.getConstantData();
                     if (blobs.empty())
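With constants now stored in the parameter map as plain Blob::Ptr, the FP16 path simply rewrites each entry in place, and the old workaround of attaching a dummy one-element FP16 "weights" blob to weightless layers goes away. For reference, a rough sketch of what the convertFp16 helper (declared later in op_inf_engine.hpp) does; this is paraphrased under the assumptions of the pre-ngraph blob API, not copied from the patch:

#include <inference_engine.hpp>
#include <opencv2/core.hpp>

InferenceEngine::Blob::Ptr convertFp16Sketch(const InferenceEngine::Blob::Ptr& blob)
{
    // Allocate an FP16 blob of the same geometry, then convert the FP32
    // payload element-wise with OpenCV's cv::convertFp16.
    auto halfs = InferenceEngine::make_shared_blob<int16_t>(
        InferenceEngine::Precision::FP16, blob->layout(), blob->dims());
    halfs->allocate();
    cv::Mat floatsData(1, (int)blob->size(), CV_32F, blob->buffer());
    cv::Mat halfsData(1, (int)blob->size(), CV_16SC1, halfs->buffer());
    cv::convertFp16(floatsData, halfsData);  // FP32 -> FP16 conversion
    return halfs;
}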
@@ -350,11 +350,10 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         const size_t numChannels = weights_.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
-        ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+        addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
+        addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
         InferenceEngine::LayerParams lp;
@@ -125,7 +125,9 @@ public:
             ieLayer.getParameters()["axis"] = input->dims.size() - 1;
             ieLayer.getParameters()["out_sizes"] = input->dims[0];
         }
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        std::vector<size_t> shape(input->dims);
+        std::reverse(shape.begin(), shape.end());
+        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
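The builder layer now publishes an explicit input shape instead of an empty placeholder port, which the newer Builder API needs for shape and precision propagation. The reverse is there because the pre-ngraph IE API reports dims minor-to-major while a Port expects major-to-minor order. A short annotated restatement, with `input` assumed to be an InferenceEngine::DataPtr:

#include <algorithm>
#include <vector>
#include <ie_builders.hpp>  // assumed header for the (deprecated) Builder API

void setPortsSketch(InferenceEngine::Builder::Layer& ieLayer,
                    const InferenceEngine::DataPtr& input)
{
    std::vector<size_t> shape(input->dims);    // minor-to-major, e.g. {W, H, C, N}
    std::reverse(shape.begin(), shape.end());  // major-to-minor, e.g. {N, C, H, W}
    ieLayer.setInputPorts({InferenceEngine::Port(shape)});
}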
@@ -493,11 +493,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)outCn);
 
-        ieLayer.setWeights(ieWeights);
-        if (ieBiases)
-            ieLayer.setBiases(ieBiases);
-
         InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", ieWeights, l);
+        if (ieBiases)
+            addConstantData("biases", ieBiases, l);
+
         if (!padMode.empty())
             l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
 
@@ -1725,12 +1725,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)numOutput);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
         if (hasBias())
-        {
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C));
-        }
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
         const int group = numOutput / outGroupCn;
@@ -1134,10 +1134,10 @@ struct ChannelsPReLUFunctor
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
     {
-        InferenceEngine::Builder::PReLULayer ieLayer("");
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
         const size_t numChannels = scale.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C));
-        return ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
+        return l;
     }
 #else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
@@ -448,11 +448,12 @@ public:
         const int outNum = blobs[0].size[0];
         ieLayer.setOutputNum(outNum);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
         if (blobs.size() > 1)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
 
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
@@ -291,7 +291,7 @@ public:
             l.getParameters()["channel_shared"] = blobs[0].total() == 1;
         }
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-        l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
+        l.getParameters()["weights"] = weights;
 #else
         l.addConstantData("weights", weights);
 #endif
@@ -524,12 +524,12 @@ public:
         if (_stepX == _stepY)
         {
             l.getParameters()["step"] = _stepX;
-            l.getParameters()["step_h"] = 0.0;
-            l.getParameters()["step_w"] = 0.0;
+            l.getParameters()["step_h"] = 0.0f;
+            l.getParameters()["step_w"] = 0.0f;
         }
         else
         {
-            l.getParameters()["step"] = 0.0;
+            l.getParameters()["step"] = 0.0f;
             l.getParameters()["step_h"] = _stepY;
             l.getParameters()["step_w"] = _stepX;
         }
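The 0.0 → 0.0f changes look cosmetic but likely are not: InferenceEngine::Parameter keeps the exact static type of the value it was given, so a double stored where the consumer expects a float fails at read time. A minimal sketch of the failure mode, assuming the behaviour of the 2019R1-era ie_parameter.hpp:

#include <ie_parameter.hpp>  // assumed header for InferenceEngine::Parameter

void parameterTypeSketch()
{
    InferenceEngine::Parameter pd = 0.0;   // stored as double
    InferenceEngine::Parameter pf = 0.0f;  // stored as float
    // float bad = pd.as<float>();  // would throw: stored type is double
    float ok = pf.as<float>();      // fine: the stored type matches
    (void)ok;
}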
@@ -198,13 +198,13 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
 
         CV_Assert(!blobs.empty());
         const size_t numChannels = blobs[0].total();
         if (hasWeights)
         {
-            ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C));
+            addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
         }
         else
         {
@@ -214,11 +214,11 @@ public:
 
             std::vector<float> ones(numChannels, 1);
             weights->set(ones);
-            ieLayer.setWeights(weights);
+            addConstantData("weights", weights, l);
         }
         if (hasBias)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
+// For networks with input layer which has an empty name, IE generates a name id[some_number].
+// OpenCV lets users use an empty input name and to prevent unexpected naming,
+// we can use some predefined name.
+static std::string kDefaultInpLayerName = "empty_inp_layer_name";
+
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
@@ -90,7 +95,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     it = layers.find(inpName);
     if (it == layers.end())
     {
-        InferenceEngine::Builder::InputLayer inpLayer(inpName);
+        InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
 
         std::vector<size_t> shape(inp->blob->dims());
         std::reverse(shape.begin(), shape.end());
@@ -119,6 +124,14 @@ void InfEngineBackendNet::init(int targetId)
     for (int id : unconnectedLayersIds)
     {
         InferenceEngine::Builder::OutputLayer outLayer("myconv1");
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+        // Inference Engine determines network precision by ports.
+        InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+                                        targetId == DNN_TARGET_OPENCL_FP16) ?
+                                       InferenceEngine::Precision::FP16 :
+                                       InferenceEngine::Precision::FP32;
+        outLayer.setPort(InferenceEngine::Port({}, p));
+#endif
         netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
     }
     cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
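Since the network is now validated per port, the precision for FP16 targets has to be declared on the output ports themselves; the empty shape `{}` is filled in by IE's own shape inference. Condensed from the hunk above into a standalone helper; the function name is illustrative:

#include <ie_builders.hpp>  // assumed header for the (deprecated) Builder API
#include <opencv2/dnn.hpp>

InferenceEngine::Precision outputPrecision(int targetId)
{
    bool useFp16 = (targetId == cv::dnn::DNN_TARGET_MYRIAD ||
                    targetId == cv::dnn::DNN_TARGET_OPENCL_FP16);
    return useFp16 ? InferenceEngine::Precision::FP16
                   : InferenceEngine::Precision::FP32;
}

void addOutputSketch(InferenceEngine::Builder::OutputLayer& outLayer, int targetId)
{
    // Precision travels with the port; the shape is left for IE to infer.
    outLayer.setPort(InferenceEngine::Port({}, outputPrecision(targetId)));
}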
@@ -167,12 +180,56 @@ void InfEngineBackendNet::init(int targetId)
     initPlugin(cnn);
 }
 
-void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
+void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
 {
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // Add weights to network and connect them after input blobs.
+    std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
+    std::vector<int> blobsIds;
+    std::vector<int> portIds;
+    for (const std::string& name : {"weights", "biases"})
+    {
+        bool asInput = false;
+        int portId = 0;
+        for (int i = 0; i < layer.getInputPorts().size(); ++i)
+        {
+            const auto& port = layer.getInputPorts()[i];
+            auto it = port.getParameters().find("type");
+            if (it != port.getParameters().end() && it->second == name)
+            {
+                portId = i;
+                asInput = true;
+                break;
+            }
+        }
+
+        if (!asInput)
+            continue;
+
+        auto it = params.find(name);
+        if (it != params.end())
+        {
+            InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
+            params.erase(it);
+            int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
+            blobsIds.push_back(blobId);
+            portIds.push_back(portId);
+        }
+    }
+#endif
+
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
     CV_Assert(layers.insert({layerName, id}).second);
     unconnectedLayersIds.insert(id);
+
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // By default, all the weights are connected to last ports ids.
+    for (int i = 0; i < blobsIds.size(); ++i)
+    {
+        netBuilder.connect((size_t)blobsIds[i], {(size_t)id, portIds[i]});
+    }
+#endif
 }
 
 void InfEngineBackendNet::addOutput(const std::string& name)
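The reworked addLayer is where the two halves of this patch meet: constants attached by addConstantData travel in the layer's parameter map until the layer is added to the network, at which point (on 2019R1+) they are peeled off, registered as ConstLayer nodes, and wired into the input ports whose "type" parameter names them. A condensed sketch of that flow for the "weights" case, using only calls that appear in the hunk above; `blob` and `portId` are illustrative:

// 1. Pull the constant out of the layer's parameter map so it does not get
//    serialized as an ordinary parameter.
std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
InferenceEngine::Blob::Ptr blob = params["weights"].as<InferenceEngine::Blob::Ptr>();
params.erase("weights");

// 2. Materialize the constant as its own node in the Builder graph.
int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer("weights").setData(blob));

// 3. Add the consumer, then connect the constant's output to the weight port.
int id = netBuilder.addLayer(layer);
netBuilder.connect((size_t)blobId, {(size_t)id, portId});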
@@ -705,7 +762,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
     {
         std::string name = wrapper->dataPtr->name;
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        name = name.empty() ? "id1" : name;  // TODO: drop the magic input name.
+        name = name.empty() ? kDefaultInpLayerName : name;
 #endif
         allBlobs.insert({name, wrapper->blob});
     }
@@ -776,6 +833,18 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
     return halfs;
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
+                     InferenceEngine::Builder::Layer& l)
+{
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    l.getParameters()[name] = data;
+#else
+    l.addConstantData(name, data);
+#endif
+}
+#endif
+
 #endif  // HAVE_INF_ENGINE
 
 bool haveInfEngine()
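Defining the helper once keeps the INF_ENGINE_VER split out of every layer file: on 2018R5 the call expands to the Builder API's own addConstantData, while on 2019R1+ the blob is parked in getParameters() for addLayer() to pick up. A hypothetical new layer would attach its constants the same way; the Mat variables and channel count here are illustrative, not from the patch:

InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer("my_layer");
addConstantData("weights", wrapToInfEngineBlob(weightsMat, {numChannels}, InferenceEngine::Layout::C), l);
addConstantData("biases", wrapToInfEngineBlob(biasMat, {numChannels}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));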
@@ -162,7 +162,7 @@ public:
 
     InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
 
-    void addLayer(const InferenceEngine::Builder::Layer& layer);
+    void addLayer(InferenceEngine::Builder::Layer& layer);
 
     void addOutput(const std::string& name);
 
@@ -255,6 +255,10 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
+#endif
+
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers are imported by OpenCV and supported by
 // Inference Engine. The main difference is that they do not perform forward pass.
@@ -695,7 +695,8 @@ TEST_P(Eltwise, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+        (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
 #endif
 