// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "precomp.hpp"
#include "op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#ifdef HAVE_INF_ENGINE
#include <ie_extension.h>
#include <ie_plugin_dispatcher.hpp>
#endif  // HAVE_INF_ENGINE

#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.hpp>
namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

// For networks with an input layer which has an empty name, IE generates a name id[some_number].
// OpenCV lets users use an empty input name, so to prevent unexpected naming we
// substitute a predefined one.
static std::string kDefaultInpLayerName = "empty_inp_layer_name";
static std::string kOpenCVLayersType = "OpenCVLayer";
static std::string shapesToStr(const std::vector<Mat>& mats)
{
    std::ostringstream shapes;
    shapes << mats.size() << " ";
    for (const Mat& m : mats)
    {
        shapes << m.dims << " ";
        for (int i = 0; i < m.dims; ++i)
            shapes << m.size[i] << " ";
    }
    return shapes.str();
}

static void strToShapes(const std::string& str, std::vector<std::vector<size_t> >& shapes)
{
    std::istringstream ss(str);
    int num, dims;
    ss >> num;
    shapes.resize(num);
    for (int i = 0; i < num; ++i)
    {
        ss >> dims;
        shapes[i].resize(dims);
        for (int j = 0; j < dims; ++j)
            ss >> shapes[i][j];
    }
}
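// The two helpers above use a plain whitespace-separated encoding:
// "<num> <dims_0> <d_0_0> ... <dims_1> <d_1_0> ...". For example, Mats of
// shapes 1x3x224x224 and 1x10 serialize to "2 4 1 3 224 224 2 1 10 ".

// InfEngineCustomLayer executes an OpenCV layer inside Inference Engine: the raw
// cv::dnn::Layer pointer is passed through the IR as the string parameter "impl"
// (set in InfEngineBackendNode's constructor below) and dereferenced in execute().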
class InfEngineCustomLayer : public InferenceEngine::ILayerExecImpl
{
public:
    explicit InfEngineCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
    {
        std::istringstream iss(layer.GetParamAsString("impl"));
        size_t ptr;
        iss >> ptr;
        cvLayer = (Layer*)ptr;

        std::vector<std::vector<size_t> > shapes;
        strToShapes(layer.GetParamAsString("internals"), shapes);
        internals.resize(shapes.size());
        for (int i = 0; i < shapes.size(); ++i)
            internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
    }

    virtual InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
                                                std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                                InferenceEngine::ResponseDesc* resp) noexcept
    {
        std::vector<Mat> inpMats, outMats;
        infEngineBlobsToMats(inputs, inpMats);
        infEngineBlobsToMats(outputs, outMats);

        try
        {
            cvLayer->forward(inpMats, outMats, internals);
            return InferenceEngine::StatusCode::OK;
        }
        catch (...)
        {
            return InferenceEngine::StatusCode::GENERAL_ERROR;
        }
    }

    virtual InferenceEngine::StatusCode
    getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
                               InferenceEngine::ResponseDesc* resp) noexcept
    {
        std::vector<InferenceEngine::DataConfig> inDataConfig;
        std::vector<InferenceEngine::DataConfig> outDataConfig;
        for (auto& it : cnnLayer.insData)
        {
            InferenceEngine::DataConfig conf;
            conf.desc = it.lock()->getTensorDesc();
            inDataConfig.push_back(conf);
        }
        for (auto& it : cnnLayer.outData)
        {
            InferenceEngine::DataConfig conf;
            conf.desc = it->getTensorDesc();
            outDataConfig.push_back(conf);
        }

        InferenceEngine::LayerConfig layerConfig;
        layerConfig.inConfs = inDataConfig;
        layerConfig.outConfs = outDataConfig;

        conf.push_back(layerConfig);
        return InferenceEngine::StatusCode::OK;
    }

    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
                                     InferenceEngine::ResponseDesc* resp) noexcept
    {
        return InferenceEngine::StatusCode::OK;
    }

private:
    InferenceEngine::CNNLayer cnnLayer;
    dnn::Layer* cvLayer;
    std::vector<Mat> internals;
};

class InfEngineCustomLayerShapeInfer : public InferenceEngine::IShapeInferImpl
{
public:
    InferenceEngine::StatusCode
    inferShapes(const std::vector<InferenceEngine::Blob::CPtr>& inBlobs,
                const std::map<std::string, std::string>& params,
                const std::map<std::string, InferenceEngine::Blob::Ptr>& blobs,
                std::vector<InferenceEngine::SizeVector>& outShapes,
                InferenceEngine::ResponseDesc* desc) noexcept override
    {
        strToShapes(params.at("outputs"), outShapes);
        return InferenceEngine::StatusCode::OK;
    }
};

class InfEngineCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
public:
    explicit InfEngineCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer) {}

    InferenceEngine::StatusCode
    getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls,
                       InferenceEngine::ResponseDesc* resp) noexcept override {
        impls.push_back(std::make_shared<InfEngineCustomLayer>(cnnLayer));
        return InferenceEngine::StatusCode::OK;
    }

private:
    InferenceEngine::CNNLayer cnnLayer;
};

class InfEngineExtension : public InferenceEngine::IExtension
{
public:
    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
    virtual void Unload() noexcept {}
    virtual void Release() noexcept {}
    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}

    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                          InferenceEngine::ResponseDesc*) noexcept
    {
        return InferenceEngine::StatusCode::OK;
    }

    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
                                              const InferenceEngine::CNNLayer* cnnLayer,
                                              InferenceEngine::ResponseDesc* resp) noexcept
    {
        if (cnnLayer->type != kOpenCVLayersType)
            return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
        factory = new InfEngineCustomLayerFactory(cnnLayer);
        return InferenceEngine::StatusCode::OK;
    }
};
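// Note: getFactoryFor() above reports NOT_IMPLEMENTED for everything except
// layers of type kOpenCVLayersType, so registering this extension with the
// Inference Engine Core (see initPlugin) only affects OpenCV fallback layers.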
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}

InfEngineBackendNode::InfEngineBackendNode(Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
                                           std::vector<Mat>& outputs,
                                           std::vector<Mat>& internals)
    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(cvLayer_->name),
      cvLayer(cvLayer_)
{
    CV_Assert(!cvLayer->name.empty());
    layer.setName(cvLayer->name);
    layer.setType(kOpenCVLayersType);
    layer.getParameters()["impl"] = (size_t)cvLayer.get();
    layer.getParameters()["outputs"] = shapesToStr(outputs);
    layer.getParameters()["internals"] = shapesToStr(internals);
    layer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
    layer.setOutputPorts(std::vector<InferenceEngine::Port>(outputs.size()));
}

static std::vector<Ptr<InfEngineBackendWrapper> >
infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
    std::vector<Ptr<InfEngineBackendWrapper> > wrappers(ptrs.size());
    for (int i = 0; i < ptrs.size(); ++i)
    {
        CV_Assert(!ptrs[i].empty());
        wrappers[i] = ptrs[i].dynamicCast<InfEngineBackendWrapper>();
        CV_Assert(!wrappers[i].empty());
    }
    return wrappers;
}

InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
{
    hasNetOwner = false;
    device_name = "CPU";
}

InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net)
{
    hasNetOwner = true;
    device_name = "CPU";
}
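// connect() below wires input wrappers to an already-added layer: inputs that are
// not registered yet get fresh InputLayer nodes, and the first output's data node
// is renamed to the layer's name so later layers can look it up by that name.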
void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                                  const std::vector<Ptr<BackendWrapper> >& outputs,
                                  const std::string& layerName)
{
    std::vector<Ptr<InfEngineBackendWrapper> > inpWrappers = infEngineWrappers(inputs);
    std::map<std::string, int>::iterator it = layers.find(layerName);
    CV_Assert(it != layers.end());

    const int layerId = it->second;
    for (size_t i = 0; i < inpWrappers.size(); ++i)
    {
        const auto& inp = inpWrappers[i];
        const std::string& inpName = inp->dataPtr->getName();
        int inpId;
        it = layers.find(inpName);
        if (it == layers.end())
        {
            InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
            std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
            inpLayer.setPort(InferenceEngine::Port(shape));
            inpId = netBuilder.addLayer(inpLayer);

            layers.insert({inpName, inpId});
        }
        else
            inpId = it->second;

        netBuilder.connect((size_t)inpId, {(size_t)layerId, i});
        unconnectedLayersIds.erase(inpId);
    }
    CV_Assert(!outputs.empty());
    InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    dataPtr->name = layerName;
#else
    dataPtr->setName(layerName);
#endif
}
void InfEngineBackendNet::init(int targetId)
{
    if (!hasNetOwner)
    {
        CV_Assert(!unconnectedLayersIds.empty());
        for (int id : unconnectedLayersIds)
        {
            InferenceEngine::Builder::OutputLayer outLayer("myconv1");
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
            // Inference Engine determines network precision by ports.
            InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
                                            targetId == DNN_TARGET_OPENCL_FP16) ?
                                           InferenceEngine::Precision::FP16 :
                                           InferenceEngine::Precision::FP32;
            outLayer.setPort(InferenceEngine::Port({}, p));
#endif
            netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
        }
        netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
                                                  std::make_shared<InfEngineCustomLayerShapeInfer>());
        cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
    }

    switch (targetId)
    {
        case DNN_TARGET_CPU:
            device_name = "CPU";
            break;
        case DNN_TARGET_OPENCL:
        case DNN_TARGET_OPENCL_FP16:
            device_name = "GPU";
            break;
        case DNN_TARGET_MYRIAD:
            device_name = "MYRIAD";
            break;
        case DNN_TARGET_FPGA:
            device_name = "FPGA";
            break;
        default:
            CV_Error(Error::StsNotImplemented, "Unknown target");
    }

    for (const auto& name : requestedOutputs)
    {
        cnn.addOutput(name);
    }

    for (const auto& it : cnn.getInputsInfo())
    {
        const std::string& name = it.first;
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
    }
    for (const auto& it : cnn.getOutputsInfo())
    {
        const std::string& name = it.first;
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());  // Should be always FP32
    }

    initPlugin(cnn);
}
void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
    // Add weights to network and connect them after input blobs.
    std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
    std::vector<int> blobsIds;
    std::vector<int> portIds;
    for (const std::string& name : {"weights", "biases"})
    {
        bool asInput = false;
        int portId = 0;
        for (int i = 0; i < layer.getInputPorts().size(); ++i)
        {
            const auto& port = layer.getInputPorts()[i];
            auto it = port.getParameters().find("type");
            if (it != port.getParameters().end() && it->second == name)
            {
                portId = i;
                asInput = true;
                break;
            }
        }

        if (!asInput)
            continue;

        auto it = params.find(name);
        if (it != params.end())
        {
            InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
            params.erase(it);
            int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
            blobsIds.push_back(blobId);
            portIds.push_back(portId);
        }
    }
#endif

    int id = netBuilder.addLayer(layer);
    const std::string& layerName = layer.getName();
    CV_Assert(layers.insert({layerName, id}).second);
    unconnectedLayersIds.insert(id);

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
    // By default, all the weights are connected to last ports ids.
    for (int i = 0; i < blobsIds.size(); ++i)
    {
        netBuilder.connect((size_t)blobsIds[i], {(size_t)id, (size_t)portIds[i]});
    }
#endif
}

void InfEngineBackendNet::addOutput(const std::string& name)
{
    requestedOutputs.push_back(name);
}
static InferenceEngine::Layout estimateLayout(const Mat& m)
{
    if (m.dims == 4)
        return InferenceEngine::Layout::NCHW;
    else if (m.dims == 2)
        return InferenceEngine::Layout::NC;
    else
        return InferenceEngine::Layout::ANY;
}

static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
    std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
    if (m.type() == CV_32F)
        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
               {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
    else if (m.type() == CV_8U)
        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
               {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
    else
        CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
}

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
                                               InferenceEngine::Layout layout)
{
    if (m.type() == CV_32F)
        return InferenceEngine::make_shared_blob<float>(
               {InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data);
    else if (m.type() == CV_8U)
        return InferenceEngine::make_shared_blob<uint8_t>(
               {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
    else
        CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
}
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
{
    std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
    return wrapToInfEngineBlob(m, shape, layout);
}
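// Both wrapToInfEngineBlob overloads wrap the Mat's own memory (no copy is made),
// so the Mat must outlive the returned blob.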
InferenceEngine::Blob::Ptr cloneBlob(const InferenceEngine::Blob::Ptr& blob)
{
    InferenceEngine::Blob::Ptr copy;
    auto description = blob->getTensorDesc();
    InferenceEngine::Precision precision = description.getPrecision();
    if (precision == InferenceEngine::Precision::FP32)
    {
        copy = InferenceEngine::make_shared_blob<float>(description);
    }
    else if (precision == InferenceEngine::Precision::U8)
    {
        copy = InferenceEngine::make_shared_blob<uint8_t>(description);
    }
    else
        CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
    copy->allocate();
    return copy;
}
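// Unlike wrapToInfEngineBlob, cloneBlob allocates new memory with the same tensor
// description; forward() uses it so every asynchronous infer request owns its
// input/output storage instead of sharing the user's blobs.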
InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr)
{
    CV_Assert(!ptr.empty());
    Ptr<InfEngineBackendWrapper> p = ptr.dynamicCast<InfEngineBackendWrapper>();
    CV_Assert(!p.empty());
    return p->dataPtr;
}

InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m)
    : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, targetId)
{
    dataPtr = wrapToInfEngineDataNode(m);
    blob = wrapToInfEngineBlob(m, estimateLayout(m));
}

InfEngineBackendWrapper::InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper)
    : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, wrapper->targetId)
{
    Ptr<InfEngineBackendWrapper> ieWrapper = wrapper.dynamicCast<InfEngineBackendWrapper>();
    CV_Assert(!ieWrapper.empty());
    InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;

    dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
    blob = ieWrapper->blob;
}

Ptr<BackendWrapper> InfEngineBackendWrapper::create(Ptr<BackendWrapper> wrapper)
{
    return Ptr<BackendWrapper>(new InfEngineBackendWrapper(wrapper));
}

InfEngineBackendWrapper::~InfEngineBackendWrapper()
{
}

void InfEngineBackendWrapper::copyToHost()
{
}

void InfEngineBackendWrapper::setHostDirty()
{
}
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
{
    static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
    return sharedPlugins;
}
#else
static InferenceEngine::Core& getCore()
{
    static InferenceEngine::Core core;
    return core;
}
#endif
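// Plugins (2019R1 and older) and the process-wide InferenceEngine::Core instance
// (newer releases) are shared between all networks; accesses are serialized with
// getInitializationMutex().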
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
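// Autodetection probe for the VPU generation: build a minimal FP16 network
// (Input -> Clamp -> Output) and try to load it with VPU_PLATFORM=VPU_2480
// (Myriad X). If loading fails, the caller falls back to Myriad 2.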
static bool detectMyriadX_()
{
    InferenceEngine::Builder::Network builder("");
    InferenceEngine::idx_t inpId = builder.addLayer(
                                   InferenceEngine::Builder::InputLayer().setPort(InferenceEngine::Port({1})));

#if INF_ENGINE_RELEASE <= 2018050000
    InferenceEngine::idx_t clampId;
    {
        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ClampLayer();
        auto& blobs = l.getConstantData();
        auto blob = InferenceEngine::make_shared_blob<int16_t>(
                        InferenceEngine::Precision::FP16,
                        InferenceEngine::Layout::C, {1});
        blob->allocate();
        blobs[""] = blob;
        clampId = builder.addLayer({inpId}, l);
    }
    builder.addLayer({InferenceEngine::PortInfo(clampId)}, InferenceEngine::Builder::OutputLayer());
#else
    InferenceEngine::idx_t clampId = builder.addLayer({inpId}, InferenceEngine::Builder::ClampLayer());
    builder.addLayer({InferenceEngine::PortInfo(clampId)},
                     InferenceEngine::Builder::OutputLayer().setPort(InferenceEngine::Port({},
                     InferenceEngine::Precision::FP16)));
#endif

    InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork(
                                      InferenceEngine::Builder::convertToICNNNetwork(builder.build()));

#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    {
        AutoLock lock(getInitializationMutex());
        auto& sharedPlugins = getSharedPlugins();
        auto pluginIt = sharedPlugins.find("MYRIAD");
        if (pluginIt != sharedPlugins.end()) {
            enginePtr = pluginIt->second;
        } else {
            auto dispatcher = InferenceEngine::PluginDispatcher({""});
            enginePtr = dispatcher.getPluginByDevice("MYRIAD");
            sharedPlugins["MYRIAD"] = enginePtr;
        }
    }
    auto plugin = InferenceEngine::InferencePlugin(enginePtr);
    try
    {
        auto netExec = plugin.LoadNetwork(cnn, {{"VPU_PLATFORM", "VPU_2480"}});
#else
    try
    {
        auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
#endif
        auto infRequest = netExec.CreateInferRequest();
    } catch(...) {
        return false;
    }
    return true;
}
#endif  // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
    CV_Assert(!isInitialized());

    try
    {
        AutoLock lock(getInitializationMutex());
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
        auto& sharedPlugins = getSharedPlugins();
        auto pluginIt = sharedPlugins.find(device_name);
        if (pluginIt != sharedPlugins.end())
        {
            enginePtr = pluginIt->second;
        }
        else
#else
        InferenceEngine::Core& ie = getCore();
#endif
        {
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
            auto dispatcher = InferenceEngine::PluginDispatcher({""});
            if (device_name == "FPGA")
                enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
            else
                enginePtr = dispatcher.getPluginByDevice(device_name);
            sharedPlugins[device_name] = enginePtr;
#else
            isInit = true;
#endif
            std::vector<std::string> candidates;
            std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
            if (!param_pluginPath.empty())
            {
                candidates.push_back(param_pluginPath);
            }

            if (device_name == "CPU" || device_name == "FPGA")
            {
                std::string suffixes[] = {"_avx2", "_sse4", ""};
                bool haveFeature[] = {
                    checkHardwareSupport(CPU_AVX2),
                    checkHardwareSupport(CPU_SSE4_2),
                    true
                };
                for (int i = 0; i < 3; ++i)
                {
                    if (!haveFeature[i])
                        continue;
#ifdef _WIN32
                    candidates.push_back("cpu_extension" + suffixes[i] + ".dll");
#elif defined(__APPLE__)
                    candidates.push_back("libcpu_extension" + suffixes[i] + ".so");  // built as loadable module
                    candidates.push_back("libcpu_extension" + suffixes[i] + ".dylib");  // built as shared library
#else
                    candidates.push_back("libcpu_extension" + suffixes[i] + ".so");
#endif  // _WIN32
                }
            }
            bool found = false;
            for (size_t i = 0; i != candidates.size(); ++i)
            {
                const std::string& libName = candidates[i];
                try
                {
                    InferenceEngine::IExtensionPtr extension =
                        InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
                    enginePtr->AddExtension(extension, 0);
#else
                    ie.AddExtension(extension, "CPU");
                    // OpenCV fallbacks as extensions.
                    ie.AddExtension(std::make_shared<InfEngineExtension>(), "CPU");
#endif
                    CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
                    found = true;
                    break;
                }
                catch (...) {}
            }
            if (!found && !candidates.empty())
            {
                CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
            }
            // Some of the networks can work without a library of extra layers.
#ifndef _WIN32
            // Limit the number of CPU threads.
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
            enginePtr->SetConfig({{
                InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
            }}, 0);
#else
            if (device_name == "CPU")
                ie.SetConfig({{
                    InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
                }}, device_name);
#endif
#endif  // _WIN32
        }
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
        plugin = InferenceEngine::InferencePlugin(enginePtr);
        netExec = plugin.LoadNetwork(net, {});
#else
        bool isHetero = false;
        if (device_name != "CPU")
        {
            isHetero = device_name == "FPGA";
            for (auto& layer : net)
            {
                if (layer->type == kOpenCVLayersType)
                {
                    layer->affinity = "CPU";
                    isHetero = true;
                }
                else
                    layer->affinity = device_name;
            }
        }
        if (isHetero)
            netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU");
        else
            netExec = ie.LoadNetwork(net, device_name);
#endif
    }
    catch (const std::exception& ex)
    {
        CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
    }
}
bool InfEngineBackendNet::isInitialized()
{
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    return (bool)enginePtr;
#else
    return isInit;
#endif
}

void InfEngineBackendNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
{
    auto wrappers = infEngineWrappers(ptrs);
    for (const auto& wrapper : wrappers)
    {
        std::string name = wrapper->dataPtr->getName();
        name = name.empty() ? kDefaultInpLayerName : name;
        allBlobs.insert({name, wrapper->blob});
    }
}
void InfEngineBackendNet::InfEngineReqWrapper::makePromises(const std::vector<Ptr<BackendWrapper> >& outsWrappers)
{
    auto outs = infEngineWrappers(outsWrappers);
    outProms.clear();
    outProms.resize(outs.size());
    outsNames.resize(outs.size());
    for (int i = 0; i < outs.size(); ++i)
    {
        outs[i]->futureMat = outProms[i].getArrayResult();
        outsNames[i] = outs[i]->dataPtr->getName();
    }
}
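// forward() keeps a pool of infer requests: an idle one (isReady) is reused,
// otherwise a new request is created. In asynchronous mode each request works on
// cloned blobs, and its completion callback fulfills the promises prepared by
// makePromises() above.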
void InfEngineBackendNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                                  bool isAsync)
{
    // Look for finished requests.
    Ptr<InfEngineReqWrapper> reqWrapper;
    for (auto& wrapper : infRequests)
    {
        if (wrapper->isReady)
        {
            reqWrapper = wrapper;
            break;
        }
    }
    if (reqWrapper.empty())
    {
        reqWrapper = Ptr<InfEngineReqWrapper>(new InfEngineReqWrapper());
        try
        {
            reqWrapper->req = netExec.CreateInferRequest();
        }
        catch (const std::exception& ex)
        {
            CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
        }
        infRequests.push_back(reqWrapper);

        InferenceEngine::BlobMap inpBlobs, outBlobs;
        for (const auto& it : cnn.getInputsInfo())
        {
            const std::string& name = it.first;
            auto blobIt = allBlobs.find(name);
            CV_Assert(blobIt != allBlobs.end());
            inpBlobs[name] = isAsync ? cloneBlob(blobIt->second) : blobIt->second;
        }
        for (const auto& it : cnn.getOutputsInfo())
        {
            const std::string& name = it.first;
            auto blobIt = allBlobs.find(name);
            CV_Assert(blobIt != allBlobs.end());
            outBlobs[name] = isAsync ? cloneBlob(blobIt->second) : blobIt->second;
        }
        reqWrapper->req.SetInput(inpBlobs);
        reqWrapper->req.SetOutput(outBlobs);

        InferenceEngine::IInferRequest::Ptr infRequestPtr = reqWrapper->req;
        infRequestPtr->SetUserData(reqWrapper.get(), 0);

        infRequestPtr->SetCompletionCallback(
            [](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status)
            {
                InfEngineReqWrapper* wrapper;
                request->GetUserData((void**)&wrapper, 0);
                CV_Assert(wrapper && "Internal error");

                size_t processedOutputs = 0;
                try
                {
                    for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
                    {
                        const std::string& name = wrapper->outsNames[processedOutputs];
                        Mat m = infEngineBlobToMat(wrapper->req.GetBlob(name));

                        try
                        {
                            CV_Assert(status == InferenceEngine::StatusCode::OK);
                            wrapper->outProms[processedOutputs].setValue(m.clone());
                        }
                        catch (...)
                        {
                            try {
                                wrapper->outProms[processedOutputs].setException(std::current_exception());
                            } catch(...) {
                                CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
                            }
                        }
                    }
                }
                catch (...)
                {
                    std::exception_ptr e = std::current_exception();
                    for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
                    {
                        try {
                            wrapper->outProms[processedOutputs].setException(e);
                        } catch(...) {
                            CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
                        }
                    }
                }
                wrapper->isReady = true;
            }
        );
    }
    if (isAsync)
    {
        // Copy actual data to infer request's input blobs.
        for (const auto& it : cnn.getInputsInfo())
        {
            const std::string& name = it.first;
            auto blobIt = allBlobs.find(name);
            Mat srcMat = infEngineBlobToMat(blobIt->second);
            Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name));
            srcMat.copyTo(dstMat);
        }

        // Set promises to output blobs wrappers.
        reqWrapper->makePromises(outBlobsWrappers);

        reqWrapper->isReady = false;
        reqWrapper->req.StartAsync();
    }
    else
    {
        reqWrapper->req.Infer();
    }
}
Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
    // NOTE: Inference Engine sizes are reversed.
    std::vector<size_t> dims = blob->getTensorDesc().getDims();
    std::vector<int> size(dims.begin(), dims.end());
    auto precision = blob->getTensorDesc().getPrecision();

    int type = -1;
    switch (precision)
    {
        case InferenceEngine::Precision::FP32: type = CV_32F; break;
        case InferenceEngine::Precision::U8: type = CV_8U; break;
        default:
            CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
    }
    return Mat(size, type, (void*)blob->buffer());
}

void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
                          std::vector<Mat>& mats)
{
    mats.resize(blobs.size());
    for (int i = 0; i < blobs.size(); ++i)
        mats[i] = infEngineBlobToMat(blobs[i]);
}
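// getMemoryShapes() below reshapes the wrapped network whenever the requested
// input shapes differ from its current ones, then reports this layer's output
// shape from the (re)shaped network.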
bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape>& inputs,
                                            const int requiredOutputs,
                                            std::vector<MatShape>& outputs,
                                            std::vector<MatShape>& internals) const
{
    InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
    InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
    bool equal_flag = true;
    size_t i = 0;
    for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
    {
        InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
        if (itr->second != currentInShape)
        {
            itr->second = currentInShape;
            equal_flag = false;
        }
        i++;
    }

    if (!equal_flag)
    {
        InferenceEngine::CNNNetwork curr_t_net(t_net);
        curr_t_net.reshape(inShapes);
    }
    std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
    outputs.push_back(MatShape(dims.begin(), dims.end()));
    return false;
}

bool InfEngineBackendLayer::supportBackend(int backendId)
{
    return backendId == DNN_BACKEND_DEFAULT ||
           (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}

void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                                    OutputArrayOfArrays internals)
{
    CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
}
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
{
    auto halfs = InferenceEngine::make_shared_blob<int16_t>({
                     InferenceEngine::Precision::FP16, blob->getTensorDesc().getDims(),
                     blob->getTensorDesc().getLayout()
                 });
    halfs->allocate();
    Mat floatsData(1, blob->size(), CV_32F, blob->buffer());
    Mat halfsData(1, blob->size(), CV_16SC1, halfs->buffer());
    convertFp16(floatsData, halfsData);
    return halfs;
}
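// Note: FP16 payloads are kept in int16_t blobs and CV_16SC1 Mats, since this
// OpenCV branch has no dedicated 16-bit float Mat type; cv::convertFp16 performs
// the actual FP32 -> FP16 conversion over that raw storage.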
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
                     InferenceEngine::Builder::Layer& l)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
    l.getParameters()[name] = data;
#else
    l.addConstantData(name, data);
#endif
}

#endif  // HAVE_INF_ENGINE

bool haveInfEngine()
{
#ifdef HAVE_INF_ENGINE
    return true;
#else
    return false;
#endif  // HAVE_INF_ENGINE
}
void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync)
{
    CV_Assert(haveInfEngine());
#ifdef HAVE_INF_ENGINE
    CV_Assert(!node.empty());
    Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
    CV_Assert(!ieNode.empty());
    ieNode->net->forward(outBlobsWrappers, isAsync);
#endif  // HAVE_INF_ENGINE
}

CV__DNN_EXPERIMENTAL_NS_BEGIN

void resetMyriadDevice()
{
#ifdef HAVE_INF_ENGINE
    AutoLock lock(getInitializationMutex());
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    getSharedPlugins().erase("MYRIAD");
#else
    // To unregister both "MYRIAD" and "HETERO:MYRIAD,CPU" plugins
    getCore() = InferenceEngine::Core();
#endif
#endif  // HAVE_INF_ENGINE
}
#ifdef HAVE_INF_ENGINE
bool isMyriadX()
{
    static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
    return myriadX;
}

static std::string getInferenceEngineVPUType_()
{
    static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
    if (param_vpu_type == "")
    {
#if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
        param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
#else
        CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
        try {
            bool isMyriadX_ = detectMyriadX_();
            if (isMyriadX_)
            {
                param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
            }
            else
            {
                param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
            }
        }
        catch (...)
        {
            CV_LOG_WARNING(NULL, "OpenCV-DNN: Failed Inference Engine VPU autodetection. Specify 'OPENCV_DNN_IE_VPU_TYPE' parameter.");
            param_vpu_type.clear();
        }
#endif
    }

    CV_LOG_INFO(NULL, "OpenCV-DNN: Inference Engine VPU type='" << param_vpu_type << "'");
    return param_vpu_type;
}

cv::String getInferenceEngineVPUType()
{
    static cv::String vpu_type = getInferenceEngineVPUType_();
    return vpu_type;
}
#else  // HAVE_INF_ENGINE
cv::String getInferenceEngineVPUType()
{
    CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
}
#endif  // HAVE_INF_ENGINE
CV__DNN_EXPERIMENTAL_NS_END

}}  // namespace dnn, namespace cv