Mirror of https://github.com/opencv/opencv.git
Make Intel's Inference Engine backend the default if no preferable backend is specified.
This commit is contained in:
parent 1822e85f4a
commit b781ac7346
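For user code the change is transparent: a network that never calls setPreferableBackend(), or that passes DNN_BACKEND_DEFAULT, is now dispatched to Intel's Inference Engine when OpenCV was built with it (HAVE_INF_ENGINE), and to the new DNN_BACKEND_OPENCV implementation otherwise. A minimal sketch of the resulting behaviour (model and image file names are placeholders, not part of this commit):

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Placeholder model files; substitute any Caffe model available locally.
    Net net = readNet("model.caffemodel", "deploy.prototxt");

    // No explicit setPreferableBackend() call: the network keeps
    // DNN_BACKEND_DEFAULT, which this commit resolves at allocation time to
    // DNN_BACKEND_INFERENCE_ENGINE if OpenCV was built with the Inference
    // Engine, and to DNN_BACKEND_OPENCV otherwise.
    Mat img = imread("input.png");
    net.setInput(blobFromImage(img, 1.0, Size(224, 224), Scalar(), false));
    Mat out = net.forward();

    // To pin the previous behaviour (OpenCV's own implementation), request it explicitly:
    // net.setPreferableBackend(DNN_BACKEND_OPENCV);
    return 0;
}

The diff below applies this resolution throughout the dnn module, its tests, and the samples.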
@@ -66,16 +66,22 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN

/**
* @brief Enum of computation backends supported by layers.
* @see Net::setPreferableBackend
*/
enum Backend
{
//! DNN_BACKEND_DEFAULT equals to DNN_BACKEND_INFERENCE_ENGINE if
//! OpenCV is built with Intel's Inference Engine library or
//! DNN_BACKEND_OPENCV otherwise.
DNN_BACKEND_DEFAULT,
DNN_BACKEND_HALIDE,
DNN_BACKEND_INFERENCE_ENGINE
DNN_BACKEND_INFERENCE_ENGINE,
DNN_BACKEND_OPENCV
};

/**
* @brief Enum of target devices for computations.
* @see Net::setPreferableTarget
*/
enum Target
{

@@ -460,6 +466,9 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @brief Ask network to use specific computation backend where it supported.
* @param[in] backendId backend identifier.
* @see Backend
*
* If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT
* means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals to DNN_BACKEND_OPENCV.
*/
CV_WRAP void setPreferableBackend(int backendId);

@@ -467,6 +476,14 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @brief Ask network to make computations on specific target device.
* @param[in] targetId target identifier.
* @see Target
*
* List of supported combinations backend / target:
* |                        | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE |
* |------------------------|--------------------|------------------------------|--------------------|
* | DNN_TARGET_CPU         | +                  | +                            | +                  |
* | DNN_TARGET_OPENCL      | +                  | +                            | +                  |
* | DNN_TARGET_OPENCL_FP16 | +                  | +                            |                    |
* | DNN_TARGET_MYRIAD      |                    | +                            |                    |
*/
CV_WRAP void setPreferableTarget(int targetId);
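The table above lists which backend/target pairs are accepted; the same combinations are enforced by assertions added in Net::Impl further down in this diff. A hedged sketch of selecting two of the documented combinations explicitly:

#include <opencv2/dnn.hpp>

// Pick one of the documented combinations for an already-loaded network.
// The pairs used here come straight from the support table above.
static void configureForOpenCL(cv::dnn::Net& net)
{
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
}

static void configureForMyriad(cv::dnn::Net& net)
{
    // DNN_TARGET_MYRIAD is only listed for the Inference Engine backend.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_MYRIAD);
}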
@@ -12,7 +12,7 @@

namespace opencv_test {

CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE)
CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV)
CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD)

class DNNTestNetwork : public ::perf::TestBaseWithParam< tuple<DNNBackend, DNNTarget> >

@@ -54,7 +54,7 @@ public:
void processNet(std::string weights, std::string proto, std::string halide_scheduler,
const Mat& input, const std::string& outputLayer = "")
{
if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL)
{
#if defined(HAVE_OPENCL)
if (!cv::ocl::useOpenCL())

@@ -149,7 +149,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_5h)
PERF_TEST_P_(DNNTestNetwork, ENet)
{
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16))
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
processNet("dnn/Enet-model-best.net", "", "enet.yml",
Mat(cv::Size(512, 256), CV_32FC3));

@@ -267,9 +267,9 @@ const tuple<DNNBackend, DNNTarget> testCases[] = {
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_CPU),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};

INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, testing::ValuesIn(testCases));
@@ -225,7 +225,7 @@ void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_)
class OpenCLBackendWrapper : public BackendWrapper
{
public:
OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
OpenCLBackendWrapper(Mat& m) : BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
{
m.copyTo(umat);
host = &m;

@@ -233,7 +233,7 @@ public:
}

OpenCLBackendWrapper(const Ptr<BackendWrapper>& baseBuffer, Mat& m)
: BackendWrapper(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL)
: BackendWrapper(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL)
{
Ptr<OpenCLBackendWrapper> base = baseBuffer.dynamicCast<OpenCLBackendWrapper>();
CV_Assert(!base.empty());

@@ -654,7 +654,7 @@ private:

static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
{
if (backendId == DNN_BACKEND_DEFAULT)
if (backendId == DNN_BACKEND_OPENCV)
{
if (targetId == DNN_TARGET_CPU)
return Ptr<BackendWrapper>();

@@ -727,7 +727,7 @@ struct Net::Impl

Ptr<BackendWrapper> wrap(Mat& host)
{
if (preferableBackend == DNN_BACKEND_DEFAULT && preferableTarget == DNN_TARGET_CPU)
if (preferableBackend == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU)
return Ptr<BackendWrapper>();

MatShape shape(host.dims);

@@ -738,7 +738,7 @@ struct Net::Impl
if (backendWrappers.find(data) != backendWrappers.end())
{
Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
if (preferableBackend == DNN_BACKEND_DEFAULT)
if (preferableBackend == DNN_BACKEND_OPENCV)
{
CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
return OpenCLBackendWrapper::create(baseBuffer, host);

@@ -850,9 +850,27 @@ struct Net::Impl
{
CV_TRACE_FUNCTION();

if (preferableBackend == DNN_BACKEND_DEFAULT)
#ifdef HAVE_INF_ENGINE
preferableBackend = DNN_BACKEND_INFERENCE_ENGINE;
#else
preferableBackend = DNN_BACKEND_OPENCV;
#endif
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16);
CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL);
CV_Assert(preferableBackend != DNN_BACKEND_INFERENCE_ENGINE ||
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD);
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
{
if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
#ifndef HAVE_OPENCL
{
CV_LOG_WARNING(NULL, "DNN: OpenCL target is not available in this OpenCV build, switching to CPU.");
@@ -1036,7 +1054,7 @@ struct Net::Impl
void initBackend()
{
CV_TRACE_FUNCTION();
if (preferableBackend == DNN_BACKEND_DEFAULT)
if (preferableBackend == DNN_BACKEND_OPENCV)
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
else if (preferableBackend == DNN_BACKEND_HALIDE)
initHalideBackend();

@@ -1375,7 +1393,7 @@ struct Net::Impl
std::vector<LayerPin> pinsForInternalBlobs;
blobManager.allocateBlobsForLayer(ld, layerShapesIt->second, pinsForInternalBlobs,
preferableBackend == DNN_BACKEND_INFERENCE_ENGINE,
preferableBackend == DNN_BACKEND_DEFAULT &&
preferableBackend == DNN_BACKEND_OPENCV &&
preferableTarget == DNN_TARGET_OPENCL_FP16);
ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
for (int i = 0; i < ld.outputBlobs.size(); ++i)

@@ -1418,7 +1436,7 @@ struct Net::Impl

void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
{
if( !fusion || preferableBackend != DNN_BACKEND_DEFAULT &&
if( !fusion || preferableBackend != DNN_BACKEND_OPENCV &&
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
return;

@@ -1446,7 +1464,7 @@ struct Net::Impl
// some other layers.

// TODO: OpenCL target support more fusion styles.
if ( preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget) &&
if ( preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget) &&
(!cv::ocl::useOpenCL() || (ld.layerInstance->type != "Convolution" &&
ld.layerInstance->type != "MVN")) )
continue;

@@ -1481,7 +1499,7 @@ struct Net::Impl
break;
}

if (preferableBackend != DNN_BACKEND_DEFAULT)
if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.

// For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh

@@ -1624,7 +1642,7 @@ struct Net::Impl
}
}

if (preferableBackend != DNN_BACKEND_DEFAULT)
if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.

// the optimization #2. if there is no layer that takes max pooling layer's computed

@@ -1735,7 +1753,7 @@ struct Net::Impl
{
CV_Assert(layers[0].outputBlobs[i].total());
if (layers[0].outputBlobs[i].depth() == CV_32F &&
preferableBackend == DNN_BACKEND_DEFAULT &&
preferableBackend == DNN_BACKEND_OPENCV &&
preferableTarget == DNN_TARGET_OPENCL_FP16)
{
Mat mat = layers[0].outputBlobs[i].clone();

@@ -1781,12 +1799,12 @@ struct Net::Impl
TickMeter tm;
tm.start();

if (preferableBackend == DNN_BACKEND_DEFAULT ||
if (preferableBackend == DNN_BACKEND_OPENCV ||
!layer->supportBackend(preferableBackend))
{
if( !ld.skip )
{
if (preferableBackend == DNN_BACKEND_DEFAULT && IS_DNN_OPENCL_TARGET(preferableTarget))
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
{
std::vector<UMat> umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
layer->forward(OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers),

@@ -2132,7 +2150,7 @@ void Net::forward(OutputArrayOfArrays outputBlobs, const String& outputName)
{
std::vector<UMat> & outputvec = *(std::vector<UMat> *)outputBlobs.getObj();

if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
IS_DNN_OPENCL_TARGET(impl->preferableTarget))
{
if (impl->preferableTarget == DNN_TARGET_OPENCL)

@@ -2270,7 +2288,7 @@ void Net::setInput(InputArray blob, const String& name)
ld.outputBlobsWrappers.resize(ld.outputBlobs.size());
MatShape prevShape = shape(ld.outputBlobs[pin.oid]);
Mat blob_;
if (impl->preferableBackend == DNN_BACKEND_DEFAULT &&
if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
impl->preferableTarget == DNN_TARGET_OPENCL_FP16)
{
Mat blob_mat = blob.getMat();

@@ -2664,7 +2682,7 @@ int Layer::outputNameToIndex(const String&)

bool Layer::supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT;
return backendId == DNN_BACKEND_OPENCV;
}

Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
@@ -109,7 +109,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -56,7 +56,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -103,7 +103,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding;
}

@@ -81,7 +81,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
@@ -1568,6 +1568,39 @@ public:
return Ptr<BackendNode>();
}

virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;

InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Deconvolution";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));

ieLayer->_kernel_x = kernel.width;
ieLayer->_kernel_y = kernel.height;
ieLayer->_stride_x = stride.width;
ieLayer->_stride_y = stride.height;
ieLayer->_out_depth = numOutput;
ieLayer->_padding_x = pad.width;
ieLayer->_padding_y = pad.height;
ieLayer->_dilation_x = dilation.width;
ieLayer->_dilation_y = dilation.height;
ieLayer->_group = group;

ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
if (hasBias())
{
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
@@ -195,7 +195,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !_locPredTransposed;
}

@@ -115,7 +115,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -496,8 +496,9 @@ struct TanHFunctor
#ifdef HAVE_INF_ENGINE
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
CV_Error(Error::StsNotImplemented, "TanH");
return InferenceEngine::CNNLayerPtr();
lp.type = "TanH";
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
return ieLayer;
}
#endif // HAVE_INF_ENGINE

@@ -96,7 +96,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -64,7 +64,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -128,7 +128,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1;
}

@@ -90,7 +90,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -34,7 +34,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() &&
!poolPad.width && !poolPad.height;
}

@@ -63,7 +63,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
pnorm == 2 && !blobs.empty();
}

@@ -87,7 +87,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4;
}

@@ -118,7 +118,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -135,7 +135,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() &&
(type == MAX || type == AVE && !pad.width && !pad.height) ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (type == MAX || type == AVE);
@@ -270,7 +270,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -85,11 +85,6 @@ public:
return false;
}

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT;
}

#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{

@@ -168,7 +168,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -42,7 +42,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -48,7 +48,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}

@@ -88,7 +88,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax;
}
@@ -40,7 +40,7 @@ public:
std::string halideScheduler = "",
double l1 = 0.0, double lInf = 0.0)
{
if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL)
{
#ifdef HAVE_OPENCL
if (!cv::ocl::useOpenCL())

@@ -72,11 +72,11 @@ public:

// Create two networks - with default backend and target and a tested one.
Net netDefault = readNet(weights, proto);
Net net = readNet(weights, proto);

netDefault.setPreferableBackend(DNN_BACKEND_OPENCV);
netDefault.setInput(inp);
Mat outDefault = netDefault.forward(outputLayer).clone();

Net net = readNet(weights, proto);
net.setInput(inp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);

@@ -167,7 +167,7 @@ TEST_P(DNNTestNetwork, Inception_5h)
TEST_P(DNNTestNetwork, ENet)
{
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16))
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
processNet("dnn/Enet-model-best.net", "", Size(512, 512), "l367_Deconvolution",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_enet.yml" :

@@ -181,8 +181,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
float l1 = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.0007 : 0.0;
float lInf = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.011 : 0.0;
float l1 = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.0007 : 0.0;
float lInf = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.011 : 0.0;

processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
inp, "detection_out", "", l1, lInf);

@@ -196,8 +196,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_TensorFlow)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
float l1 = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.008 : 0.0;
float lInf = (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ? 0.06 : 0.0;
float l1 = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.008 : 0.0;
float lInf = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 0.06 : 0.0;
processNet("dnn/ssd_mobilenet_v1_coco.pb", "dnn/ssd_mobilenet_v1_coco.pbtxt",
inp, "detection_out", "", l1, lInf);
}

@@ -280,7 +280,7 @@ TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
TEST_P(DNNTestNetwork, DenseNet_121)
{
if ((backend == DNN_BACKEND_HALIDE) ||
(backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 ||
target == DNN_TARGET_MYRIAD)))
throw SkipTestException("");

@@ -298,8 +298,8 @@ const tuple<DNNBackend, DNNTarget> testCases[] = {
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};

INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, testing::ValuesIn(testCases));
@@ -62,6 +62,7 @@ TEST(Test_Caffe, memory_read)
ASSERT_TRUE(readFileInMemory(model, dataModel));

Net net = readNetFromCaffe(dataProto.c_str(), dataProto.size());
net.setPreferableBackend(DNN_BACKEND_OPENCV);
ASSERT_FALSE(net.empty());

Net net2 = readNetFromCaffe(dataProto.c_str(), dataProto.size(),

@@ -108,6 +109,7 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
const float l1 = 1e-5;
const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 3e-3 : 1e-4;

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat sample = imread(_tf("grace_hopper_227.png"));

@@ -132,6 +134,7 @@ TEST(Reproducibility_FCN, Accuracy)
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat sample = imread(_tf("street.png"));
ASSERT_TRUE(!sample.empty());

@@ -160,6 +163,7 @@ TEST(Reproducibility_SSD, Accuracy)
net = readNetFromCaffe(proto, model);
ASSERT_FALSE(net.empty());
}
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat sample = imread(_tf("street.png"));
ASSERT_TRUE(!sample.empty());

@@ -185,6 +189,7 @@ TEST_P(Reproducibility_MobileNet_SSD, Accuracy)
const float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 1.5e-4 : 1e-5;
const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-4;

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat sample = imread(_tf("street.png"));

@@ -236,6 +241,7 @@ TEST_P(Reproducibility_ResNet50, Accuracy)
findDataFile("dnn/ResNet-50-model.caffemodel", false));

int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

float l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 3e-5 : 1e-5;

@@ -271,6 +277,7 @@ TEST_P(Reproducibility_SqueezeNet_v1_1, Accuracy)
findDataFile("dnn/squeezenet_v1.1.caffemodel", false));

int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat input = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(227,227), Scalar(), false);

@@ -302,6 +309,7 @@ TEST(Reproducibility_AlexNet_fp16, Accuracy)

shrinkCaffeModel(model, "bvlc_alexnet.caffemodel_fp16");
Net net = readNetFromCaffe(proto, "bvlc_alexnet.caffemodel_fp16");
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat sample = imread(findDataFile("dnn/grace_hopper_227.png", false));

@@ -321,6 +329,7 @@ TEST(Reproducibility_GoogLeNet_fp16, Accuracy)

shrinkCaffeModel(model, "bvlc_googlenet.caffemodel_fp16");
Net net = readNetFromCaffe(proto, "bvlc_googlenet.caffemodel_fp16");
net.setPreferableBackend(DNN_BACKEND_OPENCV);

std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );

@@ -347,6 +356,7 @@ TEST(Reproducibility_Colorization, Accuracy)
const string proto = findDataFile("dnn/colorization_deploy_v2.prototxt", false);
const string model = findDataFile("dnn/colorization_release_v2.caffemodel", false);
Net net = readNetFromCaffe(proto, model);
net.setPreferableBackend(DNN_BACKEND_OPENCV);

net.getLayer(net.getLayerId("class8_ab"))->blobs.push_back(kernel);
net.getLayer(net.getLayerId("conv8_313_rh"))->blobs.push_back(Mat(1, 313, CV_32F, 2.606));

@@ -367,6 +377,7 @@ TEST(Reproducibility_DenseNet_121, Accuracy)
Mat ref = blobFromNPY(_tf("densenet_121_output.npy"));

Net net = readNetFromCaffe(proto, model);
net.setPreferableBackend(DNN_BACKEND_OPENCV);

net.setInput(inp);
Mat out = net.forward();

@@ -378,6 +389,7 @@ TEST(Test_Caffe, multiple_inputs)
{
const string proto = findDataFile("dnn/layers/net_input.prototxt", false);
Net net = readNetFromCaffe(proto);
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat first_image(10, 11, CV_32FC3);
Mat second_image(10, 11, CV_32FC3);

@@ -412,7 +424,7 @@ TEST_P(opencv_face_detector, Accuracy)
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

net.setInput(blob);

@@ -455,6 +467,7 @@ TEST(Test_Caffe, FasterRCNN_and_RFCN)
std::string model = findDataFile("dnn/" + models[i], false);

Net net = readNetFromCaffe(proto, model);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat img = imread(findDataFile("dnn/dog416.png", false));
resize(img, img, Size(800, 600));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(102.9801, 115.9465, 122.7717), false, false);
@@ -74,7 +74,7 @@ static void testDarknetModel(const std::string& cfg, const std::string& weights,
int backendId, int targetId, float scoreDiff = 0.0,
float iouDiff = 0.0, float confThreshold = 0.24)
{
if (backendId == DNN_BACKEND_DEFAULT && targetId == DNN_TARGET_OPENCL)
if (backendId == DNN_BACKEND_OPENCV && targetId == DNN_TARGET_OPENCL)
{
#ifdef HAVE_OPENCL
if (!cv::ocl::useOpenCL())

@@ -197,9 +197,9 @@ const tuple<DNNBackend, DNNTarget> testCases[] = {
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD),
#endif
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_CPU),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_DEFAULT, DNN_TARGET_OPENCL_FP16)
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_CPU),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL),
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_OPENCV, DNN_TARGET_OPENCL_FP16)
};

INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, testing::ValuesIn(testCases));

@@ -214,6 +214,7 @@ static void testDarknetLayer(const std::string& name, bool hasWeights = false)
Mat ref = blobFromNPY(findDataFile("dnn/darknet/" + name + "_out.npy", false));

Net net = readNet(cfg, model);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, ref);
@@ -52,10 +52,23 @@ static std::string _tf(TString filename)
return (getOpenCVExtraDir() + "/dnn/") + filename;
}

TEST(Reproducibility_GoogLeNet, Accuracy)
typedef testing::TestWithParam<DNNTarget> Reproducibility_GoogLeNet;
TEST_P(Reproducibility_GoogLeNet, Batching)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

if (targetId == DNN_TARGET_OPENCL)
{
// Initialize network for a single image in the batch but test with batch size=2.
Mat inp = Mat(224, 224, CV_8UC3);
randu(inp, -1, 1);
net.setInput(blobFromImage(inp));
net.forward();
}

std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );

@@ -69,36 +82,13 @@ TEST(Reproducibility_GoogLeNet, Accuracy)
normAssert(out, ref);
}

OCL_TEST(Reproducibility_GoogLeNet, Accuracy)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableTarget(DNN_TARGET_OPENCL);

// Initialize network for a single image in the batch but test with batch size=2.
Mat inp = Mat(224, 224, CV_8UC3);
randu(inp, -1, 1);
net.setInput(blobFromImage(inp));
net.forward();

std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );
inpMats.push_back( imread(_tf("googlenet_1.png")) );
ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());

net.setInput(blobFromImages(inpMats, 1.0f, Size(), Scalar(), false), "data");
Mat out = net.forward("prob");

Mat ref = blobFromNPY(_tf("googlenet_prob.npy"));
normAssert(out, ref);
}

TEST(IntermediateBlobs_GoogLeNet, Accuracy)
TEST_P(Reproducibility_GoogLeNet, IntermediateBlobs)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

std::vector<String> blobsNames;
blobsNames.push_back("conv1/7x7_s2");

@@ -121,39 +111,13 @@ TEST(IntermediateBlobs_GoogLeNet, Accuracy)
}
}

OCL_TEST(IntermediateBlobs_GoogLeNet, Accuracy)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableTarget(DNN_TARGET_OPENCL);

std::vector<String> blobsNames;
blobsNames.push_back("conv1/7x7_s2");
blobsNames.push_back("conv1/relu_7x7");
blobsNames.push_back("inception_4c/1x1");
blobsNames.push_back("inception_4c/relu_1x1");
std::vector<Mat> outs;
Mat in = blobFromImage(imread(_tf("googlenet_0.png")), 1.0f, Size(), Scalar(), false);
net.setInput(in, "data");
net.forward(outs, blobsNames);
CV_Assert(outs.size() == blobsNames.size());

for (size_t i = 0; i < blobsNames.size(); i++)
{
std::string filename = blobsNames[i];
std::replace( filename.begin(), filename.end(), '/', '#');
Mat ref = blobFromNPY(_tf("googlenet_" + filename + ".npy"));

normAssert(outs[i], ref, "", 1E-4, 1E-2);
}
}

TEST(SeveralCalls_GoogLeNet, Accuracy)
TEST_P(Reproducibility_GoogLeNet, SeveralCalls)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));
int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );

@@ -179,36 +143,6 @@ TEST(SeveralCalls_GoogLeNet, Accuracy)
normAssert(outs[0], ref, "", 1E-4, 1E-2);
}

OCL_TEST(SeveralCalls_GoogLeNet, Accuracy)
{
Net net = readNetFromCaffe(findDataFile("dnn/bvlc_googlenet.prototxt", false),
findDataFile("dnn/bvlc_googlenet.caffemodel", false));

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableTarget(DNN_TARGET_OPENCL);

std::vector<Mat> inpMats;
inpMats.push_back( imread(_tf("googlenet_0.png")) );
inpMats.push_back( imread(_tf("googlenet_1.png")) );
ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());

net.setInput(blobFromImages(inpMats, 1.0f, Size(), Scalar(), false), "data");
Mat out = net.forward();

Mat ref = blobFromNPY(_tf("googlenet_prob.npy"));
normAssert(out, ref);

std::vector<String> blobsNames;
blobsNames.push_back("conv1/7x7_s2");
std::vector<Mat> outs;
Mat in = blobFromImage(inpMats[0], 1.0f, Size(), Scalar(), false);
net.setInput(in, "data");
net.forward(outs, blobsNames);
CV_Assert(outs.size() == blobsNames.size());

ref = blobFromNPY(_tf("googlenet_conv1#7x7_s2.npy"));

normAssert(outs[0], ref, "", 1E-4, 1E-2);
}
INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_GoogLeNet, availableDnnTargets());

}} // namespace
@@ -26,6 +26,7 @@ static void test(LayerParams& params, Mat& input)
net.connect(0, 0, lid, 0);

net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(params.name).clone();

net.setPreferableBackend(DNN_BACKEND_HALIDE);

@@ -368,6 +369,7 @@ TEST(MaxPoolUnpool_Halide, Accuracy)
Mat input({1, 1, 4, 4}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward("testUnpool").clone();

net.setPreferableBackend(DNN_BACKEND_HALIDE);

@@ -401,6 +403,7 @@ void testInPlaceActivation(LayerParams& lp)
Mat input({1, kNumChannels, 10, 10}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(lp.name).clone();

net.setInput(input);

@@ -579,6 +582,7 @@ TEST_P(Concat, Accuracy)
randu(input, -1.0f, 1.0f);

net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(concatParam.name).clone();

net.setPreferableBackend(DNN_BACKEND_HALIDE);

@@ -655,6 +659,7 @@ TEST_P(Eltwise, Accuracy)
randu(input, -1.0f, 1.0f);

net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward(eltwiseParam.name).clone();

net.setPreferableBackend(DNN_BACKEND_HALIDE);

@@ -698,6 +703,7 @@ TEST(MixedBackends_Halide_Default_Halide, Accuracy)
Mat input({4, 3, 5, 6}, CV_32F);
randu(input, -1.0f, 1.0f);
net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outputDefault = net.forward().clone();

net.setPreferableBackend(DNN_BACKEND_HALIDE);
@@ -105,7 +105,7 @@ void testLayerUsingCaffeModels(String basename, int targetId = DNN_TARGET_CPU,
Net net = readNetFromCaffe(prototxt, (useCaffeModel) ? caffemodel : String());
ASSERT_FALSE(net.empty());

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat inp = blobFromNPY(inpfile);

@@ -260,6 +260,7 @@ TEST(Layer_Test_Fused_Concat, Accuracy)
randu(input, 0.0f, 1.0f); // [0, 1] to make AbsVal an identity transformation.

net.setInput(input);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();

normAssert(slice(out, Range::all(), Range(0, 2), Range::all(), Range::all()), input);

@@ -308,7 +309,7 @@ static void test_Reshape_Split_Slice_layers(int targetId)
Net net = readNetFromCaffe(_tf("reshape_and_slice_routines.prototxt"));
ASSERT_FALSE(net.empty());

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat input(6, 12, CV_32F);

@@ -335,6 +336,7 @@ TEST(Layer_Conv_Elu, Accuracy)
Mat ref = blobFromNPY(_tf("layer_elu_out.npy"));

net.setInput(inp, "input");
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();

normAssert(ref, out);

@@ -502,6 +504,7 @@ void testLayerUsingDarknetModels(String basename, bool useDarknetModel = false,
Mat ref = blobFromNPY(outfile);

net.setInput(inp, "data");
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();

normAssert(ref, out);

@@ -527,6 +530,7 @@ TEST(Layer_Test_ROIPooling, Accuracy)

net.setInput(inp, "input");
net.setInput(rois, "rois");
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat out = net.forward();

@@ -547,6 +551,7 @@ TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
net.setInput(imInfo, "im_info");

std::vector<Mat> outs;
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.forward(outs, "output");

for (int i = 0; i < 2; ++i)

@@ -614,6 +619,7 @@ TEST_P(Scale_untrainable, Accuracy)
net.setInputsNames(inpNames);
net.setInput(input, inpNames[0]);
net.setInput(weights, inpNames[1]);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();

Mat ref(input.dims, input.size, CV_32F);

@@ -681,6 +687,7 @@ TEST_P(Crop, Accuracy)
net.setInputsNames(inpNames);
net.setInput(inpImage, inpNames[0]);
net.setInput(sizImage, inpNames[1]);
net.setPreferableBackend(DNN_BACKEND_OPENCV);

// There are a few conditions that represent invalid input to the crop
// layer, so in those cases we want to verify an exception is thrown.

@@ -744,6 +751,7 @@ TEST(Layer_Test_Average_pooling_kernel_area, Accuracy)
Mat target = (Mat_<float>(2, 2) << (1 + 2 + 4 + 5) / 4.f, (3 + 6) / 2.f, (7 + 8) / 2.f, 9);
Mat tmp = blobFromImage(inp);
net.setInput(blobFromImage(inp));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();
normAssert(out, blobFromImage(target));
}

@@ -768,6 +776,7 @@ TEST(Layer_PriorBox, squares)
Mat inp(1, 2, CV_32F);
randu(inp, -1, 1);
net.setInput(blobFromImage(inp));
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat out = net.forward();

Mat target = (Mat_<float>(4, 4) << 0.0, 0.0, 0.75, 1.0,

@@ -789,6 +798,7 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy)
Mat inp = blobFromNPY(_tf("blob.npy"));

netDefault.setInput(inp);
netDefault.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat outDefault = netDefault.forward();

net.setInput(inp);

@@ -847,7 +857,7 @@ public:

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT;
return backendId == DNN_BACKEND_OPENCV;
}

virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE {}

@@ -128,6 +128,7 @@ TEST(LayerFactory, custom_layers)
net.addLayerToPrev(lp.name, lp.type, lp);

net.setInput(inp);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat output = net.forward();

if (i == 0) EXPECT_EQ(output.at<float>(0), 1);
@@ -52,7 +52,7 @@
namespace opencv_test {
using namespace cv::dnn;

CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE)
CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV)
CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD)

static testing::internal::ParamGenerator<DNNTarget> availableDnnTargets()

@@ -34,6 +34,7 @@ TEST(Test_TensorFlow, read_inception)
net = readNetFromTensorflow(model);
ASSERT_FALSE(net.empty());
}
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat sample = imread(_tf("grace_hopper_227.png"));
ASSERT_TRUE(!sample.empty());

@@ -57,6 +58,7 @@ TEST(Test_TensorFlow, inception_accuracy)
net = readNetFromTensorflow(model);
ASSERT_FALSE(net.empty());
}
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat sample = imread(_tf("grace_hopper_227.png"));
ASSERT_TRUE(!sample.empty());

@@ -104,7 +106,7 @@ static void runTensorFlowNet(const std::string& prefix, int targetId = DNN_TARGE

ASSERT_FALSE(net.empty());

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

cv::Mat input = blobFromNPY(inpPath);

@@ -234,7 +236,7 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
}

Net net = readNetFromTensorflow(netPath, netConfig);

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

net.setInput(inp);

@@ -256,6 +258,7 @@ TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
Mat img = imread(findDataFile("dnn/street.png", false));
Mat blob = blobFromImage(img, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), true, false);

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

net.setInput(blob);

@@ -276,6 +279,7 @@ TEST_P(Test_TensorFlow_nets, Inception_v2_Faster_RCNN)
std::string model = findDataFile("dnn/faster_rcnn_inception_v2_coco_2018_01_28.pb", false);

Net net = readNetFromTensorflow(model, proto);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat img = imread(findDataFile("dnn/dog416.png", false));
Mat blob = blobFromImage(img, 1.0f / 127.5, Size(800, 600), Scalar(127.5, 127.5, 127.5), true, false);

@@ -295,6 +299,7 @@ TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

net.setInput(blob);

@@ -526,6 +531,7 @@ TEST(Test_TensorFlow, EAST_text_detection)
std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy", false);

Net net = readNet(findDataFile("dnn/frozen_east_text_detection.pb", false));
net.setPreferableBackend(DNN_BACKEND_OPENCV);

Mat img = imread(imgPath);
Mat inp = blobFromImage(img, 1.0, Size(), Scalar(123.68, 116.78, 103.94), true, false);
@@ -77,7 +77,7 @@ static void runTorchNet(String prefix, int targetId = DNN_TARGET_CPU, String out
Net net = readNetFromTorch(_tf(prefix + "_net" + suffix), isBinary);
ASSERT_FALSE(net.empty());

net.setPreferableBackend(DNN_BACKEND_DEFAULT);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);

Mat inp, outRef;

@@ -215,6 +215,7 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
Net net = readNetFromTorch(model);

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

Mat sample = imread(findDataFile("cv/shared/lena.png", false));

@@ -241,6 +242,7 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
ASSERT_TRUE(!net.empty());
}

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

Mat sample = imread(_tf("street.png", false));

@@ -287,6 +289,7 @@ TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
const string model = findDataFile(models[i], false);
Net net = readNetFromTorch(model);

net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(GetParam());

Mat img = imread(findDataFile("dnn/googlenet_1.png", false));
@@ -21,12 +21,15 @@ const char* keys =
"{ height | | Preprocess input image by resizing to a specific height. }"
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ backend | 0 | Choose one of computation backends: "
"0: default C++ backend, "
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"3: OpenCV implementation }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default),"
"1: OpenCL }";
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
"3: VPU }";

using namespace cv;
using namespace dnn;
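The backend and target integers documented in the help strings above follow the order of the Backend and Target enums, so the samples can pass them straight to setPreferableBackend()/setPreferableTarget(). A hedged, self-contained sketch of that pattern (the keys shown are trimmed to the two discussed here, and the model files are placeholders):

#include <opencv2/core/utility.hpp>
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main(int argc, char** argv)
{
    // Only the two keys discussed above; a real sample declares many more.
    const char* keys =
        "{ backend | 0 | 0: automatic, 1: Halide, 2: Inference Engine, 3: OpenCV }"
        "{ target  | 0 | 0: CPU, 1: OpenCL, 2: OpenCL fp16, 3: VPU }";
    CommandLineParser parser(argc, argv, keys);

    // Placeholder model files; any format readNet() understands would do.
    Net net = readNet("model.caffemodel", "deploy.prototxt");
    // The integers match the enum values, so they can be passed through directly.
    net.setPreferableBackend(parser.get<int>("backend"));
    net.setPreferableTarget(parser.get<int>("target"));
    return 0;
}

The remaining sample changes below apply the same extended backend/target lists to the Python scripts.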
@@ -3,8 +3,8 @@ import argparse
import numpy as np
import sys

backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)

parser = argparse.ArgumentParser(description='Use this script to run classification deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')

@@ -32,13 +32,16 @@ parser.add_argument('--rgb', action='store_true',
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
args = parser.parse_args()

# Load names of classes

@@ -107,7 +107,7 @@ int main(int argc, char **argv)
// run the L channel through the network
Mat inputBlob = blobFromImage(input);
net.setInput(inputBlob);
Mat result = net.forward("class8_ab");
Mat result = net.forward();

// retrieve the calculated a,b channels from the network output
Size siz(result.size[2], result.size[3]);

@@ -56,7 +56,7 @@ if __name__ == '__main__':
img_l_rs -= 50 # subtract 50 for mean-centering

net.setInput(cv.dnn.blobFromImage(img_l_rs))
ab_dec = net.forward('class8_ab')[0,:,:,:].transpose((1,2,0)) # this is our result
ab_dec = net.forward()[0,:,:,:].transpose((1,2,0)) # this is our result

(H_out,W_out) = ab_dec.shape[:2]
ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))

@@ -14,6 +14,7 @@ parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of
args = parser.parse_args()

net = cv.dnn.readNetFromTorch(args.model)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);

if args.input:
    cap = cv.VideoCapture(args.input)

@@ -27,6 +27,7 @@ args = parser.parse_args()

### Get OpenCV predictions #####################################################
net = cv.dnn.readNetFromTensorflow(args.weights, args.prototxt)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);

detections = []
for imgName in os.listdir(args.images):
@@ -23,12 +23,16 @@ const char* keys =
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ thr | .5 | Confidence threshold. }"
"{ backend | 0 | Choose one of computation backends: "
"0: default C++ backend, "
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default),"
"1: OpenCL }";
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"3: OpenCV implementation }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
"3: VPU }";

using namespace cv;
using namespace dnn;

@@ -3,8 +3,8 @@ import argparse
import sys
import numpy as np

backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)

parser = argparse.ArgumentParser(description='Use this script to run object detection deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')

@@ -33,13 +33,16 @@ parser.add_argument('--rgb', action='store_true',
parser.add_argument('--thr', type=float, default=0.5, help='Confidence threshold')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
args = parser.parse_args()

# Load names of classes
@@ -16,9 +16,6 @@ parser.add_argument('--dataset', help='Specify what kind of model was trained. '
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
parser.add_argument('--inf_engine', action='store_true',
help='Enable Intel Inference Engine computational backend. '
'Check that plugins folder is in LD_LIBRARY_PATH environment variable')

args = parser.parse_args()

@@ -49,8 +46,6 @@ inWidth = args.width
inHeight = args.height

net = cv.dnn.readNetFromCaffe(args.proto, args.model)
if args.inf_engine:
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)

cap = cv.VideoCapture(args.input if args.input else 0)

@@ -24,12 +24,15 @@ const char* keys =
"{ height | | Preprocess input image by resizing to a specific height. }"
"{ rgb | | Indicate that model works with RGB input images instead BGR ones. }"
"{ backend | 0 | Choose one of computation backends: "
"0: default C++ backend, "
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)}"
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"3: OpenCV implementation }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default),"
"1: OpenCL }";
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
"3: VPU }";

using namespace cv;
using namespace dnn;

@@ -3,8 +3,8 @@ import argparse
import numpy as np
import sys

backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL)
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)

parser = argparse.ArgumentParser(description='Use this script to run semantic segmentation deep learning networks using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')

@@ -34,13 +34,16 @@ parser.add_argument('--rgb', action='store_true',
help='Indicate that model works with RGB input images instead BGR ones.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: default C++ backend, "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" % backends)
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL' % targets)
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
args = parser.parse_args()

np.random.seed(324)