dnn(perf): update perf tests
commit 78788e1efb
parent b8af7c5f86
@@ -1,27 +1,15 @@
#include "perf_precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cvtest
namespace
{

using std::tr1::tuple;
using std::tr1::get;
using std::tr1::make_tuple;
using std::make_pair;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::dnn;

enum {STRIDE_OFF = 1, STRIDE_ON = 2};
CV_ENUM(StrideSize, STRIDE_OFF, STRIDE_ON);

enum {GROUP_OFF = 1, GROUP_2 = 2};
CV_ENUM(GroupSize, GROUP_OFF, GROUP_2);

//Squared Size
#define SSZ(n) cv::Size(n, n)

typedef std::pair<MatShape, int> InpShapeNumOut;
typedef tuple<Size, InpShapeNumOut, GroupSize, StrideSize> ConvParam; //kernel_size, inp shape, groups, stride
typedef TestBaseWithParam<ConvParam> ConvolutionPerfTest;
@@ -77,11 +65,11 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
    Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
    std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
    layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
    for (int i = 0; i < outShapes.size(); i++)
    for (size_t i = 0; i < outShapes.size(); i++)
    {
        outBlobs.push_back(Mat(outShapes[i], CV_32F));
    }
    for (int i = 0; i < internals.size(); i++)
    for (size_t i = 0; i < internals.size(); i++)
    {
        internalBlobs.push_back(Mat());
        if (total(internals[i]))
@@ -95,12 +83,13 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
    Mat outBlob2D = outBlobs[0].reshape(1, outBlobs[0].size[0]);
    declare.in(inpBlob2D, wgtBlob2D, WARMUP_RNG).out(outBlob2D).tbb_threads(cv::getNumThreads());

    TEST_CYCLE_N(10)
    {
        layer->forward(inpBlobs, outBlobs, internalBlobs); /// warmup

    PERF_SAMPLE_BEGIN()
        layer->forward(inpBlobs, outBlobs, internalBlobs);
    }
    PERF_SAMPLE_END()

    SANITY_CHECK_NOTHING();
}

}
} // namespace
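Note on the measurement pattern used in the updated test above: an explicit warmup call, then PERF_SAMPLE_BEGIN()/PERF_SAMPLE_END() around the timed region, and SANITY_CHECK_NOTHING() instead of a regression check. A minimal, self-contained sketch of the same OpenCV perf-framework pattern, with a placeholder operation (a GEMM) and placeholder sizes rather than the convolution layer used here, could look like this:

#include "perf_precomp.hpp"  // OpenCV perf framework: TestBaseWithParam, PERF_TEST_P, declare, ...

namespace
{

// Sketch only: the matrix sizes and the measured operation are illustrative;
// the point is the warmup + PERF_SAMPLE_BEGIN/END structure shown above.
typedef perf::TestBaseWithParam<int> ExampleGemmPerfTest;

PERF_TEST_P(ExampleGemmPerfTest, sketch, ::testing::Values(256, 512))
{
    const int n = GetParam();
    cv::Mat a(n, n, CV_32F), b(n, n, CV_32F), c(n, n, CV_32F);
    declare.in(a, b, WARMUP_RNG).out(c);

    cv::gemm(a, b, 1.0, cv::noArray(), 0.0, c);  // warmup call, not measured

    PERF_SAMPLE_BEGIN()
        cv::gemm(a, b, 1.0, cv::noArray(), 0.0, c);  // measured region, sampled by the framework
    PERF_SAMPLE_END()

    SANITY_CHECK_NOTHING();  // performance-only test, no output regression data
}

} // namespace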
@@ -1,174 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "perf_precomp.hpp"

namespace cvtest
{

#ifdef HAVE_HALIDE
using namespace cv;
using namespace dnn;

static void loadNet(std::string weights, std::string proto, std::string scheduler,
                    int inWidth, int inHeight, const std::string& outputLayer,
                    const std::string& framework, int targetId, Net* net)
{
    Mat input(inHeight, inWidth, CV_32FC3);
    randu(input, 0.0f, 1.0f);

    weights = findDataFile(weights, false);
    if (!proto.empty())
        proto = findDataFile(proto, false);
    if (!scheduler.empty())
        scheduler = findDataFile(scheduler, false);
    if (framework == "caffe")
    {
        *net = cv::dnn::readNetFromCaffe(proto, weights);
    }
    else if (framework == "torch")
    {
        *net = cv::dnn::readNetFromTorch(weights);
    }
    else if (framework == "tensorflow")
    {
        *net = cv::dnn::readNetFromTensorflow(weights);
    }
    else
        CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);

    net->setInput(blobFromImage(input, 1.0, Size(), Scalar(), false));
    net->setPreferableBackend(DNN_BACKEND_HALIDE);
    net->setPreferableTarget(targetId);
    net->setHalideScheduler(scheduler);
    net->forward(outputLayer);
}

////////////////////////////////////////////////////////////////////////////////
// CPU target
////////////////////////////////////////////////////////////////////////////////
PERF_TEST(GoogLeNet, HalidePerfTest)
{
    Net net;
    loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
            "", 224, 224, "prob", "caffe", DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(AlexNet, HalidePerfTest)
{
    Net net;
    loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
            "dnn/halide_scheduler_alexnet.yml", 227, 227, "prob", "caffe",
            DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(ResNet50, HalidePerfTest)
{
    Net net;
    loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
            "dnn/halide_scheduler_resnet_50.yml", 224, 224, "prob", "caffe",
            DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(SqueezeNet_v1_1, HalidePerfTest)
{
    Net net;
    loadNet("dnn/squeezenet_v1.1.caffemodel", "dnn/squeezenet_v1.1.prototxt",
            "dnn/halide_scheduler_squeezenet_v1_1.yml", 227, 227, "prob",
            "caffe", DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(Inception_5h, HalidePerfTest)
{
    Net net;
    loadNet("dnn/tensorflow_inception_graph.pb", "",
            "dnn/halide_scheduler_inception_5h.yml",
            224, 224, "softmax2", "tensorflow", DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward("softmax2");
    SANITY_CHECK_NOTHING();
}

PERF_TEST(ENet, HalidePerfTest)
{
    Net net;
    loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_enet.yml",
            512, 256, "l367_Deconvolution", "torch", DNN_TARGET_CPU, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}
////////////////////////////////////////////////////////////////////////////////
// OpenCL target
////////////////////////////////////////////////////////////////////////////////
PERF_TEST(GoogLeNet_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
            "", 227, 227, "prob", "caffe", DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(AlexNet_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
            "dnn/halide_scheduler_opencl_alexnet.yml", 227, 227, "prob", "caffe",
            DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(ResNet50_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
            "dnn/halide_scheduler_opencl_resnet_50.yml", 224, 224, "prob", "caffe",
            DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}


PERF_TEST(SqueezeNet_v1_1_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/squeezenet_v1.1.caffemodel", "dnn/squeezenet_v1.1.prototxt",
            "dnn/halide_scheduler_opencl_squeezenet_v1_1.yml", 227, 227, "prob",
            "caffe", DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}

PERF_TEST(Inception_5h_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/tensorflow_inception_graph.pb", "",
            "dnn/halide_scheduler_opencl_inception_5h.yml",
            224, 224, "softmax2", "tensorflow", DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward("softmax2");
    SANITY_CHECK_NOTHING();
}

PERF_TEST(ENet_opencl, HalidePerfTest)
{
    Net net;
    loadNet("dnn/Enet-model-best.net", "", "dnn/halide_scheduler_opencl_enet.yml",
            512, 256, "l367_Deconvolution", "torch", DNN_TARGET_OPENCL, &net);
    TEST_CYCLE() net.forward();
    SANITY_CHECK_NOTHING();
}
#endif // HAVE_HALIDE

} // namespace cvtest
149  modules/dnn/perf/perf_net.cpp  Normal file
@@ -0,0 +1,149 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "perf_precomp.hpp"
#include "opencv2/core/ocl.hpp"

#include "opencv2/dnn/shape_utils.hpp"

namespace
{

#ifdef HAVE_HALIDE
#define TEST_DNN_BACKEND DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE
#else
#define TEST_DNN_BACKEND DNN_BACKEND_DEFAULT
#endif
#define TEST_DNN_TARGET DNN_TARGET_CPU, DNN_TARGET_OPENCL

CV_ENUM(DNNBackend, DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE)
CV_ENUM(DNNTarget, DNN_TARGET_CPU, DNN_TARGET_OPENCL)

class DNNTestNetwork : public ::perf::TestBaseWithParam< tuple<DNNBackend, DNNTarget> >
{
public:
    dnn::Backend backend;
    dnn::Target target;

    dnn::Net net;

    void processNet(std::string weights, std::string proto, std::string halide_scheduler,
                    int inWidth, int inHeight, const std::string& outputLayer,
                    const std::string& framework)
    {
        backend = (dnn::Backend)(int)get<0>(GetParam());
        target = (dnn::Target)(int)get<1>(GetParam());

        if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
        {
#if 0 //defined(HAVE_OPENCL)
            if (!cv::ocl::useOpenCL())
#endif
            {
                throw ::SkipTestException("OpenCL is not available/disabled in OpenCV");
            }
        }

        Mat input(inHeight, inWidth, CV_32FC3);
        randu(input, 0.0f, 1.0f);


        weights = findDataFile(weights, false);
        if (!proto.empty())
            proto = findDataFile(proto, false);
        if (!halide_scheduler.empty() && backend == DNN_BACKEND_HALIDE)
            halide_scheduler = findDataFile(std::string("dnn/halide_scheduler_") + (target == DNN_TARGET_OPENCL ? "opencl_" : "") + halide_scheduler, true);
        if (framework == "caffe")
        {
            net = cv::dnn::readNetFromCaffe(proto, weights);
        }
        else if (framework == "torch")
        {
            net = cv::dnn::readNetFromTorch(weights);
        }
        else if (framework == "tensorflow")
        {
            net = cv::dnn::readNetFromTensorflow(weights);
        }
        else
            CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);

        net.setInput(blobFromImage(input, 1.0, Size(), Scalar(), false));
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        if (backend == DNN_BACKEND_HALIDE)
        {
            net.setHalideScheduler(halide_scheduler);
        }

        MatShape netInputShape = shape(1, 3, inHeight, inWidth);
        size_t weightsMemory = 0, blobsMemory = 0;
        net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
        int64 flops = net.getFLOPS(netInputShape);

        net.forward(outputLayer); // warmup

        std::cout << "Memory consumption:" << std::endl;
        std::cout << " Weights(parameters): " << divUp(weightsMemory, 1u<<20) << " Mb" << std::endl;
        std::cout << " Blobs: " << divUp(blobsMemory, 1u<<20) << " Mb" << std::endl;
        std::cout << "Calculation complexity: " << flops * 1e-9 << " GFlops" << std::endl;

        PERF_SAMPLE_BEGIN()
            net.forward();
        PERF_SAMPLE_END()

        SANITY_CHECK_NOTHING();
    }
};


PERF_TEST_P_(DNNTestNetwork, AlexNet)
{
    processNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
               "alexnet.yml", 227, 227, "prob", "caffe");
}

PERF_TEST_P_(DNNTestNetwork, GoogLeNet)
{
    processNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
               "", 224, 224, "prob", "caffe");
}

PERF_TEST_P_(DNNTestNetwork, ResNet50)
{
    processNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
               "resnet_50.yml", 224, 224, "prob", "caffe");
}

PERF_TEST_P_(DNNTestNetwork, SqueezeNet_v1_1)
{
    processNet("dnn/squeezenet_v1.1.caffemodel", "dnn/squeezenet_v1.1.prototxt",
               "squeezenet_v1_1.yml", 227, 227, "prob", "caffe");
}

PERF_TEST_P_(DNNTestNetwork, Inception_5h)
{
    processNet("dnn/tensorflow_inception_graph.pb", "",
               "inception_5h.yml",
               224, 224, "softmax2", "tensorflow");
}

PERF_TEST_P_(DNNTestNetwork, ENet)
{
    processNet("dnn/Enet-model-best.net", "", "enet.yml",
               512, 256, "l367_Deconvolution", "torch");
}


INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork,
    testing::Combine(
        ::testing::Values(TEST_DNN_BACKEND),
        DNNTarget::all()
    )
);

} // namespace
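Extending this benchmark with another model only requires one more PERF_TEST_P_ case next to the ones above; the existing INSTANTIATE_TEST_CASE_P call instantiates it for every backend/target combination automatically. A sketch, with placeholder file names that are not actual opencv_extra models:

// Sketch only: "dnn/example_net.caffemodel", "dnn/example_net.prototxt" and
// "example_net.yml" are placeholder names, not files shipped with opencv_extra.
// The case would live inside the anonymous namespace above, beside the others.
PERF_TEST_P_(DNNTestNetwork, ExampleNet)
{
    processNet("dnn/example_net.caffemodel", "dnn/example_net.prototxt",
               "example_net.yml", 224, 224, "prob", "caffe");
}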
@@ -1,11 +1,3 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif

#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

@@ -14,4 +6,9 @@
#include <opencv2/highgui.hpp>
#include <opencv2/dnn.hpp>

using namespace cvtest;
using namespace perf;
using namespace cv;
using namespace dnn;

#endif