Merge pull request #14516 from dkurt:dnn_async_samples
Commit e7338024f6. This change adds an asynchronous forward mode (Inference Engine backend) to the dnn object detection samples, and moves the backend check in Net::forwardAsync() so it runs after the network has been set up.
modules/dnn/src/dnn.cpp

@@ -2718,9 +2718,6 @@ AsyncMat Net::forwardAsync(const String& outputName)
 {
     CV_TRACE_FUNCTION();
 #ifdef CV_CXX11
-    if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
-        CV_Error(Error::StsNotImplemented, "Asynchronous forward for backend which is different from DNN_BACKEND_INFERENCE_ENGINE");
-
     String layerName = outputName;
 
     if (layerName.empty())
@@ -2729,6 +2726,9 @@ AsyncMat Net::forwardAsync(const String& outputName)
     std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
     impl->setUpNet(pins);
 
+    if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
+        CV_Error(Error::StsNotImplemented, "Asynchronous forward for backend which is different from DNN_BACKEND_INFERENCE_ENGINE");
+
     impl->isAsync = true;
     impl->forwardToLayer(impl->getLayerData(layerName));
     impl->isAsync = false;
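For context only (not part of the patch): the relocated check means forwardAsync() still accepts requests only when the Inference Engine backend is selected, but the network is now set up before the check runs. A minimal caller-side sketch, assuming the OpenCV 4.x API of this commit, where the AsyncMat returned by forwardAsync() is used as a std::future<Mat> (exactly as the C++ sample below does); the helper name and blob are placeholders:

#include <opencv2/dnn.hpp>
#include <future>

using namespace cv;
using namespace cv::dnn;

// Hypothetical helper: one asynchronous forward pass, then wait for its result.
static Mat runAsyncOnce(Net& net, const Mat& blob)
{
    // Without this, forwardAsync() throws cv::Error::StsNotImplemented.
    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
    net.setInput(blob);
    std::future<Mat> result = net.forwardAsync();  // returns immediately, inference runs in the background
    return result.get();                           // blocks until the output Mat is ready
}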
samples/dnn/object_detection.cpp

@@ -5,6 +5,11 @@
 #include <opencv2/imgproc.hpp>
 #include <opencv2/highgui.hpp>
 
+#ifdef CV_CXX11
+#include <thread>
+#include <queue>
+#endif
+
 #include "common.hpp"
 
 std::string keys =
@@ -26,8 +31,9 @@ std::string keys =
                         "0: CPU target (by default), "
                         "1: OpenCL, "
                         "2: OpenCL fp16 (half-float precision), "
-                        "3: VPU }";
+                        "3: VPU }"
+    "{ async       | 0 | Number of asynchronous forwards at the same time. "
+                        "Choose 0 for synchronous mode }";
 
 using namespace cv;
 using namespace dnn;
@@ -35,13 +41,66 @@ using namespace dnn;
 float confThreshold, nmsThreshold;
 std::vector<std::string> classes;
 
+inline void preprocess(const Mat& frame, Net& net, Size inpSize, float scale,
+                       const Scalar& mean, bool swapRB);
+
 void postprocess(Mat& frame, const std::vector<Mat>& out, Net& net);
 
 void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat& frame);
 
 void callback(int pos, void* userdata);
 
-std::vector<String> getOutputsNames(const Net& net);
+#ifdef CV_CXX11
+template <typename T>
+class QueueFPS : public std::queue<T>
+{
+public:
+    QueueFPS() : counter(0) {}
+
+    void push(const T& entry)
+    {
+        std::lock_guard<std::mutex> lock(mutex);
+
+        std::queue<T>::push(entry);
+        counter += 1;
+        if (counter == 1)
+        {
+            // Start counting from a second frame (warmup).
+            tm.reset();
+            tm.start();
+        }
+    }
+
+    T get()
+    {
+        std::lock_guard<std::mutex> lock(mutex);
+        T entry = this->front();
+        this->pop();
+        return entry;
+    }
+
+    float getFPS()
+    {
+        tm.stop();
+        double fps = counter / tm.getTimeSec();
+        tm.start();
+        return static_cast<float>(fps);
+    }
+
+    void clear()
+    {
+        std::lock_guard<std::mutex> lock(mutex);
+        while (!this->empty())
+            this->pop();
+    }
+
+    unsigned int counter;
+
+private:
+    TickMeter tm;
+    std::mutex mutex;
+};
+#endif  // CV_CXX11
 
 int main(int argc, char** argv)
 {
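Aside (illustrative, not in the patch): QueueFPS<T> is meant to be filled from one thread and drained from another, with getFPS() reporting the push rate. A minimal sketch, assuming the QueueFPS template added above is in scope:

#include <iostream>
#include <thread>

int main()
{
    QueueFPS<int> q;
    std::thread producer([&](){
        for (int i = 0; i < 100; ++i)
            q.push(i);   // push() locks the internal mutex and bumps the counter
    });
    producer.join();

    while (!q.empty())
        q.get();         // get() pops the oldest entry under the same mutex

    std::cout << "push rate: " << q.getFPS() << " items/s" << std::endl;
    return 0;
}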
@@ -67,6 +126,7 @@ int main(int argc, char** argv)
     bool swapRB = parser.get<bool>("rgb");
     int inpWidth = parser.get<int>("width");
     int inpHeight = parser.get<int>("height");
+    size_t async = parser.get<int>("async");
     CV_Assert(parser.has("model"));
     std::string modelPath = findFile(parser.get<String>("model"));
     std::string configPath = findFile(parser.get<String>("config"));
@@ -104,6 +164,108 @@ int main(int argc, char** argv)
     else
         cap.open(parser.get<int>("device"));
 
+#ifdef CV_CXX11
+    bool process = true;
+
+    // Frames capturing thread
+    QueueFPS<Mat> framesQueue;
+    std::thread framesThread([&](){
+        Mat frame;
+        while (process)
+        {
+            cap >> frame;
+            if (!frame.empty())
+                framesQueue.push(frame.clone());
+            else
+                break;
+        }
+    });
+
+    // Frames processing thread
+    QueueFPS<Mat> processedFramesQueue;
+    QueueFPS<std::vector<Mat> > predictionsQueue;
+    std::thread processingThread([&](){
+        std::queue<std::future<Mat> > futureOutputs;
+        Mat blob;
+        while (process)
+        {
+            // Get a next frame
+            Mat frame;
+            {
+                if (!framesQueue.empty())
+                {
+                    frame = framesQueue.get();
+                    if (async)
+                    {
+                        if (futureOutputs.size() == async)
+                            frame = Mat();
+                    }
+                    else
+                        framesQueue.clear();  // Skip the rest of frames
+                }
+            }
+
+            // Process the frame
+            if (!frame.empty())
+            {
+                preprocess(frame, net, Size(inpWidth, inpHeight), scale, mean, swapRB);
+                processedFramesQueue.push(frame);
+
+                if (async)
+                {
+                    futureOutputs.push(net.forwardAsync());
+                }
+                else
+                {
+                    std::vector<Mat> outs;
+                    net.forward(outs, outNames);
+                    predictionsQueue.push(outs);
+                }
+            }
+
+            while (!futureOutputs.empty() &&
+                   futureOutputs.front().wait_for(std::chrono::seconds(0)) == std::future_status::ready)
+            {
+                Mat out = futureOutputs.front().get();
+                predictionsQueue.push({out});
+                futureOutputs.pop();
+            }
+        }
+    });
+
+    // Postprocessing and rendering loop
+    while (waitKey(1) < 0)
+    {
+        if (predictionsQueue.empty())
+            continue;
+
+        std::vector<Mat> outs = predictionsQueue.get();
+        Mat frame = processedFramesQueue.get();
+
+        postprocess(frame, outs, net);
+
+        if (predictionsQueue.counter > 1)
+        {
+            std::string label = format("Camera: %.2f FPS", framesQueue.getFPS());
+            putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+
+            label = format("Network: %.2f FPS", predictionsQueue.getFPS());
+            putText(frame, label, Point(0, 30), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+
+            label = format("Skipped frames: %d", framesQueue.counter - predictionsQueue.counter);
+            putText(frame, label, Point(0, 45), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+        }
+        imshow(kWinName, frame);
+    }
+
+    process = false;
+    framesThread.join();
+    processingThread.join();
+
+#else  // CV_CXX11
+    if (async)
+        CV_Error(Error::StsNotImplemented, "Asynchronous forward is supported only with Inference Engine backend.");
+
     // Process frames.
     Mat frame, blob;
     while (waitKey(1) < 0)
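The processing thread above drains finished requests without blocking by polling each pending future with a zero timeout. A stripped-down sketch of that idiom in plain C++11, independent of the OpenCV types used in the sample:

#include <chrono>
#include <future>
#include <iostream>
#include <queue>

int main()
{
    std::queue<std::future<int> > pending;
    pending.push(std::async(std::launch::async, [](){ return 42; }));

    // wait_for(0) only checks readiness: completed futures are consumed,
    // unfinished ones stay queued for a later pass.
    while (!pending.empty() &&
           pending.front().wait_for(std::chrono::seconds(0)) == std::future_status::ready)
    {
        std::cout << pending.front().get() << std::endl;
        pending.pop();
    }
    return 0;
}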
@@ -115,19 +277,8 @@ int main(int argc, char** argv)
             break;
         }
 
-        // Create a 4D blob from a frame.
-        Size inpSize(inpWidth > 0 ? inpWidth : frame.cols,
-                     inpHeight > 0 ? inpHeight : frame.rows);
-        blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false);
+        preprocess(frame, net, Size(inpWidth, inpHeight), scale, mean, swapRB);
 
-        // Run a model.
-        net.setInput(blob);
-        if (net.getLayer(0)->outputNameToIndex("im_info") != -1)  // Faster-RCNN or R-FCN
-        {
-            resize(frame, frame, inpSize);
-            Mat imInfo = (Mat_<float>(1, 3) << inpSize.height, inpSize.width, 1.6f);
-            net.setInput(imInfo, "im_info");
-        }
         std::vector<Mat> outs;
         net.forward(outs, outNames);
 
@@ -142,9 +293,29 @@ int main(int argc, char** argv)
 
         imshow(kWinName, frame);
     }
+#endif  // CV_CXX11
     return 0;
 }
 
+inline void preprocess(const Mat& frame, Net& net, Size inpSize, float scale,
+                       const Scalar& mean, bool swapRB)
+{
+    static Mat blob;
+    // Create a 4D blob from a frame.
+    if (inpSize.width <= 0) inpSize.width = frame.cols;
+    if (inpSize.height <= 0) inpSize.height = frame.rows;
+    blobFromImage(frame, blob, 1.0, inpSize, Scalar(), swapRB, false, CV_8U);
+
+    // Run a model.
+    net.setInput(blob, "", scale, mean);
+    if (net.getLayer(0)->outputNameToIndex("im_info") != -1)  // Faster-RCNN or R-FCN
+    {
+        resize(frame, frame, inpSize);
+        Mat imInfo = (Mat_<float>(1, 3) << inpSize.height, inpSize.width, 1.6f);
+        net.setInput(imInfo, "im_info");
+    }
+}
+
 void postprocess(Mat& frame, const std::vector<Mat>& outs, Net& net)
 {
     static std::vector<int> outLayers = net.getUnconnectedOutLayers();
samples/dnn/object_detection.py

@@ -1,6 +1,13 @@
 import cv2 as cv
 import argparse
 import numpy as np
+import sys
+import time
+from threading import Thread
+if sys.version_info[0] == '2':
+    import Queue as queue
+else:
+    import queue
 
 from common import *
 from tf_text_graph_common import readTextMessage
@@ -35,6 +42,9 @@ parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU,
                     '%d: OpenCL, '
                     '%d: OpenCL fp16 (half-float precision), '
                     '%d: VPU' % targets)
+parser.add_argument('--async', type=int, default=0,
+                    help='Number of asynchronous forwards at the same time. '
+                         'Choose 0 for synchronous mode')
 args, _ = parser.parse_known_args()
 add_preproc_args(args.zoo, parser, 'object_detection')
 parser = argparse.ArgumentParser(parents=[parser],
@@ -173,32 +183,125 @@ def callback(pos):
 cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback)
 
 cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
+
+class QueueFPS(queue.Queue):
+    def __init__(self):
+        queue.Queue.__init__(self)
+        self.startTime = 0
+        self.counter = 0
+
+    def put(self, v):
+        queue.Queue.put(self, v)
+        self.counter += 1
+        if self.counter == 1:
+            self.startTime = time.time()
+
+    def getFPS(self):
+        return self.counter / (time.time() - self.startTime)
+
+
+process = True
+
+#
+# Frames capturing thread
+#
+framesQueue = QueueFPS()
+def framesThreadBody():
+    global framesQueue, process
+
+    while process:
+        hasFrame, frame = cap.read()
+        if not hasFrame:
+            break
+        framesQueue.put(frame)
+
+
+#
+# Frames processing thread
+#
+processedFramesQueue = queue.Queue()
+predictionsQueue = QueueFPS()
+def processingThreadBody():
+    global processedFramesQueue, predictionsQueue, args, process
+
+    futureOutputs = []
+    while process:
+        # Get a next frame
+        frame = None
+        try:
+            frame = framesQueue.get_nowait()
+
+            if args.async:
+                if len(futureOutputs) == args.async:
+                    frame = None  # Skip the frame
+            else:
+                framesQueue.queue.clear()  # Skip the rest of frames
+        except queue.Empty:
+            pass
+
+
+        if not frame is None:
+            frameHeight = frame.shape[0]
+            frameWidth = frame.shape[1]
+
+            # Create a 4D blob from a frame.
+            inpWidth = args.width if args.width else frameWidth
+            inpHeight = args.height if args.height else frameHeight
+            blob = cv.dnn.blobFromImage(frame, size=(inpWidth, inpHeight), swapRB=args.rgb, ddepth=cv.CV_8U)
+            processedFramesQueue.put(frame)
+
+            # Run a model
+            net.setInput(blob, scalefactor=args.scale, mean=args.mean)
+            if net.getLayer(0).outputNameToIndex('im_info') != -1:  # Faster-RCNN or R-FCN
+                frame = cv.resize(frame, (inpWidth, inpHeight))
+                net.setInput(np.array([[inpHeight, inpWidth, 1.6]], dtype=np.float32), 'im_info')
+
+            if args.async:
+                futureOutputs.append(net.forwardAsync())
+            else:
+                outs = net.forward(outNames)
+                predictionsQueue.put(np.copy(outs))
+
+        while futureOutputs and futureOutputs[0].wait_for(0) == 0:
+            out = futureOutputs[0].get()
+            predictionsQueue.put(np.copy([out]))

+            del futureOutputs[0]
+
+
+framesThread = Thread(target=framesThreadBody)
+framesThread.start()
+
+processingThread = Thread(target=processingThreadBody)
+processingThread.start()
+
+#
+# Postprocessing and rendering loop
+#
 while cv.waitKey(1) < 0:
-    hasFrame, frame = cap.read()
-    if not hasFrame:
-        cv.waitKey()
-        break
-
-    frameHeight = frame.shape[0]
-    frameWidth = frame.shape[1]
-
-    # Create a 4D blob from a frame.
-    inpWidth = args.width if args.width else frameWidth
-    inpHeight = args.height if args.height else frameHeight
-    blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False)
-
-    # Run a model
-    net.setInput(blob)
-    if net.getLayer(0).outputNameToIndex('im_info') != -1:  # Faster-RCNN or R-FCN
-        frame = cv.resize(frame, (inpWidth, inpHeight))
-        net.setInput(np.array([[inpHeight, inpWidth, 1.6]], dtype=np.float32), 'im_info')
-    outs = net.forward(outNames)
-
-    postprocess(frame, outs)
-
-    # Put efficiency information.
-    t, _ = net.getPerfProfile()
-    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
-    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
-
-    cv.imshow(winName, frame)
+    try:
+        # Request prediction first because they put after frames
+        outs = predictionsQueue.get_nowait()
+        frame = processedFramesQueue.get_nowait()
+
+        postprocess(frame, outs)
+
+        # Put efficiency information.
+        if predictionsQueue.counter > 1:
+            label = 'Camera: %.2f FPS' % (framesQueue.getFPS())
+            cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Network: %.2f FPS' % (predictionsQueue.getFPS())
+            cv.putText(frame, label, (0, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+            label = 'Skipped frames: %d' % (framesQueue.counter - predictionsQueue.counter)
+            cv.putText(frame, label, (0, 45), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
+
+        cv.imshow(winName, frame)
+    except queue.Empty:
+        pass
+
+
+process = False
+framesThread.join()
+processingThread.join()