opencv/samples/dnn/face_detect.cpp
Yuantao Feng 34d359fe03
Merge pull request #20422 from fengyuentau:dnn_face
Add DNN-based face detection and face recognition into modules/objdetect

* Add DNN-based face detector impl and interface

* Add a sample for DNN-based face detector

* add recog

* add notes

* move samples from samples/cpp to samples/dnn

* add documentation for dnn_face

* add set/get methods for input size, nms & score threshold and topk (see the detector sketch after this log)

* remove the DNN prefix from the face detector and face recognizer

* remove default values in the constructor of impl

* regenerate priors after setting input size

* two filenames for readnet

* Update face.hpp

* Update face_recognize.cpp

* Update face_match.cpp

* Update face.hpp

* Update face_recognize.cpp

* Update face_match.cpp

* Update face_recognize.cpp

* Update dnn_face.markdown

* Update dnn_face.markdown

* Update face.hpp

* Update dnn_face.markdown

* add regression test for face detection

* remove underscore prefix; fix warnings

* add reference & acknowledgement for face detection

* Update dnn_face.markdown

* Update dnn_face.markdown

* Update ts.hpp

* Update test_face.cpp

* Update face_match.cpp

* fix a compile error for python interface; add python examples for face detection and recognition

* Major changes in response to Vadim's comments:

* Replace class name FaceDetector with FaceDetectorYN in related files

* Declare local mat before loop in modules/objdetect/src/face_detect.cpp

* Make input image and save flag optional in samples/dnn/face_detect(.cpp, .py)

* Add camera support in samples/dnn/face_detect(.cpp, .py)

* correct file paths for regression test

* fix conversion warnings; remove extra spaces

* update face_recog

* Update dnn_face.markdown

* Fix warnings and errors for the default CI reports:

* Remove trailing white spaces and extra new lines.

* Fix conversion warnings for windows and iOS.

* Add braces around initialization of subobjects.

* Fix warnings and errors for the default CI systems:

* Add prefix 'FR_' to each value name in enum DisType to resolve the redefinition error in iOS compilation; modify other code accordingly

* Add bookmark '#tutorial_dnn_face' to solve warnings from doxygen

* Correct documentation to solve warnings from doxygen

* update FaceRecognizerSF

* Fix the error for CI to find ONNX models correctly

* add suffix f to float assignments

* add backend & target options for initializing face recognizer (see the recognizer sketch after this log)

* add checkeq for checking input size and preset size

* update test and threshold

* changes in response to alalek's comments:

* fix typos in samples/dnn/face_match.py

* import numpy before importing cv2

* add documentation to .setInputSize()

* remove extra include in face_recognize.cpp

* fix some bugs

* Update dnn_face.markdown

* update thresholds; remove useless code

* add time suffix to YuNet filename in test

* objdetect: update test code
2021-10-08 19:13:49 +00:00
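
The set/get interface mentioned in the log can be exercised as follows. A minimal sketch, assuming a local yunet.onnx; the threshold values here are illustrative, not the defaults:

#include <opencv2/objdetect.hpp>

int main()
{
    using namespace cv;
    // Create the detector once, then retune it through the set/get methods.
    Ptr<FaceDetectorYN> det = FaceDetectorYN::create("yunet.onnx", "", Size(320, 320));
    det->setScoreThreshold(0.8f);      // filter out faces with score < 0.8
    det->setNMSThreshold(0.4f);        // suppress boxes with IoU >= 0.4
    det->setTopK(500);                 // keep at most 500 boxes before NMS
    det->setInputSize(Size(640, 480)); // priors are regenerated for the new size
    CV_Assert(det->getScoreThreshold() == 0.8f); // getters mirror the setters
    return 0;
}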
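The recognition half of the PR (FaceRecognizerSF) follows an align-crop / feature / match flow. A sketch under stated assumptions: "face_recognizer.onnx" is a placeholder filename, face1/face2 are single detection rows produced by FaceDetectorYN, and FR_COSINE / FR_NORM_L2 are the DisType values carrying the FR_ prefix mentioned above:

#include <opencv2/dnn.hpp>
#include <opencv2/objdetect.hpp>

// img1/img2: source images; face1/face2: one detection row each from FaceDetectorYN.
double compareFaces(const cv::Mat& img1, const cv::Mat& face1,
                    const cv::Mat& img2, const cv::Mat& face2)
{
    using namespace cv;
    // "face_recognizer.onnx" is a placeholder; backend & target options come from this PR.
    Ptr<FaceRecognizerSF> rec = FaceRecognizerSF::create(
        "face_recognizer.onnx", "", dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU);
    Mat aligned1, aligned2, feature1, feature2;
    rec->alignCrop(img1, face1, aligned1); // warp the face to a canonical pose
    rec->alignCrop(img2, face2, aligned2);
    rec->feature(aligned1, feature1);
    feature1 = feature1.clone();           // clone: feature() may reuse an internal buffer
    rec->feature(aligned2, feature2);
    // FR_COSINE: higher means more similar; FR_NORM_L2: lower means more similar.
    return rec->match(feature1, feature2, FaceRecognizerSF::DisType::FR_COSINE);
}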


#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect.hpp>
#include <iostream>
using namespace cv;
using namespace std;
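
// Each row of `faces` returned by FaceDetectorYN::detect() holds one detection:
//   cols 0-3:  bounding box (x, y, w, h)
//   cols 4-13: five landmarks as (x, y) pairs: right eye, left eye, nose tip,
//              right mouth corner, left mouth corner
//   col  14:   face score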
static Mat visualize(const Mat& input, const Mat& faces, int thickness = 2)
{
    Mat output = input.clone();
    for (int i = 0; i < faces.rows; i++)
    {
        // Print results
        cout << "Face " << i
             << ", top-left coordinates: (" << faces.at<float>(i, 0) << ", " << faces.at<float>(i, 1) << "), "
             << "box width: " << faces.at<float>(i, 2) << ", box height: " << faces.at<float>(i, 3) << ", "
             << "score: " << faces.at<float>(i, 14) << "\n";
        // Draw bounding box
        rectangle(output, Rect2i(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)), int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3))), Scalar(0, 255, 0), thickness);
        // Draw landmarks: right eye, left eye, nose tip, right mouth corner, left mouth corner
        circle(output, Point2i(int(faces.at<float>(i, 4)),  int(faces.at<float>(i, 5))),  2, Scalar(255,   0,   0), thickness);
        circle(output, Point2i(int(faces.at<float>(i, 6)),  int(faces.at<float>(i, 7))),  2, Scalar(  0,   0, 255), thickness);
        circle(output, Point2i(int(faces.at<float>(i, 8)),  int(faces.at<float>(i, 9))),  2, Scalar(  0, 255,   0), thickness);
        circle(output, Point2i(int(faces.at<float>(i, 10)), int(faces.at<float>(i, 11))), 2, Scalar(255,   0, 255), thickness);
        circle(output, Point2i(int(faces.at<float>(i, 12)), int(faces.at<float>(i, 13))), 2, Scalar(  0, 255, 255), thickness);
    }
    return output;
}
int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv,
        "{help h | | Print this message.}"
        "{input i | | Path to the input image. Omit to detect on the default camera.}"
        "{model m | yunet.onnx | Path to the model. Download yunet.onnx from https://github.com/ShiqiYu/libfacedetection.train/tree/master/tasks/task1/onnx.}"
        "{score_threshold | 0.9 | Filter out faces with score < score_threshold.}"
        "{nms_threshold | 0.3 | Suppress bounding boxes with IoU >= nms_threshold.}"
        "{top_k | 5000 | Keep top_k bounding boxes before NMS.}"
        "{save s | false | Set true to save results. This flag is ignored when using the camera.}"
        "{vis v | true | Set true to open a window for result visualization. This flag is ignored when using the camera.}"
    );
    if (argc == 1 || parser.has("help"))
    {
        parser.printMessage();
        return -1;
    }

    String modelPath = parser.get<String>("model");
    float scoreThreshold = parser.get<float>("score_threshold");
    float nmsThreshold = parser.get<float>("nms_threshold");
    int topK = parser.get<int>("top_k");
    bool save = parser.get<bool>("save");
    bool vis = parser.get<bool>("vis");

    // Initialize FaceDetectorYN
    Ptr<FaceDetectorYN> detector = FaceDetectorYN::create(modelPath, "", Size(320, 320), scoreThreshold, nmsThreshold, topK);
    // If input is an image
    if (parser.has("input"))
    {
        String input = parser.get<String>("input");
        Mat image = imread(input);
        if (image.empty())
        {
            cerr << "Cannot read image: " << input << "\n";
            return -1;
        }
        // Set input size before inference
        detector->setInputSize(image.size());
        // Inference
        Mat faces;
        detector->detect(image, faces);
        // Draw results on the input image
        Mat result = visualize(image, faces);
        // Save results if save is true
        if (save)
        {
            cout << "Results saved to result.jpg\n";
            imwrite("result.jpg", result);
        }
        // Visualize results
        if (vis)
        {
            namedWindow(input, WINDOW_AUTOSIZE);
            imshow(input, result);
            waitKey(0);
        }
    }
    else
    {
        int deviceId = 0;
        VideoCapture cap;
        cap.open(deviceId, CAP_ANY);
        if (!cap.isOpened())
        {
            cerr << "Cannot open camera " << deviceId << "\n";
            return -1;
        }
        int frameWidth = int(cap.get(CAP_PROP_FRAME_WIDTH));
        int frameHeight = int(cap.get(CAP_PROP_FRAME_HEIGHT));
        // The detector expects frames of exactly the preset input size
        detector->setInputSize(Size(frameWidth, frameHeight));

        Mat frame;
        TickMeter tm;
        String msg = "FPS: ";
        while (waitKey(1) < 0) // Press any key to exit
        {
            // Get frame
            if (!cap.read(frame))
            {
                cerr << "No frames grabbed!\n";
                break;
            }
            // Inference
            Mat faces;
            tm.start();
            detector->detect(frame, faces);
            tm.stop();
            // Draw results on the input image
            Mat result = visualize(frame, faces);
            putText(result, msg + to_string(tm.getFPS()), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
            // Visualize results
            imshow("Live", result);
            tm.reset();
        }
    }
    return 0;
}