Mirror of https://github.com/opencv/opencv.git, synced 2024-11-30 22:40:17 +08:00
448375d1e7
Replaced caffe model with onnx for colorization sample #25433 #25006

Improved the colorization sample to use an ONNX model in C++ and Python. Added a demo image to the data folder for testing.

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable. Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
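A typical invocation, assuming the sample builds to a binary named `example_dnn_colorization` (the actual name depends on the build configuration) and that `colorizer.onnx` has been downloaded from the link in the source:

```
./example_dnn_colorization --onnx_model_path=colorizer.onnx --input=baboon.jpg
```

The optional `--backend` and `--target` flags select the computation backend and device; see the source below for the supported values.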
118 lines · 4.3 KiB · C++
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// To download the onnx model, see: https://storage.googleapis.com/ailia-models/colorization/colorizer.onnx

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include "common.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>

using namespace cv;
using namespace std;
using namespace cv::dnn;

int main(int argc, char** argv) {
    const string about =
        "This sample demonstrates recoloring grayscale images with dnn.\n"
        "This program is based on:\n"
        "  http://richzhang.github.io/colorization\n"
        "  https://github.com/richzhang/colorization\n"
        "To download the onnx model:\n"
        "  https://storage.googleapis.com/ailia-models/colorization/colorizer.onnx\n";

    const string param_keys =
        "{ help h          |            | Print help message. }"
        "{ input i         | baboon.jpg | Path to the input image }"
        "{ onnx_model_path |            | Path to the ONNX model. Required. }";

    const string backend_keys = format(
        "{ backend | 0 | Choose one of computation backends: "
        "%d: automatically (by default), "
        "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
        "%d: OpenCV implementation, "
        "%d: VKCOM, "
        "%d: CUDA, "
        "%d: WebNN }",
        cv::dnn::DNN_BACKEND_DEFAULT, cv::dnn::DNN_BACKEND_INFERENCE_ENGINE, cv::dnn::DNN_BACKEND_OPENCV,
        cv::dnn::DNN_BACKEND_VKCOM, cv::dnn::DNN_BACKEND_CUDA, cv::dnn::DNN_BACKEND_WEBNN);

    const string target_keys = format(
        "{ target | 0 | Choose one of target computation devices: "
        "%d: CPU target (by default), "
        "%d: OpenCL, "
        "%d: OpenCL fp16 (half-float precision), "
        "%d: VPU, "
        "%d: Vulkan, "
        "%d: CUDA, "
        "%d: CUDA fp16 (half-float precision) }",
        cv::dnn::DNN_TARGET_CPU, cv::dnn::DNN_TARGET_OPENCL, cv::dnn::DNN_TARGET_OPENCL_FP16,
        cv::dnn::DNN_TARGET_MYRIAD, cv::dnn::DNN_TARGET_VULKAN, cv::dnn::DNN_TARGET_CUDA,
        cv::dnn::DNN_TARGET_CUDA_FP16);
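
    // Note: backend and target must form a supported pair; for example,
    // DNN_TARGET_CUDA requires DNN_BACKEND_CUDA and DNN_TARGET_VULKAN
    // requires DNN_BACKEND_VKCOM.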
    const string keys = param_keys + backend_keys + target_keys;
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);

    if (parser.has("help")) {
        parser.printMessage();
        return 0;
    }

    string inputImagePath = parser.get<string>("input");
    string onnxModelPath = parser.get<string>("onnx_model_path");
    int backendId = parser.get<int>("backend");
    int targetId = parser.get<int>("target");

    if (onnxModelPath.empty()) {
        cerr << "The path to the ONNX model is required!" << endl;
        return -1;
    }

    Mat imgGray = imread(samples::findFile(inputImagePath), IMREAD_GRAYSCALE);
    if (imgGray.empty()) {
        cerr << "Could not read the image: " << inputImagePath << endl;
        return -1;
    }
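
    // The network takes the L (lightness) channel of the Lab color space as
    // input. L ranges over [0, 100], so rescale the 8-bit grayscale values
    // and resize to the 256x256 input resolution the sample assumes for
    // this model.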
    Mat imgL = imgGray;
    imgL.convertTo(imgL, CV_32F, 100.0 / 255.0);
    Mat imgLResized;
    resize(imgL, imgLResized, Size(256, 256), 0, 0, INTER_CUBIC);

    // Prepare the model
    dnn::Net net = dnn::readNetFromONNX(onnxModelPath);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

    // Create a blob from the resized L channel
    Mat blob = dnn::blobFromImage(imgLResized, 1.0, Size(256, 256), Scalar(), false, false);
    net.setInput(blob);
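
    // The forward pass returns a 1x2xHxW blob holding the predicted 'a' and
    // 'b' chrominance planes of the Lab color space; wrap each plane in a
    // Mat header and upsample it back to the original image size.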
    // Run inference
    Mat result = net.forward();
    Size siz(result.size[2], result.size[3]);
    Mat a(siz, CV_32F, result.ptr(0, 0));
    Mat b(siz, CV_32F, result.ptr(0, 1));
    resize(a, a, imgGray.size());
    resize(b, b, imgGray.size());

    // Merge the original L channel with the predicted a and b channels,
    // then convert the Lab result back to BGR
    Mat color, chn[] = {imgL, a, b};
    Mat lab;
    merge(chn, 3, lab);
    cvtColor(lab, color, COLOR_Lab2BGR);

    imshow("input image", imgGray);
    imshow("output image", color);
    waitKey();

    return 0;
}