Merge pull request #25756 from gursimarsingh:bug_fix/segmentation_sample

[BUG FIX] Segmentation sample u2netp model results #25756

This PR resolves #25753, which reported incorrect output from the u2netp model in the segmentation sample.

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in:
Gursimar Singh 2024-07-03 16:33:12 +05:30 committed by GitHub
parent 55a2a945b6
commit 96a8e6d76c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 34 additions and 31 deletions

View File

@ -275,4 +275,4 @@ u2netp:
width: 320 width: 320
height: 320 height: 320
rgb: true rgb: true
sample: "segmentation" sample: "segmentation"

View File

@ -79,7 +79,7 @@ int main(int argc, char **argv)
// Open file with classes names. // Open file with classes names.
if (parser.has("classes")) if (parser.has("classes"))
{ {
string file = parser.get<String>("classes"); string file = findFile(parser.get<String>("classes"));
ifstream ifs(file.c_str()); ifstream ifs(file.c_str());
if (!ifs.is_open()) if (!ifs.is_open())
CV_Error(Error::StsError, "File " + file + " not found"); CV_Error(Error::StsError, "File " + file + " not found");
@ -92,7 +92,7 @@ int main(int argc, char **argv)
// Open file with colors. // Open file with colors.
if (parser.has("colors")) if (parser.has("colors"))
{ {
string file = parser.get<String>("colors"); string file = findFile(parser.get<String>("colors"));
ifstream ifs(file.c_str()); ifstream ifs(file.c_str());
if (!ifs.is_open()) if (!ifs.is_open())
CV_Error(Error::StsError, "File " + file + " not found"); CV_Error(Error::StsError, "File " + file + " not found");
@ -146,29 +146,34 @@ int main(int argc, char **argv)
blobFromImage(frame, blob, scale, Size(inpWidth, inpHeight), mean, swapRB, false); blobFromImage(frame, blob, scale, Size(inpWidth, inpHeight), mean, swapRB, false);
//! [Set input blob] //! [Set input blob]
net.setInput(blob); net.setInput(blob);
//! [Make forward pass] //! [Set input blob]
Mat score = net.forward();
if (modelName == "u2netp") if (modelName == "u2netp")
{ {
Mat mask, thresholded_mask, foreground_overlay, background_overlay, foreground_segmented; vector<Mat> output;
mask = cv::Mat(score.size[2], score.size[3], CV_32F, score.ptr<float>(0, 0)); net.forward(output, net.getUnconnectedOutLayersNames());
mask.convertTo(mask, CV_8U, 255);
threshold(mask, thresholded_mask, 0, 255, THRESH_BINARY + THRESH_OTSU); Mat pred = output[0].reshape(1, output[0].size[2]);
resize(thresholded_mask, thresholded_mask, Size(frame.cols, frame.rows), 0, 0, INTER_AREA); pred.convertTo(pred, CV_8U, 255.0);
Mat mask;
resize(pred, mask, Size(frame.cols, frame.rows), 0, 0, INTER_AREA);
// Create overlays for foreground and background // Create overlays for foreground and background
foreground_overlay = Mat::zeros(frame.size(), frame.type()); Mat foreground_overlay;
background_overlay = Mat::zeros(frame.size(), frame.type());
// Set foreground (object) to red and background to blue // Set foreground (object) to red
foreground_overlay.setTo(Scalar(0, 0, 255), thresholded_mask); Mat all_zeros = Mat::zeros(frame.size(), CV_8UC1);
Mat inverted_mask; vector<Mat> channels = {all_zeros, all_zeros, mask};
bitwise_not(thresholded_mask, inverted_mask); merge(channels, foreground_overlay);
background_overlay.setTo(Scalar(255, 0, 0), inverted_mask);
// Blend the overlays with the original frame // Blend the overlays with the original frame
addWeighted(frame, 1, foreground_overlay, 0.5, 0, foreground_segmented); addWeighted(frame, 0.25, foreground_overlay, 0.75, 0, frame);
addWeighted(foreground_segmented, 1, background_overlay, 0.5, 0, frame);
} }
else else
{ {
//! [Make forward pass]
Mat score = net.forward();
//! [Make forward pass]
Mat segm; Mat segm;
colorizeSegmentation(score, segm); colorizeSegmentation(score, segm);
resize(segm, segm, frame.size(), 0, 0, INTER_NEAREST); resize(segm, segm, frame.size(), 0, 0, INTER_NEAREST);

View File

@ -75,14 +75,14 @@ def showLegend(classes):
classes = None classes = None
# Load a network # Load a network
net = cv.dnn.readNet(args.model) net = cv.dnn.readNetFromONNX(args.model)
net.setPreferableBackend(args.backend) net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target) net.setPreferableTarget(args.target)
winName = 'Deep learning semantic segmentation in OpenCV' winName = 'Deep learning semantic segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL) cv.namedWindow(winName, cv.WINDOW_NORMAL)
cap = cv.VideoCapture(args.input if args.input else 0) cap = cv.VideoCapture(cv.samples.findFile(args.input) if args.input else 0)
legend = None legend = None
while cv.waitKey(1) < 0: while cv.waitKey(1) < 0:
hasFrame, frame = cap.read() hasFrame, frame = cap.read()
@ -96,26 +96,24 @@ while cv.waitKey(1) < 0:
# Create a 4D blob from a frame. # Create a 4D blob from a frame.
inpWidth = args.width if args.width else frameWidth inpWidth = args.width if args.width else frameWidth
inpHeight = args.height if args.height else frameHeight inpHeight = args.height if args.height else frameHeight
blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False)
blob = cv.dnn.blobFromImage(frame, args.scale, (inpWidth, inpHeight), args.mean, args.rgb, crop=False)
net.setInput(blob) net.setInput(blob)
score = net.forward()
if args.alias == 'u2netp': if args.alias == 'u2netp':
mask = score[0][0] output = net.forward(net.getUnconnectedOutLayersNames())
mask = mask.astype(np.uint8) pred = output[0][0, 0, :, :]
_, mask = cv.threshold(mask, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU) mask = (pred * 255).astype(np.uint8)
mask = cv.resize(mask, (frame.shape[1], frame.shape[0]), interpolation=cv.INTER_AREA) mask = cv.resize(mask, (frame.shape[1], frame.shape[0]), interpolation=cv.INTER_AREA)
# Create overlays for foreground and background # Create overlays for foreground and background
foreground_overlay = np.zeros_like(frame, dtype=np.uint8) foreground_overlay = np.zeros_like(frame, dtype=np.uint8)
background_overlay = np.zeros_like(frame, dtype=np.uint8)
# Set foreground (object) to red and background to blue # Set foreground (object) to red and background to blue
foreground_overlay[mask == 255] = [0, 0, 255] # Red foreground foreground_overlay[:, :, 2] = mask # Red foreground
background_overlay[mask == 0] = [255, 0, 0] # Blue background
# Blend the overlays with the original frame # Blend the overlays with the original frame
foreground_segmented = cv.addWeighted(frame, 1, foreground_overlay, 0.5, 0) frame = cv.addWeighted(frame, 0.25, foreground_overlay, 0.75, 0)
frame = cv.addWeighted(foreground_segmented, 1, background_overlay, 0.5, 0)
else: else:
score = net.forward()
numClasses = score.shape[1] numClasses = score.shape[1]
height = score.shape[2] height = score.shape[2]
width = score.shape[3] width = score.shape[3]