Mirror of https://github.com/opencv/opencv.git (synced 2024-12-15 18:09:11 +08:00)
03994163b5
Raft support added in this sample code #24913

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is an accuracy test, performance test and test data in the opencv_extra repository, if applicable (the patch to opencv_extra has the same branch name)
- [x] The feature is well documented and sample code can be built with the project CMake

fix: https://github.com/opencv/opencv/issues/24424

Update the DNN Optical Flow sample with the RAFT model. I implemented both RAFT and FlowNet v2, leaving it to the user to choose which one to use to estimate the optical flow.

Co-authored-by: Uday Sharma <uday@192.168.1.35>
121 lines
4.8 KiB
Python
#!/usr/bin/env python
'''
This sample uses the FlowNet v2 and RAFT models to calculate optical flow.

FlowNet v2 Original Paper: https://arxiv.org/abs/1612.01925.
FlowNet v2 Repo: https://github.com/lmb-freiburg/flownet2.

Download the converted .caffemodel model from https://drive.google.com/open?id=16qvE9VNmU39NttpZwZs81Ga8VYQJDaWZ
and the .prototxt from https://drive.google.com/file/d/1RyNIUsan1ZOh2hpYIH36A-jofAvJlT6a/view?usp=sharing.
Otherwise download the original model from https://lmb.informatik.uni-freiburg.de/resources/binaries/flownet2/flownet2-models.tar.gz,
convert the .h5 model to .caffemodel and modify the original .prototxt using the .prototxt from the link above.

RAFT Original Paper: https://arxiv.org/pdf/2003.12039.pdf
RAFT Repo: https://github.com/princeton-vl/RAFT

Download the .onnx model from https://github.com/opencv/opencv_zoo/raw/281d232cd99cd920853106d853c440edd35eb442/models/optical_flow_estimation_raft/optical_flow_estimation_raft_2023aug.onnx.
'''
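
# Example invocations (a sketch for illustration; the script name optical_flow.py
# and the video/FlowNet file names are placeholders, not shipped with the sample):
#   RAFT (ONNX):        python optical_flow.py -i input.mp4 -m optical_flow_estimation_raft_2023aug.onnx
#   FlowNet v2 (Caffe): python optical_flow.py -i input.mp4 -m FlowNet2.caffemodel -p FlowNet2_deploy.prototxt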

import argparse
import os.path
import numpy as np
import cv2 as cv


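# Thin wrapper around cv.dnn: loads either a Caffe network (FlowNet v2, when a
# prototxt is supplied) or any format supported by cv.dnn.readNet (e.g. the RAFT
# ONNX model), and converts the raw 2-channel flow output into a color image.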
class OpticalFlow(object):
    def __init__(self, model, height, width, proto=""):
        if proto:
            self.net = cv.dnn.readNetFromCaffe(proto, model)
        else:
            self.net = cv.dnn.readNet(model)
        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        self.height = height
        self.width = width

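    # Estimate flow for a pair of frames: both images are converted to blobs,
    # fed to the named inputs "img0" and "img1", and the predicted flow field
    # is turned into a color visualization.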
    def compute_flow(self, first_img, second_img):
        inp0 = cv.dnn.blobFromImage(first_img, size=(self.width, self.height))
        inp1 = cv.dnn.blobFromImage(second_img, size=(self.width, self.height))
        self.net.setInputsNames(["img0", "img1"])
        self.net.setInput(inp0, "img0")
        self.net.setInput(inp1, "img1")

        flow = self.net.forward()
        output = self.motion_to_color(flow)
        return output

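    # Map the 2-channel flow field to a color image: the flow direction selects
    # a color from an HSV colormap (interpolated between neighboring entries),
    # and the magnitude, normalized by its maximum, controls how saturated the
    # color is, so small motion appears close to white.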
    def motion_to_color(self, flow):
        arr = np.arange(0, 255, dtype=np.uint8)
        colormap = cv.applyColorMap(arr, cv.COLORMAP_HSV)
        colormap = colormap.squeeze(1)

        flow = flow.squeeze(0)
        fx, fy = flow[0, ...], flow[1, ...]
        rad = np.sqrt(fx**2 + fy**2)
        maxrad = rad.max() if rad.max() != 0 else 1

        ncols = arr.size
        rad = rad[..., np.newaxis] / maxrad
        a = np.arctan2(-fy / maxrad, -fx / maxrad) / np.pi
        fk = (a + 1) / 2.0 * (ncols - 1)
        k0 = fk.astype(np.int32)
        k1 = (k0 + 1) % ncols
        f = fk[..., np.newaxis] - k0[..., np.newaxis]

        col0 = colormap[k0] / 255.0
        col1 = colormap[k1] / 255.0
        col = (1 - f) * col0 + f * col1
        col = np.where(rad <= 1, 1 - rad * (1 - col), col * 0.75)
        output = (255.0 * col).astype(np.uint8)
        return output


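# Command-line entry point: parse the arguments, load the requested model and
# show the color-coded flow between consecutive frames of the video or camera.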
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Use this script to calculate optical flow',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
    parser.add_argument('--height', default=320, type=int, help='Input height')
    parser.add_argument('--width', default=448, type=int, help='Input width')
    parser.add_argument('--proto', '-p', default='', help='Path to prototxt.')
    parser.add_argument('--model', '-m', required=True, help='Path to model.')
    args, _ = parser.parse_known_args()

    if not os.path.isfile(args.model):
        raise OSError("Model does not exist")
    if args.proto and not os.path.isfile(args.proto):
        raise OSError("Prototxt does not exist")

    winName = 'Optical flow calculation in OpenCV'
    cv.namedWindow(winName, cv.WINDOW_NORMAL)
    cap = cv.VideoCapture(args.input if args.input else 0)
    hasFrame, first_frame = cap.read()

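    # The FlowNet v2 prototxt contains $ADAPTED_WIDTH$, $ADAPTED_HEIGHT$,
    # $SCALE_WIDTH$ and $SCALE_HEIGHT$ placeholders; they are replaced here with
    # the requested size rounded up to a multiple of 64 and the matching scale
    # factors, and the patched text is passed to the network as the prototxt buffer.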
    if args.proto:
        divisor = 64.
        var = {}
        var['ADAPTED_WIDTH'] = int(np.ceil(args.width/divisor) * divisor)
        var['ADAPTED_HEIGHT'] = int(np.ceil(args.height/divisor) * divisor)
        var['SCALE_WIDTH'] = args.width / float(var['ADAPTED_WIDTH'])
        var['SCALE_HEIGHT'] = args.height / float(var['ADAPTED_HEIGHT'])

        config = ''
        proto = open(args.proto).readlines()
        for line in proto:
            for key, value in var.items():
                tag = "$%s$" % key
                line = line.replace(tag, str(value))
            config += line

        caffemodel = open(args.model, 'rb').read()

        opt_flow = OpticalFlow(caffemodel, var['ADAPTED_HEIGHT'], var['ADAPTED_WIDTH'], bytearray(config.encode()))
    else:
        opt_flow = OpticalFlow(args.model, 360, 480)

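    # Process the stream: estimate flow between each pair of consecutive frames
    # and display it until a key is pressed or the video ends.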
    while cv.waitKey(1) < 0:
        hasFrame, second_frame = cap.read()
        if not hasFrame:
            break
        flow = opt_flow.compute_flow(first_frame, second_frame)
        first_frame = second_frame
        cv.imshow(winName, flow)