From 0e40c8a03158e599193d26635fcc13f110c22896 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Wed, 16 Oct 2019 18:49:33 +0300
Subject: [PATCH] fix pylint warnings

pylint 1.8.3
---
 modules/python/test/test_misc.py                |  2 +-
 samples/dnn/fast_neural_style.py                |  2 +-
 samples/dnn/mobilenet_ssd_accuracy.py           |  2 +-
 samples/dnn/text_detection.py                   |  2 +-
 samples/dnn/tf_text_graph_common.py             |  2 +-
 samples/dnn/tf_text_graph_ssd.py                |  2 +-
 samples/python/browse.py                        |  2 +-
 samples/python/calibrate.py                     |  4 ++--
 .../camera_calibration_show_extrinsics.py       |  2 +-
 samples/python/color_histogram.py               |  2 +-
 samples/python/edge.py                          |  2 +-
 samples/python/facedetect.py                    |  2 +-
 samples/python/fitline.py                       |  1 +
 samples/python/houghcircles.py                  |  2 +-
 samples/python/houghlines.py                    |  4 ++--
 samples/python/kmeans.py                        |  2 +-
 samples/python/lappyr.py                        |  2 +-
 samples/python/opt_flow.py                      |  4 ++--
 samples/python/peopledetect.py                  |  2 +-
 samples/python/stereo_match.py                  |  4 ++--
 samples/python/turing.py                        |  2 +-
 .../core/mat_operations/mat_operations.py       | 18 +++++++++---------
 .../changing_contrast_brightness_image.py       |  4 ++--
 .../introduction_to_pca/introduction_to_pca.py  |  4 ++--
 samples/python/video_threaded.py                |  2 +-
 samples/python/video_v4l2.py                    |  2 +-
 26 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/modules/python/test/test_misc.py b/modules/python/test/test_misc.py
index 892215b9a1..7114bea3af 100644
--- a/modules/python/test/test_misc.py
+++ b/modules/python/test/test_misc.py
@@ -96,7 +96,7 @@ class SamplesFindFile(NewOpenCVTests):
 
     def test_MissingFileException(self):
         try:
-            res = cv.samples.findFile('non_existed.file', True)
+            _res = cv.samples.findFile('non_existed.file', True)
             self.assertEqual("Dead code", 0)
         except cv.error as _e:
             pass
diff --git a/samples/dnn/fast_neural_style.py b/samples/dnn/fast_neural_style.py
index 6afd166be5..912c2f0832 100644
--- a/samples/dnn/fast_neural_style.py
+++ b/samples/dnn/fast_neural_style.py
@@ -14,7 +14,7 @@ parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of
 args = parser.parse_args()
 
 net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model))
-net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
 
 if args.input:
     cap = cv.VideoCapture(args.input)
diff --git a/samples/dnn/mobilenet_ssd_accuracy.py b/samples/dnn/mobilenet_ssd_accuracy.py
index 58395acbdf..23fb06b921 100644
--- a/samples/dnn/mobilenet_ssd_accuracy.py
+++ b/samples/dnn/mobilenet_ssd_accuracy.py
@@ -27,7 +27,7 @@ args = parser.parse_args()
 
 ### Get OpenCV predictions #####################################################
 net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt))
-net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
 
 detections = []
 for imgName in os.listdir(args.images):
diff --git a/samples/dnn/text_detection.py b/samples/dnn/text_detection.py
index 9f7f159a54..9ea4c10190 100644
--- a/samples/dnn/text_detection.py
+++ b/samples/dnn/text_detection.py
@@ -134,7 +134,7 @@ def main():
         for j in range(4):
             p1 = (vertices[j][0], vertices[j][1])
             p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1])
-            cv.line(frame, p1, p2, (0, 255, 0), 1);
+            cv.line(frame, p1, p2, (0, 255, 0), 1)
 
     # Put efficiency information
     cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
diff --git a/samples/dnn/tf_text_graph_common.py b/samples/dnn/tf_text_graph_common.py
index b46b1d492c..5aa1d30e39 100644
--- a/samples/dnn/tf_text_graph_common.py
+++ b/samples/dnn/tf_text_graph_common.py
@@ -21,7 +21,7 @@ def tokenize(s):
             elif token:
                 tokens.append(token)
                 token = ""
-            isString = (symbol == '\"' or symbol == '\'') ^ isString;
+            isString = (symbol == '\"' or symbol == '\'') ^ isString
 
         elif symbol == '{' or symbol == '}' or symbol == '[' or symbol == ']':
             if token:
diff --git a/samples/dnn/tf_text_graph_ssd.py b/samples/dnn/tf_text_graph_ssd.py
index beaca3f4e4..e6017b227e 100644
--- a/samples/dnn/tf_text_graph_ssd.py
+++ b/samples/dnn/tf_text_graph_ssd.py
@@ -122,7 +122,7 @@ def createSSDGraph(modelPath, configPath, outputPath):
     print('Input image size: %dx%d' % (image_width, image_height))
 
     # Read the graph.
-    inpNames = ['image_tensor']
+    _inpNames = ['image_tensor']
     outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']
 
     writeTextGraph(modelPath, outputPath, outNames)
diff --git a/samples/python/browse.py b/samples/python/browse.py
index 14bd05a05d..edc791f9cd 100755
--- a/samples/python/browse.py
+++ b/samples/python/browse.py
@@ -45,7 +45,7 @@ def main():
 
     small = img
-    for i in xrange(3):
+    for _i in xrange(3):
         small = cv.pyrDown(small)
 
     def onmouse(event, x, y, flags, param):
diff --git a/samples/python/calibrate.py b/samples/python/calibrate.py
index 2378d8bf1a..bca430b5a5 100755
--- a/samples/python/calibrate.py
+++ b/samples/python/calibrate.py
@@ -97,7 +97,7 @@ def main():
         obj_points.append(pattern_points)
 
     # calculate camera distortion
-    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
+    rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
 
     print("\nRMS:", rms)
     print("camera matrix:\n", camera_matrix)
@@ -106,7 +106,7 @@ def main():
 
     # undistort the image with the calibration
     print('')
    for fn in img_names if debug_dir else []:
-        path, name, ext = splitfn(fn)
+        _path, name, _ext = splitfn(fn)
         img_found = os.path.join(debug_dir, name + '_chess.png')
         outfile = os.path.join(debug_dir, name + '_undistorted.png')
diff --git a/samples/python/camera_calibration_show_extrinsics.py b/samples/python/camera_calibration_show_extrinsics.py
index 610138bc7b..0118b5b913 100755
--- a/samples/python/camera_calibration_show_extrinsics.py
+++ b/samples/python/camera_calibration_show_extrinsics.py
@@ -184,7 +184,7 @@ def main():
     extrinsics = fs.getNode('extrinsic_parameters').mat()
 
     import matplotlib.pyplot as plt
-    from mpl_toolkits.mplot3d import Axes3D
+    from mpl_toolkits.mplot3d import Axes3D  # pylint: disable=unused-variable
 
     fig = plt.figure()
     ax = fig.gca(projection='3d')
diff --git a/samples/python/color_histogram.py b/samples/python/color_histogram.py
index 0422d7282c..a1924bab8b 100755
--- a/samples/python/color_histogram.py
+++ b/samples/python/color_histogram.py
@@ -46,7 +46,7 @@ class App():
        cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
 
         while True:
-            flag, frame = cam.read()
+            _flag, frame = cam.read()
             cv.imshow('camera', frame)
 
             small = cv.pyrDown(frame)
diff --git a/samples/python/edge.py b/samples/python/edge.py
index ba04adecfe..e85c2f6288 100755
--- a/samples/python/edge.py
+++ b/samples/python/edge.py
@@ -38,7 +38,7 @@ def main():
     cap = video.create_capture(fn)
 
     while True:
-        flag, img = cap.read()
+        _flag, img = cap.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         thrs1 = cv.getTrackbarPos('thrs1', 'edge')
         thrs2 = cv.getTrackbarPos('thrs2', 'edge')
diff --git a/samples/python/facedetect.py b/samples/python/facedetect.py
index 1050cc5aff..488c92d5e5 100755
--- a/samples/python/facedetect.py
+++ b/samples/python/facedetect.py
@@ -48,7 +48,7 @@ def main():
     cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
 
     while True:
-        ret, img = cam.read()
+        _ret, img = cam.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         gray = cv.equalizeHist(gray)
 
diff --git a/samples/python/fitline.py b/samples/python/fitline.py
index 6705f39abb..db695cbb2b 100755
--- a/samples/python/fitline.py
+++ b/samples/python/fitline.py
@@ -88,6 +88,7 @@ def main():
         update()
         ch = cv.waitKey(0)
         if ch == ord('f'):
+            global cur_func_name
             if PY3:
                 cur_func_name = next(dist_func_names)
             else:
diff --git a/samples/python/houghcircles.py b/samples/python/houghcircles.py
index b8d3a1a019..416309aab0 100755
--- a/samples/python/houghcircles.py
+++ b/samples/python/houghcircles.py
@@ -30,7 +30,7 @@ def main():
     circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
 
     if circles is not None: # Check if circles have been found and only then iterate over these and add them to the image
-        a, b, c = circles.shape
+        _a, b, _c = circles.shape
         for i in range(b):
             cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv.LINE_AA)
             cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv.LINE_AA)  # draw center of circle
diff --git a/samples/python/houghlines.py b/samples/python/houghlines.py
index 7c99cf2ae9..022b680f56 100755
--- a/samples/python/houghlines.py
+++ b/samples/python/houghlines.py
@@ -29,14 +29,14 @@ def main():
 
     if True: # HoughLinesP
         lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)
-        a,b,c = lines.shape
+        a, b, _c = lines.shape
         for i in range(a):
             cv.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv.LINE_AA)
 
     else: # HoughLines
         lines = cv.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
         if lines is not None:
-            a,b,c = lines.shape
+            a, b, _c = lines.shape
             for i in range(a):
                 rho = lines[i][0][0]
                 theta = lines[i][0][1]
diff --git a/samples/python/kmeans.py b/samples/python/kmeans.py
index d7fcbe8083..1b1c9d6a04 100755
--- a/samples/python/kmeans.py
+++ b/samples/python/kmeans.py
@@ -33,7 +33,7 @@ def main():
         points, _ = make_gaussians(cluster_n, img_size)
 
         term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
-        ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
+        _ret, labels, _centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
 
         img = np.zeros((img_size, img_size, 3), np.uint8)
         for (x, y), label in zip(np.int32(points), labels.ravel()):
diff --git a/samples/python/lappyr.py b/samples/python/lappyr.py
index 2ee73ecb1d..2835b98d13 100755
--- a/samples/python/lappyr.py
+++ b/samples/python/lappyr.py
@@ -60,7 +60,7 @@ def main():
         cv.createTrackbar('%d'%i, 'level control', 5, 50, nothing)
 
     while True:
-        ret, frame = cap.read()
+        _ret, frame = cap.read()
 
         pyr = build_lappyr(frame, leveln)
         for i in xrange(leveln):
diff --git a/samples/python/opt_flow.py b/samples/python/opt_flow.py
index c4515582e7..76a0ac2caf 100755
--- a/samples/python/opt_flow.py
+++ b/samples/python/opt_flow.py
@@ -64,14 +64,14 @@ def main():
         fn = 0
     cam = video.create_capture(fn)
-    ret, prev = cam.read()
+    _ret, prev = cam.read()
     prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
     show_hsv = False
     show_glitch = False
     cur_glitch = prev.copy()
 
     while True:
-        ret, img = cam.read()
+        _ret, img = cam.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
         prevgray = gray
 
diff --git a/samples/python/peopledetect.py b/samples/python/peopledetect.py
index d2a7fdeee5..bdd49cab6f 100755
--- a/samples/python/peopledetect.py
+++ b/samples/python/peopledetect.py
@@ -51,7 +51,7 @@ def main():
            print('loading error')
             continue
 
-        found, w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
+        found, _w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
         found_filtered = []
         for ri, r in enumerate(found):
             for qi, q in enumerate(found):
diff --git a/samples/python/stereo_match.py b/samples/python/stereo_match.py
index 969ea11dbb..4d5875b814 100755
--- a/samples/python/stereo_match.py
+++ b/samples/python/stereo_match.py
@@ -69,8 +69,8 @@ def main():
     out_points = points[mask]
     out_colors = colors[mask]
     out_fn = 'out.ply'
-    write_ply('out.ply', out_points, out_colors)
-    print('%s saved' % 'out.ply')
+    write_ply(out_fn, out_points, out_colors)
+    print('%s saved' % out_fn)
 
     cv.imshow('left', imgL)
     cv.imshow('disparity', (disp-min_disp)/num_disp)
diff --git a/samples/python/turing.py b/samples/python/turing.py
index 27dbe02ad3..dc920d1295 100755
--- a/samples/python/turing.py
+++ b/samples/python/turing.py
@@ -32,7 +32,7 @@ def main():
 
     w, h = 512, 512
 
-    args, args_list = getopt.getopt(sys.argv[1:], 'o:', [])
+    args, _args_list = getopt.getopt(sys.argv[1:], 'o:', [])
     args = dict(args)
     out = None
     if '-o' in args:
diff --git a/samples/python/tutorial_code/core/mat_operations/mat_operations.py b/samples/python/tutorial_code/core/mat_operations/mat_operations.py
index e9ec03699d..f237074fb6 100644
--- a/samples/python/tutorial_code/core/mat_operations/mat_operations.py
+++ b/samples/python/tutorial_code/core/mat_operations/mat_operations.py
@@ -25,13 +25,13 @@ def access_pixel():
     y = 0
     x = 0
     ## [Pixel access 1]
-    intensity = img[y,x]
+    _intensity = img[y,x]
     ## [Pixel access 1]
 
     ## [Pixel access 3]
-    blue = img[y,x,0]
-    green = img[y,x,1]
-    red = img[y,x,2]
+    _blue = img[y,x,0]
+    _green = img[y,x,1]
+    _red = img[y,x,2]
     ## [Pixel access 3]
 
     ## [Pixel access 5]
@@ -42,12 +42,12 @@ def reference_counting():
     # Memory management and reference counting
     ## [Reference counting 2]
     img = cv.imread('image.jpg')
-    img1 = np.copy(img)
+    _img1 = np.copy(img)
     ## [Reference counting 2]
 
     ## [Reference counting 3]
     img = cv.imread('image.jpg')
-    sobelx = cv.Sobel(img, cv.CV_32F, 1, 0);
+    _sobelx = cv.Sobel(img, cv.CV_32F, 1, 0)
     ## [Reference counting 3]
 
 def primitive_operations():
@@ -57,17 +57,17 @@ def primitive_operations():
     ## [Set image to black]
 
     ## [Select ROI]
-    smallImg = img[10:110,10:110]
+    _smallImg = img[10:110,10:110]
     ## [Select ROI]
 
     ## [BGR to Gray]
     img = cv.imread('image.jpg')
-    grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+    _grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
     ## [BGR to Gray]
 
     src = np.ones((4,4), np.uint8)
     ## [Convert to CV_32F]
-    dst = src.astype(np.float32)
+    _dst = src.astype(np.float32)
     ## [Convert to CV_32F]
 
 def visualize_images():
diff --git a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py
index b3f316396a..127a0f4325 100644
--- a/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py
+++ b/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py
@@ -25,8 +25,8 @@ def gammaCorrection():
     res = cv.LUT(img_original, lookUpTable)
     ## [changing-contrast-brightness-gamma-correction]
 
-    img_gamma_corrected = cv.hconcat([img_original, res]);
-    cv.imshow("Gamma correction", img_gamma_corrected);
+    img_gamma_corrected = cv.hconcat([img_original, res])
+    cv.imshow("Gamma correction", img_gamma_corrected)
 
 def on_linear_transform_alpha_trackbar(val):
     global alpha
diff --git a/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py b/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
index 62c7516bf0..2c39bd9578 100644
--- a/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
+++ b/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
@@ -85,13 +85,13 @@ _, contours, _ = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
 
 for i, c in enumerate(contours):
     # Calculate the area of each contour
-    area = cv.contourArea(c);
+    area = cv.contourArea(c)
     # Ignore contours that are too small or too large
     if area < 1e2 or 1e5 < area:
         continue
 
     # Draw each contour only for visualisation purposes
-    cv.drawContours(src, contours, i, (0, 0, 255), 2);
+    cv.drawContours(src, contours, i, (0, 0, 255), 2)
     # Find the orientation of each shape
     getOrientation(c, src)
 ## [contours]
diff --git a/samples/python/video_threaded.py b/samples/python/video_threaded.py
index 4886db3d80..cbc73d296b 100755
--- a/samples/python/video_threaded.py
+++ b/samples/python/video_threaded.py
@@ -70,7 +70,7 @@ def main():
             draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value*1000))
             cv.imshow('threaded video', res)
         if len(pending) < threadn:
-            ret, frame = cap.read()
+            _ret, frame = cap.read()
             t = clock()
             frame_interval.update(t - last_frame_time)
             last_frame_time = t
diff --git a/samples/python/video_v4l2.py b/samples/python/video_v4l2.py
index 68f22699b1..61b1e35804 100644
--- a/samples/python/video_v4l2.py
+++ b/samples/python/video_v4l2.py
@@ -42,7 +42,7 @@ def main():
     cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
 
     while True:
-        status, img = cap.read()
+        _status, img = cap.read()
         fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))
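
Editor's note, not part of the commit: the recurring fix above relies on pylint
treating names that match its dummy-variables-rgx option (which covers
leading-underscore names in common configurations) as intentionally unused, so
renaming write-only unpacking targets such as ret to _ret silences
unused-variable (W0612) without changing behavior; dropping the trailing
semicolons likewise silences unnecessary-semicolon (W0301). A minimal
standalone sketch of the renaming pattern, where frame_size is a hypothetical
helper and not code from this patch:

    # Hypothetical example of the underscore convention (not from this patch).
    # pylint's unused-variable check skips leading-underscore names, so the
    # unpacked-but-never-read channel count below raises no W0612 warning.
    def frame_size(shape):
        """Return (width, height) from a NumPy-style image shape tuple."""
        height, width, _channels = shape  # _channels is deliberately unused
        return width, height

    print(frame_size((480, 640, 3)))  # prints: (640, 480)

The same effect is available per line via an inline comment, as the
camera_calibration_show_extrinsics.py hunk shows with
"# pylint: disable=unused-variable" on an import kept for its side effect.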