python(test): enable pylint checks for tests
parent e1b102e9a6
commit 936234d5b1
@@ -853,6 +853,7 @@ if(ANDROID OR NOT UNIX)
   endif()
 endif()
 if(COMMAND ocv_pylint_finalize)
+  ocv_pylint_add_directory(${CMAKE_CURRENT_LIST_DIR}/modules/python/test)
   ocv_pylint_add_directory(${CMAKE_CURRENT_LIST_DIR}/samples/python)
   ocv_pylint_add_directory(${CMAKE_CURRENT_LIST_DIR}/samples/dnn)
   ocv_pylint_add_directory_recurse(${CMAKE_CURRENT_LIST_DIR}/samples/python/tutorial_code)
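Note: the ocv_pylint_* macros above come from OpenCV's own CMake scripts; this hunk only registers the Python test directory with the existing lint machinery. As a rough manual equivalent (illustrative only, not part of the patch; assumes pylint is installed and the script is run from the OpenCV source root):

import glob
import subprocess

# directory -> recurse flag, mirroring ocv_pylint_add_directory vs ..._recurse
LINT_DIRS = {
    "modules/python/test": False,   # newly registered by this commit
    "samples/python": False,
    "samples/dnn": False,
    "samples/python/tutorial_code": True,
}

for path, recurse in LINT_DIRS.items():
    pattern = path + ("/**/*.py" if recurse else "/*.py")
    files = glob.glob(pattern, recursive=True)
    if files:
        subprocess.run(["pylint"] + files, check=False)  # report findings, do not abort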
@@ -75,7 +75,6 @@ class Hackathon244Tests(NewOpenCVTests):
         fd = cv2.FastFeatureDetector_create(30, True)
         img = self.get_sample("samples/data/right02.jpg", 0)
         img = cv2.medianBlur(img, 3)
-        imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
         keypoints = fd.detect(img)
         self.assertTrue(600 <= len(keypoints) <= 700)
         for kpt in keypoints:
@@ -99,7 +98,6 @@ class Hackathon244Tests(NewOpenCVTests):
         np.random.seed(244)
         a = np.random.randn(npt,2).astype('float32')*50 + 150

-        img = np.zeros((300, 300, 3), dtype='uint8')
         be = cv2.fitEllipse(a)
         br = cv2.minAreaRect(a)
         mc, mr = cv2.minEnclosingCircle(a)
@@ -138,10 +136,10 @@ class Hackathon244Tests(NewOpenCVTests):

     def test_umat_handle(self):
         a_um = cv2.UMat(256, 256, cv2.CV_32F)
-        ctx_handle = cv2.UMat.context() # obtain context handle
-        queue_handle = cv2.UMat.queue() # obtain queue handle
-        a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
-        offset = a_um.offset # obtain buffer offset
+        _ctx_handle = cv2.UMat.context() # obtain context handle
+        _queue_handle = cv2.UMat.queue() # obtain queue handle
+        _a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
+        _offset = a_um.offset # obtain buffer offset

     def test_umat_matching(self):
         img1 = self.get_sample("samples/data/right01.jpg")
@@ -186,11 +184,11 @@ class Hackathon244Tests(NewOpenCVTests):
         p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
         self.assertTrue(np.allclose(p0_umat.get(), p0))

-        p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)
+        _p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)

-        p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
-        p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
-        p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))
+        _p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
+        _p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
+        _p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))

         # # results of OCL optical flow differs from CPU implementation, so result can not be easily compared
         # for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
@@ -212,5 +210,5 @@ if __name__ == '__main__':
     except KeyError:
         print('Missing opencv extra repository. Some of tests may fail.')
     random.seed(0)
-    unit_argv = [sys.argv[0]] + other;
+    unit_argv = [sys.argv[0]] + other
     unittest.main(argv=unit_argv)
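The test changes that follow mostly apply one pylint-driven convention: values that are unpacked but never read are renamed with a leading underscore, which pylint's unused-variable check (W0612) ignores by default, and stray semicolons are dropped. A minimal sketch of the rename pattern (illustrative, not taken from the patch):

import cv2
import numpy as np

def binarize(img):
    # Before: 'retval' was never read, so pylint flagged it as unused-variable.
    # retval, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # After: the leading underscore marks the value as deliberately discarded.
    _retval, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    return binary

print(binarize(np.zeros((64, 64), np.uint8)).shape)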
@@ -16,8 +16,6 @@ from tests_common import NewOpenCVTests
 class calibration_test(NewOpenCVTests):

     def test_calibration(self):
-
-        from glob import glob
         img_names = []
         for i in range(1, 15):
             if i < 10:
@@ -34,7 +32,6 @@ class calibration_test(NewOpenCVTests):
         obj_points = []
         img_points = []
         h, w = 0, 0
-        img_names_undistort = []
         for fn in img_names:
             img = self.get_sample(fn, 0)
             if img is None:
@@ -53,7 +50,7 @@ class calibration_test(NewOpenCVTests):
             obj_points.append(pattern_points)

         # calculate camera distortion
-        rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)
+        rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)

         eps = 0.01
         normCamEps = 10.0
@@ -73,7 +73,7 @@ class camshift_test(NewOpenCVTests):
         prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
         prob &= mask
         term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
-        track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
+        _track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

         trackingRect = np.array(self.track_window)
         trackingRect[2] += trackingRect[0]
@@ -71,7 +71,7 @@ class KNearest(StatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

     def predict(self, samples):
-        retval, results, neigh_resp, dists = self.model.findNearest(samples, self.k)
+        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
         return results.ravel()

 class SVM(StatModel):
@@ -147,7 +147,7 @@ class digits_test(NewOpenCVTests):
         samples = preprocess_hog(digits2)

         train_n = int(0.9*len(samples))
-        digits_train, digits_test = np.split(digits2, [train_n])
+        _digits_train, digits_test = np.split(digits2, [train_n])
         samples_train, samples_test = np.split(samples, [train_n])
         labels_train, labels_test = np.split(labels, [train_n])
         errors = list()
@@ -23,8 +23,6 @@ from tests_common import NewOpenCVTests, intersectionRate
 class facedetect_test(NewOpenCVTests):

     def test_facedetect(self):
-        import sys, getopt
-
         cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
         nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'

@@ -28,7 +28,7 @@ def intersectionRate(s1, s2):
     x1, y1, x2, y2 = s1
     s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])

-    area, intersection = cv2.intersectConvexConvex(s1, np.array(s2))
+    area, _intersection = cv2.intersectConvexConvex(s1, np.array(s2))
     return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))

 from tests_common import NewOpenCVTests
@@ -15,7 +15,7 @@ import cv2
 def make_gaussians(cluster_n, img_size):
     points = []
     ref_distrs = []
-    for i in xrange(cluster_n):
+    for _ in xrange(cluster_n):
         mean = (0.1 + 0.8*random.rand(2)) * img_size
         a = (random.rand(2, 2)-0.5)*img_size*0.1
         cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
@@ -44,7 +44,7 @@ class gaussian_mix_test(NewOpenCVTests):
         em.trainEM(points)
         means = em.getMeans()
         covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
-        found_distrs = zip(means, covs)
+        #found_distrs = zip(means, covs)

         matches_count = 0

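The xrange change above is the loop-variable flavour of the same convention: when the index is never used, an underscore name satisfies pylint's unused-variable check. A tiny illustrative example (not from the patch):

# index is not needed, so '_' is used instead of 'i'
clusters = [object() for _ in range(5)]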
@@ -27,10 +27,10 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests):
                 self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0)

         for t0,t1 in zip(threshes, threshes[1:]):
             r0 = results[t0]
             r1 = results[t1]
             # Increasing thresh should make result list shorter
             self.assertTrue(len(r0) > len(r1))
             # Increasing thresh should monly truncate result list
             for i in range(len(r1)):
                 self.assertTrue(cv2.norm(r1[i][0] - r0[i][0])==0)
@@ -17,7 +17,6 @@ from tests_common import NewOpenCVTests
 def circleApproximation(circle):

     nPoints = 30
-    phi = 0
     dPhi = 2*pi / nPoints
     contour = []
     for i in range(nPoints):
@@ -21,7 +21,7 @@ def make_gaussians(cluster_n, img_size):
     points = []
     ref_distrs = []
     sizes = []
-    for i in xrange(cluster_n):
+    for _ in xrange(cluster_n):
         mean = (0.1 + 0.8*random.rand(2)) * img_size
         a = (random.rand(2, 2)-0.5)*img_size*0.1
         cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
@@ -59,7 +59,7 @@ class kmeans_test(NewOpenCVTests):
         points, _, clusterSizes = make_gaussians(cluster_n, img_size)

         term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
-        ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0)
+        _ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0)

         self.assertEqual(len(centers), cluster_n)

@@ -59,12 +59,12 @@ class RTrees(LetterStatModel):
         self.model = cv2.ml.RTrees_create()

     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
+        #sample_n, var_n = samples.shape
         self.model.setMaxDepth(20)
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))

     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.ravel()


@@ -76,7 +76,7 @@ class KNearest(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

     def predict(self, samples):
-        retval, results, neigh_resp, dists = self.model.findNearest(samples, k = 10)
+        _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
         return results.ravel()


@@ -85,7 +85,7 @@ class Boost(LetterStatModel):
         self.model = cv2.ml.Boost_create()

     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
+        _sample_n, var_n = samples.shape
         new_samples = self.unroll_samples(samples)
         new_responses = self.unroll_responses(responses)
         var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8)
@@ -96,7 +96,7 @@ class Boost(LetterStatModel):

     def predict(self, samples):
         new_samples = self.unroll_samples(samples)
-        ret, resp = self.model.predict(new_samples)
+        _ret, resp = self.model.predict(new_samples)

         return resp.ravel().reshape(-1, self.class_n).argmax(1)

@@ -113,7 +113,7 @@ class SVM(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))

     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.ravel()


@@ -122,7 +122,7 @@ class MLP(LetterStatModel):
         self.model = cv2.ml.ANN_MLP_create()

     def train(self, samples, responses):
-        sample_n, var_n = samples.shape
+        _sample_n, var_n = samples.shape
         new_responses = self.unroll_responses(responses).reshape(-1, self.class_n)
         layer_sizes = np.int32([var_n, 100, 100, self.class_n])

@@ -136,7 +136,7 @@ class MLP(LetterStatModel):
         self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))

     def predict(self, samples):
-        ret, resp = self.model.predict(samples)
+        _ret, resp = self.model.predict(samples)
         return resp.argmax(-1)

 from tests_common import NewOpenCVTests
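For comparison (not something this patch does), a single finding can also be suppressed in place with a pylint pragma instead of renaming; the renames were preferred here because they keep the intent visible in the code itself. A hedged sketch, assuming 'model' is any cv2.ml classifier like the ones above:

def predict_labels(model, samples):
    # 'retval' is unused; the inline pragma silences the warning on this line only.
    retval, results = model.predict(samples)  # pylint: disable=unused-variable
    return results.ravel()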
@@ -27,8 +27,8 @@ feature_params = dict( maxCorners = 1000,
                        blockSize = 19 )

 def checkedTrace(img0, img1, p0, back_threshold = 1.0):
-    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
-    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
+    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
+    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
     d = abs(p0-p0r).reshape(-1, 2).max(-1)
     status = d < back_threshold
     return p1, status
@@ -77,11 +77,11 @@ class lk_homography_test(NewOpenCVTests):
                 if len(self.p0) < 4:
                     self.p0 = None
                     continue
-                H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)
+                _H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)

                 goodPointsInRect = 0
                 goodPointsOutsideRect = 0
-                for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
+                for (_x0, _y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                     if good:
                         if isPointInRect((x1,y1), self.render.getCurrentRect()):
                             goodPointsInRect += 1
@@ -91,6 +91,6 @@ class lk_homography_test(NewOpenCVTests):
                     isForegroundHomographyFound = True
                     self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
             else:
-                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
+                self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

         self.assertEqual(isForegroundHomographyFound, True)
@@ -63,8 +63,8 @@ class lk_track_test(NewOpenCVTests):
             if len(self.tracks) > 0:
                 img0, img1 = self.prev_gray, frame_gray
                 p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
-                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
-                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
+                p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
+                p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                 d = abs(p0-p0r).reshape(-1, 2).max(-1)
                 good = d < 1
                 new_tracks = []
@@ -37,7 +37,7 @@ class mser_test(NewOpenCVTests):
         mserExtractor.setDelta(kDelta)
         np.random.seed(10)

-        for i in range(100):
+        for _i in range(100):

             use_big_image = int(np.random.rand(1,1)*7) != 0
             invert = int(np.random.rand(1,1)*2) != 0
@@ -38,7 +38,7 @@ class peopledetect_test(NewOpenCVTests):

             img = self.get_sample(dirPath + sample, 0)

-            found, w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
+            found, _w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
             found_filtered = []
             for ri, r in enumerate(found):
                 for qi, q in enumerate(found):
@@ -7,8 +7,8 @@ class shape_test(NewOpenCVTests):

     def test_computeDistance(self):

-        a = self.get_sample('samples/data/shape_sample/1.png', cv2.IMREAD_GRAYSCALE);
-        b = self.get_sample('samples/data/shape_sample/2.png', cv2.IMREAD_GRAYSCALE);
+        a = self.get_sample('samples/data/shape_sample/1.png', cv2.IMREAD_GRAYSCALE)
+        b = self.get_sample('samples/data/shape_sample/2.png', cv2.IMREAD_GRAYSCALE)

         _, ca, _ = cv2.findContours(a, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
         _, cb, _ = cv2.findContours(b, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
@@ -30,8 +30,8 @@ def find_squares(img):
             bin = cv2.Canny(gray, 0, 50, apertureSize=5)
             bin = cv2.dilate(bin, None)
         else:
-            retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
-        bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+            _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
+        bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
         for cnt in contours:
             cnt_len = cv2.arcLength(cnt, True)
             cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
@@ -44,7 +44,7 @@ def find_squares(img):
     return squares

 def intersectionRate(s1, s2):
-    area, intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2))
+    area, _intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2))
     return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2)))

 def filterSquares(squares, square):
@@ -11,7 +11,7 @@ class stitching_test(NewOpenCVTests):
         img2 = self.get_sample('stitching/a2.png')

         stitcher = cv2.createStitcher(False)
-        (result, pano) = stitcher.stitch((img1, img2))
+        (_result, pano) = stitcher.stitch((img1, img2))

         #cv2.imshow("pano", pano)
         #cv2.waitKey()
@@ -70,7 +70,7 @@ def intersectionRate(s1, s2):
     x1, y1, x2, y2 = s2
     s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])

-    area, intersection = cv2.intersectConvexConvex(s1, s2)
+    area, _intersection = cv2.intersectConvexConvex(s1, s2)
     return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2))

 def isPointInRect(p, rect):