Mirror of https://github.com/opencv/opencv.git
Python samples adapted for Python3 compatibility
Common fixes:
- print function
- int / float division
- map, zip iterators in py3 but lists in py2

Known bugs with opencv 3.0.0:
- digits.py, called via digits_video.py: https://github.com/Itseez/opencv/issues/4969
- gaussian_mix.py: https://github.com/Itseez/opencv/pull/4232
- video_v4l2.py: https://github.com/Itseez/opencv/pull/5474

Not working:
- letter_recog.py due to changed ml_StatModel.train() signature
Commit 4ed2d6328b (parent 5cdf0e3e89)
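The recurring pattern behind these fixes is a small compatibility shim at the top of each sample, plus explicit int conversion and list() wrapping where Python 3 semantics differ. A minimal standalone sketch of that pattern follows (a hypothetical example for illustration, not part of the diff):

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
    xrange = range   # keep Python 2 spelling working under Python 3

def demo(items):
    n = len(items)
    half = n // 2                                  # floor division is integral in both versions
    print('processing %d items' % n, end='')       # print() with end= replaces the trailing comma
    print()
    return list(map(str, items[:half]))            # map() is an iterator on Py3, so wrap in list()

print(demo(list(range(5))))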
@@ -19,6 +19,9 @@ USAGE
Press left mouse button on a feature point to see its matching point.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -96,15 +99,15 @@ def affine_detect(detector, img, mask=None, pool=None):
ires = pool.imap(f, params)

for i, (k, d) in enumerate(ires):
print 'affine sampling: %d / %d\r' % (i+1, len(params)),
print('affine sampling: %d / %d\r' % (i+1, len(params)), end='')
keypoints.extend(k)
descrs.extend(d)

print
print()
return keypoints, np.array(descrs)

if __name__ == '__main__':
print __doc__
print(__doc__)

import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])

@@ -121,23 +124,23 @@ if __name__ == '__main__':
detector, matcher = init_feature(feature_name)

if img1 is None:
print 'Failed to load fn1:', fn1
print('Failed to load fn1:', fn1)
sys.exit(1)

if img2 is None:
print 'Failed to load fn2:', fn2
print('Failed to load fn2:', fn2)
sys.exit(1)

if detector is None:
print 'unknown feature:', feature_name
print('unknown feature:', feature_name)
sys.exit(1)

print 'using', feature_name
print('using', feature_name)

pool=ThreadPool(processes = cv2.getNumberOfCPUs())
kp1, desc1 = affine_detect(detector, img1, pool=pool)
kp2, desc2 = affine_detect(detector, img2, pool=pool)
print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

def match_and_draw(win):
with Timer('matching'):

@@ -145,12 +148,12 @@ if __name__ == '__main__':
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print '%d / %d inliers/matched' % (np.sum(status), len(status))
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
else:
H, status = None, None
print '%d matches found, not enough for homography estimation' % len(p1)
print('%d matches found, not enough for homography estimation' % len(p1))

vis = explore_match(win, img1, img2, kp_pairs, None, H)
@@ -9,6 +9,14 @@ Usage:
A trackbar is put up which controls the contour level from -3 to 3
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
xrange = range

import numpy as np
import cv2

@@ -16,8 +24,8 @@ def make_image():
img = np.zeros((500, 500), np.uint8)
black, white = 0, 255
for i in xrange(6):
dx = (i%2)*250 - 30
dy = (i/2)*150
dx = int((i%2)*250 - 30)
dy = int((i/2.)*150)

if i == 0:
for j in xrange(11):

@@ -41,7 +49,7 @@ def make_image():
return img

if __name__ == '__main__':
print __doc__
print(__doc__)

img = make_image()
h, w = img.shape[:2]

@@ -52,7 +60,7 @@ if __name__ == '__main__':
def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = levels - 3
cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
3, cv2.LINE_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
update(3)
@@ -23,6 +23,10 @@ Usage:
digits.py
'''

# Python 2/3 compatibility
from __future__ import print_function

# built-in modules
from multiprocessing.pool import ThreadPool

@@ -50,7 +54,7 @@ def split2d(img, cell_size, flatten=True):
return cells

def load_digits(fn):
print 'loading "%s" ...' % fn
print('loading "%s" ...' % fn)
digits_img = cv2.imread(fn, 0)
digits = split2d(digits_img, (SZ, SZ))
labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N)

@@ -67,7 +71,7 @@ def deskew(img):

class StatModel(object):
def load(self, fn):
self.model.load(fn)
self.model.load(fn) # Known bug: https://github.com/Itseez/opencv/issues/4969
def save(self, fn):
self.model.save(fn)

@@ -101,14 +105,14 @@ class SVM(StatModel):
def evaluate_model(model, digits, samples, labels):
resp = model.predict(samples)
err = (labels != resp).mean()
print 'error: %.2f %%' % (err*100)
print('error: %.2f %%' % (err*100))

confusion = np.zeros((10, 10), np.int32)
for i, j in zip(labels, resp):
confusion[i, j] += 1
print 'confusion matrix:'
print confusion
print
print('confusion matrix:')
print(confusion)
print()

vis = []
for img, flag in zip(digits, resp == labels):

@@ -145,17 +149,17 @@ def preprocess_hog(digits):

if __name__ == '__main__':
print __doc__
print(__doc__)

digits, labels = load_digits(DIGITS_FN)

print 'preprocessing...'
print('preprocessing...')
# shuffle digits
rand = np.random.RandomState(321)
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]

digits2 = map(deskew, digits)
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)

train_n = int(0.9*len(samples))

@@ -165,18 +169,18 @@ if __name__ == '__main__':
labels_train, labels_test = np.split(labels, [train_n])

print 'training KNearest...'
print('training KNearest...')
model = KNearest(k=4)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv2.imshow('KNearest test', vis)

print 'training SVM...'
print('training SVM...')
model = SVM(C=2.67, gamma=5.383)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
cv2.imshow('SVM test', vis)
print 'saving SVM as "digits_svm.dat"...'
print('saving SVM as "digits_svm.dat"...')
model.save('digits_svm.dat')

cv2.waitKey(0)
@@ -13,6 +13,14 @@ Usage:

'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
xrange = range

import numpy as np
import cv2
from multiprocessing.pool import ThreadPool

@@ -33,10 +41,10 @@ def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None)
model.train(train_samples, train_labels)
resp = model.predict(test_samples)
score = (resp != test_labels).mean()
print ".",
print(".", end='')
return score
if pool is None:
scores = map(f, xrange(kfold))
scores = list(map(f, xrange(kfold)))
else:
scores = pool.map(f, xrange(kfold))
return np.mean(scores)

@@ -50,7 +58,7 @@ class App(object):
digits, labels = load_digits(DIGITS_FN)
shuffle = np.random.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = map(deskew, digits)
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
return samples, labels

@@ -68,7 +76,7 @@ class App(object):
scores = np.zeros((len(Cs), len(gammas)))
scores[:] = np.nan

print 'adjusting SVM (may take a long time) ...'
print('adjusting SVM (may take a long time) ...')
def f(job):
i, j = job
samples, labels = self.get_dataset()

@@ -79,20 +87,21 @@ class App(object):
ires = self.run_jobs(f, np.ndindex(*scores.shape))
for count, (i, j, score) in enumerate(ires):
scores[i, j] = score
print '%d / %d (best error: %.2f %%, last: %.2f %%)' % (count+1, scores.size, np.nanmin(scores)*100, score*100)
print scores
print('%d / %d (best error: %.2f %%, last: %.2f %%)' %
(count+1, scores.size, np.nanmin(scores)*100, score*100))
print(scores)

print 'writing score table to "svm_scores.npz"'
print('writing score table to "svm_scores.npz"')
np.savez('svm_scores.npz', scores=scores, Cs=Cs, gammas=gammas)

i, j = np.unravel_index(scores.argmin(), scores.shape)
best_params = dict(C = Cs[i], gamma=gammas[j])
print 'best params:', best_params
print 'best error: %.2f %%' % (scores.min()*100)
print('best params:', best_params)
print('best error: %.2f %%' % (scores.min()*100))
return best_params

def adjust_KNearest(self):
print 'adjusting KNearest ...'
print('adjusting KNearest ...')
def f(k):
samples, labels = self.get_dataset()
err = cross_validate(KNearest, dict(k=k), samples, labels)

@@ -101,9 +110,9 @@ class App(object):
for k, err in self.run_jobs(f, xrange(1, 9)):
if err < best_err:
best_err, best_k = err, k
print 'k = %d, error: %.2f %%' % (k, err*100)
print('k = %d, error: %.2f %%' % (k, err*100))
best_params = dict(k=best_k)
print 'best params:', best_params, 'err: %.2f' % (best_err*100)
print('best params:', best_params, 'err: %.2f' % (best_err*100))
return best_params

@@ -111,14 +120,14 @@ if __name__ == '__main__':
import getopt
import sys

print __doc__
print(__doc__)

args, _ = getopt.getopt(sys.argv[1:], '', ['model='])
args = dict(args)
args.setdefault('--model', 'svm')
args.setdefault('--env', '')
if args['--model'] not in ['svm', 'knearest']:
print 'unknown model "%s"' % args['--model']
print('unknown model "%s"' % args['--model'])
sys.exit(1)

t = clock()

@@ -127,4 +136,4 @@ if __name__ == '__main__':
app.adjust_KNearest()
else:
app.adjust_SVM()
print 'work time: %f s' % (clock() - t)
print('work time: %f s' % (clock() - t))
@@ -1,5 +1,8 @@
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -22,7 +25,7 @@ def main():

classifier_fn = 'digits_svm.dat'
if not os.path.exists(classifier_fn):
print '"%s" not found, run digits.py first' % classifier_fn
print('"%s" not found, run digits.py first' % classifier_fn)
return
model = SVM()
model.load(classifier_fn)
@@ -14,6 +14,7 @@ Usage:
from __future__ import print_function

import cv2
import numpy as np

# relative module
import video

@@ -45,7 +46,7 @@ if __name__ == '__main__':
thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
vis = img.copy()
vis /= 2
vis = np.uint8(vis/2.)
vis[edge != 0] = (0, 255, 0)
cv2.imshow('edge', vis)
ch = cv2.waitKey(5) & 0xFF
@@ -22,6 +22,9 @@ Keys:
Select a textured planar object to track by drawing a box with a mouse.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -84,7 +87,7 @@ class App:

if __name__ == '__main__':
print __doc__
print(__doc__)

import sys
try:
@@ -14,6 +14,9 @@ USAGE
Press left mouse button on a feature point to see its matching point.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2
from common import anorm, getsize

@@ -82,8 +85,10 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None):

if status is None:
status = np.ones(len(kp_pairs), np.bool_)
p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
p1, p2 = [], [] # python 2 / python 3 change of zip unpacking
for kpp in kp_pairs:
p1.append(np.int32(kpp[0].pt))
p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))

green = (0, 255, 0)
red = (0, 0, 255)

@@ -133,7 +138,7 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None):

if __name__ == '__main__':
print __doc__
print(__doc__)

import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])

@@ -150,33 +155,33 @@ if __name__ == '__main__':
detector, matcher = init_feature(feature_name)

if img1 is None:
print 'Failed to load fn1:', fn1
print('Failed to load fn1:', fn1)
sys.exit(1)

if img2 is None:
print 'Failed to load fn2:', fn2
print('Failed to load fn2:', fn2)
sys.exit(1)

if detector is None:
print 'unknown feature:', feature_name
print('unknown feature:', feature_name)
sys.exit(1)

print 'using', feature_name
print('using', feature_name)

kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

def match_and_draw(win):
print 'matching...'
print('matching...')
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print '%d / %d inliers/matched' % (np.sum(status), len(status))
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
else:
H, status = None, None
print '%d matches found, not enough for homography estimation' % len(p1)
print('%d matches found, not enough for homography estimation' % len(p1))

vis = explore_match(win, img1, img2, kp_pairs, status, H)
@@ -1,5 +1,13 @@
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
xrange = range

import numpy as np
from numpy import random
import cv2

@@ -30,19 +38,21 @@ if __name__ == '__main__':
cluster_n = 5
img_size = 512

print 'press any key to update distributions, ESC - exit\n'
print('press any key to update distributions, ESC - exit\n')

while True:
print 'sampling distributions...'
print('sampling distributions...')
points, ref_distrs = make_gaussians(cluster_n, img_size)

print 'EM (opencv) ...'
em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
em.train(points)
means = em.getMat('means')
covs = em.getMatVector('covs')
print('EM (opencv) ...')
em = cv2.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/Itseez/opencv/pull/4232
found_distrs = zip(means, covs)
print 'ready!\n'
print('ready!\n')

img = np.zeros((img_size, img_size, 3), np.uint8)
for x, y in np.int32(points):
@@ -15,6 +15,9 @@ Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2
import numpy as np

@@ -38,8 +41,8 @@ def hist_curve(im):
def hist_lines(im):
h = np.zeros((300,256,3))
if len(im.shape)!=2:
print "hist_lines applicable only for grayscale images"
#print "so converting image to grayscale for representation"
print("hist_lines applicable only for grayscale images")
#print("so converting image to grayscale for representation"
im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)

@@ -58,18 +61,18 @@ if __name__ == '__main__':
fname = sys.argv[1]
else :
fname = '../data/lena.jpg'
print "usage : python hist.py <image_file>"
print("usage : python hist.py <image_file>")

im = cv2.imread(fname)

if im is None:
print 'Failed to load image file:', fname
print('Failed to load image file:', fname)
sys.exit(1)

gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

print ''' Histogram plotting \n
print(''' Histogram plotting \n
Keymap :\n
a - show histogram for color image in curve mode \n
b - show histogram in bin mode \n

@@ -77,7 +80,7 @@ if __name__ == '__main__':
d - show histogram for color image in curve mode \n
e - show histogram for a normalized image in curve mode \n
Esc - exit \n
'''
''')

cv2.imshow('image',im)
while True:

@@ -86,31 +89,31 @@ if __name__ == '__main__':
curve = hist_curve(im)
cv2.imshow('histogram',curve)
cv2.imshow('image',im)
print 'a'
print('a')
elif k == ord('b'):
print 'b'
print('b')
lines = hist_lines(im)
cv2.imshow('histogram',lines)
cv2.imshow('image',gray)
elif k == ord('c'):
print 'c'
print('c')
equ = cv2.equalizeHist(gray)
lines = hist_lines(equ)
cv2.imshow('histogram',lines)
cv2.imshow('image',equ)
elif k == ord('d'):
print 'd'
print('d')
curve = hist_curve(gray)
cv2.imshow('histogram',curve)
cv2.imshow('image',gray)
elif k == ord('e'):
print 'e'
norm = cv2.normalize(gray,alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX)
print('e')
norm = cv2.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX)
lines = hist_lines(norm)
cv2.imshow('histogram',lines)
cv2.imshow('image',norm)
elif k == 27:
print 'ESC'
print('ESC')
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
@@ -10,6 +10,9 @@ Keyboard shortcuts:
space - generate new distribution
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -19,7 +22,7 @@ if __name__ == '__main__':
cluster_n = 5
img_size = 512

print __doc__
print(__doc__)

# generating bright palette
colors = np.zeros((1, cluster_n, 3), np.uint8)

@@ -28,7 +31,7 @@ if __name__ == '__main__':
colors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0]

while True:
print 'sampling distributions...'
print('sampling distributions...')
points, _ = make_gaussians(cluster_n, img_size)

term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)

@@ -36,7 +39,8 @@ if __name__ == '__main__':

img = np.zeros((img_size, img_size, 3), np.uint8)
for (x, y), label in zip(np.int32(points), labels.ravel()):
c = map(int, colors[label])
c = list(map(int, colors[label]))

cv2.circle(img, (x, y), 1, c, -1)

cv2.imshow('gaussian mixture', img)
@@ -64,7 +64,7 @@ if __name__ == '__main__':

pyr = build_lappyr(frame, leveln)
for i in xrange(leveln):
v = cv2.getTrackbarPos('%d'%i, 'level control') / 5
v = int(cv2.getTrackbarPos('%d'%i, 'level control') / 5)
pyr[i] *= v
res = merge_lappyr(pyr)
@@ -25,6 +25,9 @@ USAGE:
Models: RTrees, KNearest, Boost, SVM, MLP
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -58,22 +61,22 @@ class LetterStatModel(object):

class RTrees(LetterStatModel):
def __init__(self):
self.model = cv2.RTrees()
self.model = cv2.ml.RTrees_create()

def train(self, samples, responses):
sample_n, var_n = samples.shape
var_types = np.array([cv2.CV_VAR_NUMERICAL] * var_n + [cv2.CV_VAR_CATEGORICAL], np.uint8)
var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL], np.uint8)
#CvRTParams(10,10,0,false,15,0,true,4,100,0.01f,CV_TERMCRIT_ITER));
params = dict(max_depth=10 )
self.model.train(samples, cv2.CV_ROW_SAMPLE, responses, varType = var_types, params = params)
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses, varType = var_types, params = params)

def predict(self, samples):
return np.float32( [self.model.predict(s) for s in samples] )
return [self.model.predict(s) for s in samples]

class KNearest(LetterStatModel):
def __init__(self):
self.model = cv2.KNearest()
self.model = cv2.ml.KNearest_create()

def train(self, samples, responses):
self.model.train(samples, responses)

@@ -85,16 +88,16 @@ class KNearest(LetterStatModel):

class Boost(LetterStatModel):
def __init__(self):
self.model = cv2.Boost()
self.model = cv2.ml.Boost_create()

def train(self, samples, responses):
sample_n, var_n = samples.shape
new_samples = self.unroll_samples(samples)
new_responses = self.unroll_responses(responses)
var_types = np.array([cv2.CV_VAR_NUMERICAL] * var_n + [cv2.CV_VAR_CATEGORICAL, cv2.CV_VAR_CATEGORICAL], np.uint8)
var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8)
#CvBoostParams(CvBoost::REAL, 100, 0.95, 5, false, 0 )
params = dict(max_depth=5) #, use_surrogates=False)
self.model.train(new_samples, cv2.CV_ROW_SAMPLE, new_responses, varType = var_types, params=params)
self.model.train(new_samples, cv2.ml.ROW_SAMPLE, new_responses, varType = var_types, params=params)

def predict(self, samples):
new_samples = self.unroll_samples(samples)

@@ -105,11 +108,11 @@ class Boost(LetterStatModel):

class SVM(LetterStatModel):
def __init__(self):
self.model = cv2.SVM()
self.model = cv2.ml.SVM_create()

def train(self, samples, responses):
params = dict( kernel_type = cv2.SVM_LINEAR,
svm_type = cv2.SVM_C_SVC,
params = dict( kernel_type = cv2.ml.SVM_LINEAR,
svm_type = cv2.ml.SVM_C_SVC,
C = 1 )
self.model.train(samples, responses, params = params)

@@ -119,7 +122,7 @@ class SVM(LetterStatModel):

class MLP(LetterStatModel):
def __init__(self):
self.model = cv2.ANN_MLP()
self.model = cv2.ml.ANN_MLP_create()

def train(self, samples, responses):
sample_n, var_n = samples.shape

@@ -130,7 +133,7 @@ class MLP(LetterStatModel):

# CvANN_MLP_TrainParams::BACKPROP,0.001
params = dict( term_crit = (cv2.TERM_CRITERIA_COUNT, 300, 0.01),
train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
train_method = cv2.ml.ANN_MLP_TRAIN_PARAMS_BACKPROP,
bp_dw_scale = 0.001,
bp_moment_scale = 0.0 )
self.model.train(samples, np.float32(new_responses), None, params = params)

@@ -144,7 +147,7 @@ if __name__ == '__main__':
import getopt
import sys

print __doc__
print(__doc__)

models = [RTrees, KNearest, Boost, SVM, MLP] # NBayes
models = dict( [(cls.__name__.lower(), cls) for cls in models] )

@@ -155,7 +158,7 @@ if __name__ == '__main__':
args.setdefault('--model', 'rtrees')
args.setdefault('--data', '../data/letter-recognition.data')

print 'loading data %s ...' % args['--data']
print('loading data %s ...' % args['--data'])
samples, responses = load_base(args['--data'])
Model = models[args['--model']]
model = Model()

@@ -163,20 +166,20 @@ if __name__ == '__main__':
train_n = int(len(samples)*model.train_ratio)
if '--load' in args:
fn = args['--load']
print 'loading model from %s ...' % fn
print('loading model from %s ...' % fn)
model.load(fn)
else:
print 'training %s ...' % Model.__name__
print('training %s ...' % Model.__name__)
model.train(samples[:train_n], responses[:train_n])

print 'testing...'
print('testing...')
train_rate = np.mean(model.predict(samples[:train_n]) == responses[:train_n])
test_rate = np.mean(model.predict(samples[train_n:]) == responses[train_n:])

print 'train rate: %f test rate: %f' % (train_rate*100, test_rate*100)
print('train rate: %f test rate: %f' % (train_rate*100, test_rate*100))

if '--save' in args:
fn = args['--save']
print 'saving model to %s ...' % fn
print('saving model to %s ...' % fn)
model.save(fn)
cv2.destroyAllWindows()
@@ -1,11 +1,13 @@
#!/usr/bin/env python
'''
mouse_and_match.py [-i path | --input path: default ./]
mouse_and_match.py [-i path | --input path: default ../data/]

Demonstrate using a mouse to interact with an image:
Read in the images in a directory one by one
Allow the user to select parts of an image with a mouse
When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.

SPACE for next image
ESC to exit
'''

@@ -54,8 +56,10 @@ def onmouse(event, x, y, flags, param):
drag_start = None

if __name__ == '__main__':
print(__doc__)

parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
parser.add_argument("-i","--input", default='./', help="Input directory.")
parser.add_argument("-i","--input", default='../data/', help="Input directory.")
args = parser.parse_args()
path = args.input

@@ -68,7 +72,7 @@ if __name__ == '__main__':
print(infile)

img=cv2.imread(infile,1)
if img == None:
if img is None:
continue
sel = (0,0,0,0)
drag_start = None
@@ -1,5 +1,8 @@
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2
import video

@@ -15,7 +18,7 @@ Keys:

def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)

@@ -47,7 +50,7 @@ def warp_flow(img, flow):

if __name__ == '__main__':
import sys
print help_message
print(help_message)
try:
fn = sys.argv[1]
except:

@@ -78,10 +81,10 @@ if __name__ == '__main__':
break
if ch == ord('1'):
show_hsv = not show_hsv
print 'HSV flow visualization is', ['off', 'on'][show_hsv]
print('HSV flow visualization is', ['off', 'on'][show_hsv])
if ch == ord('2'):
show_glitch = not show_glitch
if show_glitch:
cur_glitch = img.copy()
print 'glitch is', ['off', 'on'][show_glitch]
print('glitch is', ['off', 'on'][show_glitch])
cv2.destroyAllWindows()
@@ -35,7 +35,9 @@ if __name__ == '__main__':
hog = cv2.HOGDescriptor()
hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )

for fn in it.chain(*map(glob, sys.argv[1:])):

default = ['../data/basketball2.png '] if len(sys.argv[1:]) == 0 else []
for fn in it.chain(*map(glob, default + sys.argv[1:])):
print(fn, ' - ',)
try:
img = cv2.imread(fn)
@@ -22,6 +22,9 @@ Select a textured planar object to track by drawing a box with a mouse.
Use 'focal' slider to adjust to camera focal length for proper video augmentation.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2
import video

@@ -97,7 +100,7 @@ class App:

if __name__ == '__main__':
print __doc__
print(__doc__)

import sys
try:
@@ -21,6 +21,14 @@ Keys:
Select a textured planar object to track by drawing a box with a mouse.
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
xrange = range

import numpy as np
import cv2

@@ -64,6 +72,7 @@ class PlaneTracker:
self.detector = cv2.ORB_create( nfeatures = 1000 )
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []

def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''

@@ -87,8 +96,8 @@ class PlaneTracker:

def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
frame_points, frame_descrs = self.detect_features(frame)
if len(frame_points) < MIN_MATCH_COUNT:
self.frame_points, frame_descrs = self.detect_features(frame)
if len(self.frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]

@@ -103,7 +112,7 @@ class PlaneTracker:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [frame_points[m.queryIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0

@@ -169,7 +178,7 @@ class App:
break

if __name__ == '__main__':
print __doc__
print(__doc__)

import sys
try:
@@ -6,6 +6,9 @@ Simple example of stereo image matching and point cloud generation.
Resulting .ply file cam be easily viewed using MeshLab ( http://meshlab.sourceforge.net/ )
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -25,13 +28,13 @@ def write_ply(fn, verts, colors):
verts = verts.reshape(-1, 3)
colors = colors.reshape(-1, 3)
verts = np.hstack([verts, colors])
with open(fn, 'w') as f:
f.write(ply_header % dict(vert_num=len(verts)))
np.savetxt(f, verts, '%f %f %f %d %d %d')
with open(fn, 'wb') as f:
f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')

if __name__ == '__main__':
print 'loading images...'
print('loading images...')
imgL = cv2.pyrDown( cv2.imread('../data/aloeL.jpg') ) # downscale images for faster processing
imgR = cv2.pyrDown( cv2.imread('../data/aloeR.jpg') )

@@ -50,10 +53,10 @@ if __name__ == '__main__':
speckleRange = 32
)

print 'computing disparity...'
print('computing disparity...')
disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

print 'generating 3d point cloud...',
print('generating 3d point cloud...',)
h, w = imgL.shape[:2]
f = 0.8*w # guess for focal length
Q = np.float32([[1, 0, 0, -0.5*w],

@@ -67,7 +70,7 @@ if __name__ == '__main__':
out_colors = colors[mask]
out_fn = 'out.ply'
write_ply('out.ply', out_points, out_colors)
print '%s saved' % 'out.ply'
print('%s saved' % 'out.ply')

cv2.imshow('left', imgL)
cv2.imshow('disparity', (disp-min_disp)/num_disp)
@@ -10,6 +10,9 @@ Usage:
texture_flow.py [<image>]
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -22,7 +25,7 @@ if __name__ == '__main__':

img = cv2.imread(fn)
if img is None:
print 'Failed to load image file:', fn
print('Failed to load image file:', fn)
sys.exit(1)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

@@ -36,7 +39,7 @@ if __name__ == '__main__':
vis[:] = (192 + np.uint32(vis)) / 2
d = 12
points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
for x, y in points:
for x, y in np.int32(points):
vx, vy = np.int32(flow[y, x]*d)
cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
cv2.imshow('input', img)
@@ -182,7 +182,7 @@ if __name__ == '__main__':
if len(sources) == 0:
sources = [ 0 ]

caps = map(create_capture, sources)
caps = list(map(create_capture, sources))
shot_idx = 0
while True:
imgs = []
@@ -14,6 +14,9 @@ Keys:

'''

# Python 2/3 compatibility
from __future__ import print_function

import cv2

def decode_fourcc(v):

@@ -24,13 +27,13 @@ font = cv2.FONT_HERSHEY_SIMPLEX
color = (0, 255, 0)

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_AUTOFOCUS, False)
cap.set(cv2.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/Itseez/opencv/pull/5474

cv2.namedWindow("Video")

convert_rgb = True
fps = int(cap.get(cv2.CAP_PROP_FPS))
focus = int(cap.get(cv2.CAP_PROP_FOCUS)) * 100
focus = int(min(cap.get(cv2.CAP_PROP_FOCUS) * 100, 2**31-1)) # ceil focus to C_LONG as Python3 int can go to +inf

cv2.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv2.CAP_PROP_FPS, v))
cv2.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv2.CAP_PROP_FOCUS, v / 100))

@@ -55,7 +58,7 @@ while True:
cv2.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color)
cv2.imshow("Video", img)

k = cv2.waitKey(1)
k = 0xFF & cv2.waitKey(1)

if k == 27:
break
@@ -22,7 +22,8 @@ Keys
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

@@ -44,7 +45,7 @@ class App:
self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)

def get_colors(self):
return map(int, self.colors[self.cur_marker]), self.cur_marker
return list(map(int, self.colors[self.cur_marker])), self.cur_marker

def watershed(self):
m = self.markers.copy()

@@ -60,13 +61,13 @@ class App:
break
if ch >= ord('1') and ch <= ord('7'):
self.cur_marker = ch - ord('0')
print 'marker: ', self.cur_marker
print('marker: ', self.cur_marker)
if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
self.watershed()
self.sketch.dirty = False
if ch in [ord('a'), ord('A')]:
self.auto_update = not self.auto_update
print 'auto_update if', ['off', 'on'][self.auto_update]
print('auto_update if', ['off', 'on'][self.auto_update])
if ch in [ord('r'), ord('R')]:
self.markers[:] = 0
self.markers_vis[:] = self.img

@@ -80,5 +81,5 @@ if __name__ == '__main__':
fn = sys.argv[1]
except:
fn = '../data/fruits.jpg'
print __doc__
print(__doc__)
App(fn).run()