Merge pull request #8809 from berak:fix_py_tut_braces_py3

commit 35de1c9fdf
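Every hunk in this change follows the same pattern: a Python 2 print statement in a tutorial snippet becomes a print() call, with comma-separated arguments and %-style interpolation folded into a single str.format() string. Because each call then passes exactly one argument, the line is valid under Python 3 (where print is a function) and still prints the same text under Python 2 (where the parentheses act as plain grouping). A minimal sketch of the pattern, using illustrative values rather than code from any one tutorial:
@code{.py}
mean_error = 0.05                 # illustrative value
good, MIN_MATCH_COUNT = [], 10    # illustrative values

# Python 2 only:
#   print "total error: ", mean_error
#   print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)

# Python 2 and Python 3:
print( "total error: {}".format(mean_error) )
print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
@endcode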
@@ -218,7 +218,7 @@ for i in xrange(len(objpoints)):
     error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
     mean_error += error

-print "total error: ", mean_error/len(objpoints)
+print( "total error: {}".format(mean_error/len(objpoints)) )
 @endcode

 Additional Resources
 --------------------
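The changed line is the tail of the re-projection error computation in this tutorial. For reference, a Python 3-ready version of the whole loop looks roughly like this (assuming objpoints, imgpoints, rvecs, tvecs, mtx and dist come from cv2.calibrateCamera earlier in the tutorial):
@code{.py}
mean_error = 0
for i in range(len(objpoints)):
    # project the object points with the estimated pose and intrinsics
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    mean_error += error

print( "total error: {}".format(mean_error/len(objpoints)) )
@endcode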
@@ -30,18 +30,18 @@ You can access a pixel value by its row and column coordinates. For BGR image, it returns an array
 of Blue, Green, Red values. For grayscale image, just corresponding intensity is returned.
 @code{.py}
 >>> px = img[100,100]
->>> print px
+>>> print( px )
 [157 166 200]

 # accessing only blue pixel
 >>> blue = img[100,100,0]
->>> print blue
+>>> print( blue )
 157
 @endcode

 You can modify the pixel values the same way.
 @code{.py}
 >>> img[100,100] = [255,255,255]
->>> print img[100,100]
+>>> print( img[100,100] )
 [255 255 255]
 @endcode
@@ -76,7 +76,7 @@ etc.
 Shape of image is accessed by img.shape. It returns a tuple of number of rows, columns and channels
 (if image is color):
 @code{.py}
->>> print img.shape
+>>> print( img.shape )
 (342, 548, 3)
 @endcode
@@ -85,12 +85,12 @@ good method to check if loaded image is grayscale or color image.

 Total number of pixels is accessed by `img.size`:
 @code{.py}
->>> print img.size
+>>> print( img.size )
 562248
 @endcode
 Image datatype is obtained by `img.dtype`:
 @code{.py}
->>> print img.dtype
+>>> print( img.dtype )
 uint8
 @endcode
@@ -23,10 +23,10 @@ For example, consider below sample:
 >>> x = np.uint8([250])
 >>> y = np.uint8([10])

->>> print cv2.add(x,y) # 250+10 = 260 => 255
+>>> print( cv2.add(x,y) ) # 250+10 = 260 => 255
 [[255]]

->>> print x+y          # 250+10 = 260 % 256 = 4
+>>> print( x+y )          # 250+10 = 260 % 256 = 4
 [4]
 @endcode
 It will be more visible when you add two images. OpenCV function will provide a better result. So
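The saturation-versus-modulo difference shown above for single uint8 values carries over to whole images; a minimal sketch with synthetic images (not part of the original tutorial) that makes it visible:
@code{.py}
import cv2
import numpy as np

img1 = np.full((300, 300, 3), 200, dtype=np.uint8)   # bright gray image
img2 = np.full((300, 300, 3), 100, dtype=np.uint8)   # mid gray image

saturated = cv2.add(img1, img2)   # 200+100 = 300, clipped to 255
wrapped   = img1 + img2           # 200+100 = 300 % 256 = 44

print( saturated[0, 0] )   # [255 255 255]
print( wrapped[0, 0] )     # [44 44 44]
@endcode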
@@ -44,7 +44,7 @@ for i in xrange(5,49,2):
     img1 = cv2.medianBlur(img1,i)
 e2 = cv2.getTickCount()
 t = (e2 - e1)/cv2.getTickFrequency()
-print t
+print( t )

 # Result I got is 0.521107655 seconds
 @endcode
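cv2.getTickFrequency() returns the number of ticks per second, so t above comes out in seconds. The same idiom can wrap any block of code; a small sketch (synthetic image, cross-checked against Python's time module, purely for illustration):
@code{.py}
import time
import cv2
import numpy as np

img = np.random.randint(0, 256, (480, 640), dtype=np.uint8)

e1 = cv2.getTickCount()
t0 = time.time()
for k in range(5, 49, 2):          # same odd kernel sizes as the tutorial loop
    img = cv2.medianBlur(img, k)
e2 = cv2.getTickCount()
t1 = time.time()

print( (e2 - e1) / cv2.getTickFrequency() )   # seconds, via OpenCV ticks
print( t1 - t0 )                              # seconds, via the time module
@endcode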
@@ -69,8 +69,8 @@ kp = star.detect(img,None)
 # compute the descriptors with BRIEF
 kp, des = brief.compute(img, kp)

-print brief.descriptorSize()
-print des.shape
+print( brief.descriptorSize() )
+print( des.shape )
 @endcode
 The function brief.getDescriptorSize() gives the \f$n_d\f$ size used in bytes. By default it is 32. Next one
 is matching, which will be done in another chapter.
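The star and brief objects used above come from the opencv_contrib xfeatures2d module. If that module is available, the full snippet can be assembled roughly as below ('simple.jpg' stands for any test image; the factory names are those of the 3.x contrib API assumed here):
@code{.py}
import cv2

img = cv2.imread('simple.jpg', 0)

# Initiate the STAR (CenSurE) detector and the BRIEF extractor
star = cv2.xfeatures2d.StarDetector_create()
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

# find the keypoints with STAR, then compute BRIEF descriptors for them
kp = star.detect(img, None)
kp, des = brief.compute(img, kp)

print( brief.descriptorSize() )
print( des.shape )
@endcode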
@@ -108,10 +108,10 @@ kp = fast.detect(img,None)
 img2 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))

 # Print all default params
-print "Threshold: ", fast.getThreshold()
-print "nonmaxSuppression: ", fast.getNonmaxSuppression()
-print "neighborhood: ", fast.getType()
-print "Total Keypoints with nonmaxSuppression: ", len(kp)
+print( "Threshold: {}".format(fast.getThreshold()) )
+print( "nonmaxSuppression:{}".format(fast.getNonmaxSuppression()) )
+print( "neighborhood: {}".format(fast.getType()) )
+print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )

 cv2.imwrite('fast_true.png',img2)
@@ -119,7 +119,7 @@ cv2.imwrite('fast_true.png',img2)
 fast.setNonmaxSuppression(0)
 kp = fast.detect(img,None)

-print "Total Keypoints without nonmaxSuppression: ", len(kp)
+print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )

 img3 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
@@ -85,7 +85,7 @@ if len(good)>MIN_MATCH_COUNT:
     img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)

 else:
-    print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
+    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
     matchesMask = None
 @endcode
 Finally we draw our inliers (if successfully found the object) or matching keypoints (if failed).
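For context, the dst points consumed by the cv2.polylines call above are normally obtained by projecting the corners of the query image through the estimated homography; a sketch along those lines (assuming img1 is the query image, img2 the scene image, and M the matrix returned by cv2.findHomography):
@code{.py}
h, w = img1.shape[:2]
pts = np.float32([ [0,0], [0,h-1], [w-1,h-1], [w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts, M)

# draw the projected outline of the query image in the scene
img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
@endcode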
@@ -92,7 +92,7 @@ examples are shown in Python terminal since it is just same as SIFT only.
 While matching, we may need all those features, but not now. So we increase the Hessian Threshold.
 @code{.py}
 # Check present Hessian threshold
->>> print surf.getHessianThreshold()
+>>> print( surf.getHessianThreshold() )
 400.0

 # We set it to some 50000. Remember, it is just for representing in picture.
@@ -102,7 +102,7 @@ While matching, we may need all those features, but not now. So we increase the
 # Again compute keypoints and check its number.
 >>> kp, des = surf.detectAndCompute(img,None)

->>> print len(kp)
+>>> print( len(kp) )
 47
 @endcode
 It is less than 50. Let's draw it on the image.
@@ -119,7 +119,7 @@ on wings of butterfly. You can test it with other images.
 Now I want to apply U-SURF, so that it won't find the orientation.
 @code{.py}
 # Check upright flag, if it False, set it to True
->>> print surf.getUpright()
+>>> print( surf.getUpright() )
 False

 >>> surf.setUpright(True)
@@ -139,7 +139,7 @@ etc, this is better.
 Finally we check the descriptor size and change it to 128 if it is only 64-dim.
 @code{.py}
 # Find size of descriptor
->>> print surf.descriptorSize()
+>>> print( surf.descriptorSize() )
 64

 # That means flag, "extended" is False.
@@ -149,9 +149,9 @@ Finally we check the descriptor size and change it to 128 if it is only 64-dim.
 # So we make it to True to get 128-dim descriptors.
 >>> surf.extended = True
 >>> kp, des = surf.detectAndCompute(img,None)
->>> print surf.descriptorSize()
+>>> print( surf.descriptorSize() )
 128
->>> print des.shape
+>>> print( des.shape )
 (47, 128)
 @endcode
 Remaining part is matching which we will do in another chapter.
@@ -21,7 +21,7 @@ in Python terminal:
 @code{.py}
 import cv2
 events = [i for i in dir(cv2) if 'EVENT' in i]
-print events
+print( events )
 @endcode
 Creating mouse callback function has a specific format which is same everywhere. It differs only in
 what the function does. So our mouse callback function does one thing, it draws a circle where we
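The callback format that paragraph refers to is a function taking (event, x, y, flags, param), registered on a named window with cv2.setMouseCallback. A minimal sketch of that skeleton, drawing a filled circle on double-click roughly as this tutorial goes on to do:
@code{.py}
import cv2
import numpy as np

img = np.zeros((512, 512, 3), np.uint8)

def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (x, y), 100, (255, 0, 0), -1)

cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)

while True:
    cv2.imshow('image', img)
    if cv2.waitKey(20) & 0xFF == 27:   # Esc quits
        break
cv2.destroyAllWindows()
@endcode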
@@ -24,7 +24,7 @@ commands in your Python terminal :
 @code{.py}
 >>> import cv2
 >>> flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
->>> print flags
+>>> print( flags )
 @endcode
 @note For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
 Different softwares use different scales. So if you are comparing OpenCV values with them, you need
@@ -96,7 +96,7 @@ terminal:
 @code{.py}
 >>> green = np.uint8([[[0,255,0 ]]])
 >>> hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
->>> print hsv_green
+>>> print( hsv_green )
 [[[ 60 255 255]]]
 @endcode
 Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively. Apart
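Those lower and upper bounds are then fed to cv2.inRange to build a mask; a short sketch for the green example above (assuming hsv is the HSV-converted frame and frame the original BGR image, with cv2 and numpy imported as usual):
@code{.py}
lower_green = np.array([50, 100, 100])    # H-10 for H = 60
upper_green = np.array([70, 255, 255])    # H+10

mask = cv2.inRange(hsv, lower_green, upper_green)     # white where the pixel is "green"
res  = cv2.bitwise_and(frame, frame, mask=mask)       # keep only the masked region
@endcode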
@@ -27,7 +27,7 @@ im2,contours,hierarchy = cv2.findContours(thresh, 1, 2)

 cnt = contours[0]
 M = cv2.moments(cnt)
-print M
+print( M )
 @endcode
 From this moments, you can extract useful data like area, centroid etc. Centroid is given by the
 relations, \f$C_x = \frac{M_{10}}{M_{00}}\f$ and \f$C_y = \frac{M_{01}}{M_{00}}\f$. This can be done as
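Concretely, with the moments dictionary M from above, the centroid works out to something like the following (guarding against a zero-area contour):
@code{.py}
if M['m00'] != 0:
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])
    print( "centroid: ({}, {})".format(cx, cy) )
@endcode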
@@ -99,7 +99,7 @@ im2,contours,hierarchy = cv2.findContours(thresh2,2,1)
 cnt2 = contours[0]

 ret = cv2.matchShapes(cnt1,cnt2,1,0.0)
-print ret
+print( ret )
 @endcode
 I tried matching shapes with different shapes given below:
@@ -218,7 +218,7 @@ for i in xrange(1,256):

 # find otsu's threshold value with OpenCV function
 ret, otsu = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
-print thresh,ret
+print( "{} {}".format(thresh,ret) )
 @endcode
 *(Some of the functions may be new here, but we will cover them in coming chapters)*
@@ -186,12 +186,12 @@ using IPython magic command %timeit.
 @code{.py}
 In [16]: img = cv2.imread('messi5.jpg',0)
 In [17]: rows,cols = img.shape
-In [18]: print rows,cols
+In [18]: print("{} {}".format(rows,cols))
 342 548

 In [19]: nrows = cv2.getOptimalDFTSize(rows)
 In [20]: ncols = cv2.getOptimalDFTSize(cols)
-In [21]: print nrows, ncols
+In [21]: print("{} {}".format(nrows,ncols))
 360 576
 @endcode
 See, the size (342,548) is modified to (360, 576). Now let's pad it with zeros (for OpenCV) and find
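Padding to the optimal size can be done by copying the image into a larger zero array, or with cv2.copyMakeBorder; a sketch along the lines the tutorial continues with (using rows, cols, nrows, ncols and img from the snippet above):
@code{.py}
nimg = np.zeros((nrows, ncols))
nimg[:rows, :cols] = img

# equivalently, pad on the right and bottom:
# nimg = cv2.copyMakeBorder(img, 0, nrows-rows, 0, ncols-cols, cv2.BORDER_CONSTANT, value=0)
@endcode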
@@ -51,7 +51,7 @@ ret,result,neighbours,dist = knn.findNearest(test,k=5)
 matches = result==test_labels
 correct = np.count_nonzero(matches)
 accuracy = correct*100.0/result.size
-print accuracy
+print( accuracy )
 @endcode
 So our basic OCR app is ready. This particular example gave me an accuracy of 91%. One option
 improve accuracy is to add more data for training, especially the wrong ones. So instead of finding
@@ -64,7 +64,7 @@ np.savez('knn_data.npz',train=train, train_labels=train_labels)

 # Now load the data
 with np.load('knn_data.npz') as data:
-    print data.files
+    print( data.files )
     train = data['train']
     train_labels = data['train_labels']
 @endcode
@@ -109,7 +109,7 @@ ret, result, neighbours, dist = knn.findNearest(testData, k=5)

 correct = np.count_nonzero(result == labels)
 accuracy = correct*100.0/10000
-print accuracy
+print( accuracy )
 @endcode
 It gives me an accuracy of 93.22%. Again, if you want to increase accuracy, you can iteratively add
 error data in each level.
@@ -118,9 +118,9 @@ knn = cv2.ml.KNearest_create()
 knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
 ret, results, neighbours ,dist = knn.findNearest(newcomer, 3)

-print "result: ", results,"\n"
-print "neighbours: ", neighbours,"\n"
-print "distance: ", dist
+print( "result: {}\n".format(results) )
+print( "neighbours: {}\n".format(neighbours) )
+print( "distance: {}\n".format(dist) )

 plt.show()
 @endcode
@@ -30,7 +30,7 @@ $ yum install numpy opencv*
 Open Python IDLE (or IPython) and type following codes in Python terminal.
 @code{.py}
 >>> import cv2
->>> print cv2.__version__
+>>> print( cv2.__version__ )
 @endcode
 If the results are printed out without any errors, congratulations !!! You have installed
 OpenCV-Python successfully.
@@ -218,7 +218,7 @@ Installation is over. All files are installed in /usr/local/ folder. But to use
 should be able to find OpenCV module. You have two options for that.

 -# **Move the module to any folder in Python Path** : Python path can be found out by entering
-    import sys;print sys.path in Python terminal. It will print out many locations. Move
+    `import sys; print(sys.path)` in Python terminal. It will print out many locations. Move
     /usr/local/lib/python2.7/site-packages/cv2.so to any of this folder. For example,
     @code{.sh}
     su mv /usr/local/lib/python2.7/site-packages/cv2.so /usr/lib/python2.7/site-packages
@@ -36,7 +36,7 @@ Installing OpenCV from prebuilt binaries
 -# Open Python IDLE and type following codes in Python terminal.
     @code
     >>> import cv2
-    >>> print cv2.__version__
+    >>> print( cv2.__version__ )
     @endcode

     If the results are printed out without any errors, congratulations !!! You have installed