python: 'cv2.' -> 'cv.' via 'import cv2 as cv'

Alexander Alekhin 2017-12-11 12:55:03 +03:00
parent 9665dde678
commit 5560db73bf
162 changed files with 2083 additions and 2084 deletions


@ -68,5 +68,5 @@ this contour approximation method.
If you pass cv.ContourApproximationModes.CHAIN_APPROX_NONE.value, all the boundary points are stored. But do we actually need all
the points? For example, suppose you found the contour of a straight line. Do you need all the points on the line
to represent that line? No, we need just the two end points of that line. This is what
cv2.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby
cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby
saving memory.
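To make the difference concrete, here is a minimal sketch. The file name `simple_shapes.png` and the thresholding step are assumptions, and the two-value return of findContours is the OpenCV 4.x form (3.x returns three values):
@code{.py}
import cv2 as cv

# Assumed input: any image containing a few solid shapes
img = cv.imread('simple_shapes.png', 0)
ret, thresh = cv.threshold(img, 127, 255, 0)

# Store every boundary point vs. only the end points of straight segments
contours_none, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
contours_simple, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

# For a rectangle, CHAIN_APPROX_NONE keeps the full perimeter while
# CHAIN_APPROX_SIMPLE keeps just the 4 corner points
print(len(contours_none[0]), len(contours_simple[0]))
@endcode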


@ -80,7 +80,7 @@ pass in terms of square size).
### Setup
So to find the pattern in a chessboard, we use the function **cv2.findChessboardCorners()**. We also
So to find the pattern in a chessboard, we use the function **cv.findChessboardCorners()**. We also
need to pass what kind of pattern we are looking for, such as an 8x8 grid or a 5x5 grid. In this example, we
use a 7x6 grid. (Normally a chessboard has 8x8 squares and 7x7 internal corners.) It returns the
corner points and retval, which will be True if the pattern is found. These corners will be placed in
@ -95,19 +95,19 @@ are not sure out of 14 images given, how many are good. So we read all the image
ones.
@sa Instead of a chessboard, we can use a circular grid, but then use the function
**cv2.findCirclesGrid()** to find the pattern. Fewer images are said to be sufficient when
**cv.findCirclesGrid()** to find the pattern. Fewer images are said to be sufficient when
using a circular grid.
Once we find the corners, we can increase their accuracy using **cv2.cornerSubPix()**. We can also
draw the pattern using **cv2.drawChessboardCorners()**. All these steps are included in the code below:
Once we find the corners, we can increase their accuracy using **cv.cornerSubPix()**. We can also
draw the pattern using **cv.drawChessboardCorners()**. All these steps are included in the code below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
import glob
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
@ -120,25 +120,25 @@ imgpoints = [] # 2d points in image plane.
images = glob.glob('*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7,6), None)
    ret, corners = cv.findChessboardCorners(gray, (7,6), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
        corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (7,6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
        cv.drawChessboardCorners(img, (7,6), corners2, ret)
        cv.imshow('img', img)
        cv.waitKey(500)
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
One image with pattern drawn on it is shown below:
@ -147,37 +147,37 @@ One image with pattern drawn on it is shown below:
### Calibration
So now we have our object points and image points we are ready to go for calibration. For that we
use the function, **cv2.calibrateCamera()**. It returns the camera matrix, distortion coefficients,
use the function, **cv.calibrateCamera()**. It returns the camera matrix, distortion coefficients,
rotation and translation vectors etc.
@code{.py}
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
@endcode
### Undistortion
Now that we have what we were after, we can take an image and undistort it. OpenCV comes with two
methods; we will see both. But before that, we can refine the camera matrix based on a free scaling
parameter using **cv2.getOptimalNewCameraMatrix()**. If the scaling parameter alpha=0, it returns an
parameter using **cv.getOptimalNewCameraMatrix()**. If the scaling parameter alpha=0, it returns an
undistorted image with minimum unwanted pixels, so it may even remove some pixels at the image corners.
If alpha=1, all pixels are retained, with some extra black pixels. It also returns an image ROI which
can be used to crop the result.
So we take a new image (left12.jpg in this case; it is the first image in this chapter).
@code{.py}
img = cv2.imread('left12.jpg')
img = cv.imread('left12.jpg')
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
@endcode
#### 1. Using **cv2.undistort()**
#### 1. Using **cv.undistort()**
This is the shortest path. Just call the function and use the ROI obtained above to crop the result.
@code{.py}
# undistort
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
dst = cv.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png', dst)
cv.imwrite('calibresult.png', dst)
@endcode
#### 2. Using **remapping**
@ -185,13 +185,13 @@ This is curved path. First find a mapping function from distorted image to undis
use the remap function.
@code{.py}
# undistort
mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png', dst)
cv.imwrite('calibresult.png', dst)
@endcode
Both the methods give the same result. See the result below:
@ -207,15 +207,15 @@ Re-projection Error
Re-projection error gives a good estimation of just how exact the found parameters are. This should
be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices,
we first transform the object point to an image point using **cv2.projectPoints()**. Then we calculate
we first transform the object point to an image point using **cv.projectPoints()**. Then we calculate
the absolute norm between what we got with our transformation and the corner finding algorithm. To
find the average error, we calculate the arithmetical mean of the errors calculated for all the
calibration images.
@code{.py}
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2)
    mean_error += error
print( "total error: {}".format(mean_error/len(objpoints)) )


@ -38,13 +38,13 @@ Code
The code snippet below shows a simple procedure to create a disparity map.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
imgL = cv2.imread('tsukuba_l.png',0)
imgR = cv2.imread('tsukuba_r.png',0)
imgL = cv.imread('tsukuba_l.png',0)
imgR = cv.imread('tsukuba_r.png',0)
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
stereo = cv.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show()
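# A hedged sketch (not in the original tutorial): StereoBM returns fixed-point
# disparities scaled by 16, so converting to float gives truer values for display:
# disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16.0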


@ -72,14 +72,14 @@ Code
So first we need to find as many matches as possible between the two images to find the fundamental matrix.
For this, we use SIFT descriptors with a FLANN based matcher and the ratio test.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv2.imread('myleft.jpg',0) #queryimage # left image
img2 = cv2.imread('myright.jpg',0) #trainimage # right image
img1 = cv.imread('myleft.jpg',0) #queryimage # left image
img2 = cv.imread('myright.jpg',0) #trainimage # right image
sift = cv2.SIFT()
sift = cv.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
@ -90,7 +90,7 @@ FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
flann = cv.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []
@ -108,7 +108,7 @@ Now we have the list of best matches from both the images. Let's find the Fundam
@code{.py}
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)
F, mask = cv.findFundamentalMat(pts1,pts2,cv.FM_LMEDS)
# We select only inlier points
pts1 = pts1[mask.ravel()==1]
@ -122,28 +122,28 @@ def drawlines(img1,img2,lines,pts1,pts2):
    ''' img1 - image on which we draw the epilines for the points in img2
        lines - corresponding epilines '''
    r,c = img1.shape
    img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
    img1 = cv.cvtColor(img1,cv.COLOR_GRAY2BGR)
    img2 = cv.cvtColor(img2,cv.COLOR_GRAY2BGR)
    for r,pt1,pt2 in zip(lines,pts1,pts2):
        color = tuple(np.random.randint(0,255,3).tolist())
        x0,y0 = map(int, [0, -r[2]/r[1] ])
        x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
        img1 = cv2.line(img1, (x0,y0), (x1,y1), color,1)
        img1 = cv2.circle(img1,tuple(pt1),5,color,-1)
        img2 = cv2.circle(img2,tuple(pt2),5,color,-1)
        img1 = cv.line(img1, (x0,y0), (x1,y1), color,1)
        img1 = cv.circle(img1,tuple(pt1),5,color,-1)
        img2 = cv.circle(img2,tuple(pt2),5,color,-1)
    return img1,img2
@endcode
Now we find the epilines in both the images and draw them.
@code{.py}
# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
lines1 = lines1.reshape(-1,3)
img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
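# A hedged sketch of the natural next step (not shown in this excerpt):
# display both images with their epilines side by side, using plt from above
plt.subplot(121),plt.imshow(img5)
plt.subplot(122),plt.imshow(img3)
plt.show()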


@ -24,8 +24,8 @@ should feel like it is perpendicular to our chessboard plane.
First, let's load the camera matrix and distortion coefficients from the previous calibration
result.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
import glob
# Load previously saved data
@ -33,13 +33,13 @@ with np.load('B.npz') as X:
    mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
@endcode
Now let's create a function, draw, which takes the corners in the chessboard (obtained using
**cv2.findChessboardCorners()**) and **axis points**, and draws a 3D axis.
**cv.findChessboardCorners()**) and **axis points**, and draws a 3D axis.
@code{.py}
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    img = cv.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    img = cv.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    img = cv.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    return img
@endcode
Then, as in the previous case, we create termination criteria, object points (3D points of corners in
@ -48,7 +48,7 @@ of length 3 (units will be in terms of chess square size since we calibrated bas
our X axis is drawn from (0,0,0) to (3,0,0), and similarly for the Y axis. The Z axis is drawn from (0,0,0) to
(0,0,-3); the negative sign denotes that it is drawn towards the camera.
@code{.py}
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
@ -56,32 +56,32 @@ axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
@endcode
Now, as usual, we load each image and search for the 7x6 grid. If found, we refine the corners with sub-pixel
accuracy. Then, to calculate the rotation and translation, we use the function
**cv2.solvePnPRansac()**. Once we have those transformation matrices, we use them to project our **axis
**cv.solvePnPRansac()**. Once we have those transformation matrices, we use them to project our **axis
points** onto the image plane. In simple words, we find the points on the image plane corresponding to
each of (3,0,0), (0,3,0), (0,0,3) in 3D space. Once we get them, we draw lines from the first corner
to each of these points using our draw() function. Done!
@code{.py}
for fname in glob.glob('left*.jpg'):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
    img = cv.imread(fname)
    gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
    ret, corners = cv.findChessboardCorners(gray, (7,6),None)
    if ret == True:
        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        # Find the rotation and translation vectors.
        ret, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)
        ret, rvecs, tvecs, inliers = cv.solvePnPRansac(objp, corners2, mtx, dist)
        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
        img = draw(img,corners2,imgpts)
        cv2.imshow('img',img)
        k = cv2.waitKey(0) & 0xFF
        cv.imshow('img',img)
        k = cv.waitKey(0) & 0xFF
        if k == ord('s'):
            cv2.imwrite(fname[:6]+'.png', img)
            cv.imwrite(fname[:6]+'.png', img)
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
See some results below. Notice that each axis is 3 squares long:
@ -97,14 +97,14 @@ def draw(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1,2)
    # draw ground floor in green
    img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)
    img = cv.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)
    # draw pillars in blue color
    for i,j in zip(range(4),range(4,8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
        img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)
    # draw top layer in red color
    img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)
    img = cv.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)
    return img
@endcode
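For this cube version of draw(), the axis array from earlier must be replaced by the cube's 8 corner points. A minimal sketch (3D coordinates in chess-square units; negative Z points towards the camera, as before):
@code{.py}
axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],
                   [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3]])
@endcode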


@ -21,10 +21,10 @@ Accessing and Modifying pixel values
Let's load a color image first:
@code{.py}
>>> import cv2
>>> import numpy as np
>>> import cv2 as cv
>>> img = cv2.imread('messi5.jpg')
>>> img = cv.imread('messi5.jpg')
@endcode
You can access a pixel value by its row and column coordinates. For a BGR image, it returns an array
of blue, green, and red values. For a grayscale image, just the corresponding intensity is returned.
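A minimal sketch of both access patterns (interactive-session style, as above; the coordinates are arbitrary):
@code{.py}
>>> px = img[100,100]
>>> print( px )                    # BGR triplet, e.g. [157 166 200]
>>> blue = img[100,100,0]          # only the blue channel
>>> img[100,100] = [255,255,255]   # modify the pixel
@endcode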
@ -122,8 +122,8 @@ Sometimes you will need to work separately on B,G,R channels of image. In this c
to split a BGR image into single channels. In other cases, you may need to join these individual
channels into a BGR image. You can do it simply by:
@code{.py}
>>> b,g,r = cv2.split(img)
>>> img = cv2.merge((b,g,r))
>>> b,g,r = cv.split(img)
>>> img = cv.merge((b,g,r))
@endcode
Or
@code
@ -137,14 +137,14 @@ Numpy indexing is faster:
**Warning**
cv2.split() is a costly operation (in terms of time). So use it only if you need it; otherwise go
cv.split() is a costly operation (in terms of time). So use it only if you need it; otherwise go
for Numpy indexing.
Making Borders for Images (Padding)
-----------------------------------
If you want to create a border around the image, something like a photo frame, you can use
**cv2.copyMakeBorder()**. It also has applications in convolution operations, zero
**cv.copyMakeBorder()**. It also has applications in convolution operations, zero
padding, etc. This function takes the following arguments:
- **src** - input image
@ -152,34 +152,34 @@ padding etc. This function takes following arguments:
directions
- **borderType** - Flag defining what kind of border is to be added. It can be one of the following types:
    - **cv2.BORDER_CONSTANT** - Adds a constant colored border. The value should be given
    - **cv.BORDER_CONSTANT** - Adds a constant colored border. The value should be given
      as the next argument.
    - **cv2.BORDER_REFLECT** - Border will be a mirror reflection of the border elements,
    - **cv.BORDER_REFLECT** - Border will be a mirror reflection of the border elements,
      like this: *fedcba|abcdefgh|hgfedcb*
    - **cv2.BORDER_REFLECT_101** or **cv2.BORDER_DEFAULT** - Same as above, but with a
    - **cv.BORDER_REFLECT_101** or **cv.BORDER_DEFAULT** - Same as above, but with a
      slight change, like this: *gfedcb|abcdefgh|gfedcba*
    - **cv2.BORDER_REPLICATE** - The last element is replicated throughout, like this:
    - **cv.BORDER_REPLICATE** - The last element is replicated throughout, like this:
      *aaaaaa|abcdefgh|hhhhhhh*
    - **cv2.BORDER_WRAP** - Hard to describe in words; it will look like this:
    - **cv.BORDER_WRAP** - Hard to describe in words; it will look like this:
      *cdefgh|abcdefgh|abcdefg*
- **value** - Color of the border if the border type is cv2.BORDER_CONSTANT
- **value** - Color of the border if the border type is cv.BORDER_CONSTANT
Below is a sample code demonstrating all these border types for better understanding:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
BLUE = [255,0,0]
img1 = cv2.imread('opencv-logo.png')
img1 = cv.imread('opencv-logo.png')
replicate = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REPLICATE)
reflect = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT)
reflect101 = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT_101)
wrap = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_WRAP)
constant= cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE)
replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE)
reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT)
reflect101 = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT_101)
wrap = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_WRAP)
constant= cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE)
plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')


@ -6,12 +6,12 @@ Goal
- Learn several arithmetic operations on images like addition, subtraction, bitwise operations
etc.
- You will learn these functions : **cv2.add()**, **cv2.addWeighted()** etc.
- You will learn these functions : **cv.add()**, **cv.addWeighted()** etc.
Image Addition
--------------
You can add two images with the OpenCV function cv2.add(), or simply by the numpy operation
You can add two images with the OpenCV function cv.add(), or simply by the numpy operation
res = img1 + img2. Both images should be of the same depth and type, or the second image can just be a
scalar value.
@ -23,7 +23,7 @@ For example, consider below sample:
>>> x = np.uint8([250])
>>> y = np.uint8([10])
>>> print( cv2.add(x,y) ) # 250+10 = 260 => 255
>>> print( cv.add(x,y) ) # 250+10 = 260 => 255
[[255]]
>>> print( x+y ) # 250+10 = 260 % 256 = 4
@ -44,20 +44,20 @@ By varying \f$\alpha\f$ from \f$0 \rightarrow 1\f$, you can perform a cool trans
another.
Here I took two images to blend together. The first image is given a weight of 0.7 and the second image
is given 0.3. cv2.addWeighted() applies the following equation to the image.
is given 0.3. cv.addWeighted() applies the following equation to the image.
\f[dst = \alpha \cdot img1 + \beta \cdot img2 + \gamma\f]
Here \f$\gamma\f$ is taken as zero.
@code{.py}
img1 = cv2.imread('ml.png')
img2 = cv2.imread('opencv-logo.png')
img1 = cv.imread('ml.png')
img2 = cv.imread('opencv-logo.png')
dst = cv2.addWeighted(img1,0.7,img2,0.3,0)
dst = cv.addWeighted(img1,0.7,img2,0.3,0)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('dst',dst)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
Check the result below:
@ -76,31 +76,31 @@ ROI as we did in last chapter. But OpenCV logo is a not a rectangular shape. So
bitwise operations as below:
@code{.py}
# Load two images
img1 = cv2.imread('messi5.jpg')
img2 = cv2.imread('opencv-logo.png')
img1 = cv.imread('messi5.jpg')
img2 = cv.imread('opencv-logo.png')
# I want to put logo on top-left corner, So I create a ROI
rows,cols,channels = img2.shape
roi = img1[0:rows, 0:cols ]
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
img2gray = cv.cvtColor(img2,cv.COLOR_BGR2GRAY)
ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)
mask_inv = cv.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
img1_bg = cv.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
img2_fg = cv.bitwise_and(img2,img2,mask = mask)
# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg,img2_fg)
dst = cv.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
cv2.imshow('res',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('res',img1)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
See the result below. The left image shows the mask we created. The right image shows the final result. For
better understanding, display all the intermediate images in the above code, especially img1_bg and
@ -115,4 +115,4 @@ Exercises
---------
-# Create a slide show of images in a folder with a smooth transition between images using the
cv2.addWeighted function (a sketch follows below)
cv.addWeighted function (a sketch follows below)
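A minimal sketch of such a transition, assuming two already-loaded images of the same size and type (the file names are reused from the blending example above):
@code{.py}
import numpy as np
import cv2 as cv

img1 = cv.imread('ml.png')           # assumed inputs, same size and type
img2 = cv.imread('opencv-logo.png')

# cross-fade by sweeping the blending weight alpha from 0 to 1
for alpha in np.linspace(0, 1, 50):
    dst = cv.addWeighted(img1, 1 - alpha, img2, alpha, 0)
    cv.imshow('slideshow', dst)
    cv.waitKey(30)
cv.destroyAllWindows()
@endcode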


@ -10,7 +10,7 @@ So in this chapter, you will learn
- To measure the performance of your code.
- Some tips to improve the performance of your code.
- You will see these functions : **cv2.getTickCount**, **cv2.getTickFrequency** etc.
- You will see these functions : **cv.getTickCount**, **cv.getTickFrequency** etc.
Apart from OpenCV, Python also provides a module, **time**, which is helpful in measuring the time of
execution. Another module, **profile**, helps to get a detailed report on the code, like how much time
@ -21,34 +21,34 @@ ones, and for more details, check links in **Additional Resouces** section.
Measuring Performance with OpenCV
---------------------------------
**cv2.getTickCount** function returns the number of clock-cycles from a reference event (like the
**cv.getTickCount** function returns the number of clock-cycles from a reference event (like the
moment the machine was switched on) to the moment this function is called. So if you call it before and
after a function's execution, you get the number of clock-cycles used to execute the function.
**cv2.getTickFrequency** function returns the frequency of clock-cycles, or the number of
**cv.getTickFrequency** function returns the frequency of clock-cycles, or the number of
clock-cycles per second. So to find the time of execution in seconds, you can do the following:
@code{.py}
e1 = cv2.getTickCount()
e1 = cv.getTickCount()
# your code execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
e2 = cv.getTickCount()
time = (e2 - e1)/ cv.getTickFrequency()
@endcode
We will demonstrate with the following example, which applies median filtering with kernels
of odd sizes ranging from 5 to 49. (Don't worry about what the result will look like; that is not our
goal):
@code{.py}
img1 = cv2.imread('messi5.jpg')
img1 = cv.imread('messi5.jpg')
e1 = cv2.getTickCount()
e1 = cv.getTickCount()
for i in range(5,49,2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
    img1 = cv.medianBlur(img1,i)
e2 = cv.getTickCount()
t = (e2 - e1)/cv.getTickFrequency()
print( t )
# Result I got is 0.521107655 seconds
@endcode
@note You can do the same with the time module. Instead of cv2.getTickCount, use the time.time() function.
@note You can do the same with the time module. Instead of cv.getTickCount, use the time.time() function.
Then take the difference of the two times.
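A minimal sketch of that alternative (standard-library time only; nothing OpenCV-specific is assumed):
@code{.py}
import time

t1 = time.time()
# your code execution
t2 = time.time()
print( t2 - t1 )   # elapsed wall-clock time in seconds
@endcode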
Default Optimization in OpenCV
@ -57,23 +57,23 @@ Default Optimization in OpenCV
Many of the OpenCV functions are optimized using SSE2, AVX, etc. OpenCV also contains unoptimized code.
So if our system supports these features, we should exploit them (almost all modern processors
support them). Optimization is enabled by default when compiling, so OpenCV runs the optimized code if it is
enabled, else it runs the unoptimized code. You can use **cv2.useOptimized()** to check if it is
enabled/disabled and **cv2.setUseOptimized()** to enable/disable it. Let's see a simple example.
enabled, else it runs the unoptimized code. You can use **cv.useOptimized()** to check if it is
enabled/disabled and **cv.setUseOptimized()** to enable/disable it. Let's see a simple example.
@code{.py}
# check if optimization is enabled
In [5]: cv2.useOptimized()
In [5]: cv.useOptimized()
Out[5]: True
In [6]: %timeit res = cv2.medianBlur(img,49)
In [6]: %timeit res = cv.medianBlur(img,49)
10 loops, best of 3: 34.9 ms per loop
# Disable it
In [7]: cv2.setUseOptimized(False)
In [7]: cv.setUseOptimized(False)
In [8]: cv2.useOptimized()
In [8]: cv.useOptimized()
Out[8]: False
In [9]: %timeit res = cv2.medianBlur(img,49)
In [9]: %timeit res = cv.medianBlur(img,49)
10 loops, best of 3: 64.1 ms per loop
@endcode
See, optimized median filtering is \~2x faster than the unoptimized version. If you check its source,
@ -115,11 +115,11 @@ working on this issue)*
one or two elements, a Python scalar is better than a Numpy array. Numpy has the advantage when the size of the
array is a little bit bigger.
We will try one more example. This time, we will compare the performance of **cv2.countNonZero()**
We will try one more example. This time, we will compare the performance of **cv.countNonZero()**
and **np.count_nonzero()** for the same image.
@code{.py}
In [35]: %timeit z = cv2.countNonZero(img)
In [35]: %timeit z = cv.countNonZero(img)
100000 loops, best of 3: 15.8 us per loop
In [36]: %timeit z = np.count_nonzero(img)


@ -52,16 +52,16 @@ detector is called STAR detector in OpenCV)
Note that you need [opencv contrib](https://github.com/opencv/opencv_contrib) to use this.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('simple.jpg',0)
img = cv.imread('simple.jpg',0)
# Initiate FAST detector
star = cv2.xfeatures2d.StarDetector_create()
star = cv.xfeatures2d.StarDetector_create()
# Initiate BRIEF extractor
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
# find the keypoints with STAR
kp = star.detect(img,None)
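# A hedged sketch of the next step (not shown in this excerpt): compute the
# BRIEF descriptors for the keypoints found by STAR
kp, des = brief.compute(img, kp)
print( brief.descriptorSize() )
print( des.shape )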


@ -90,22 +90,22 @@ FAST Feature Detector in OpenCV
It is called like any other feature detector in OpenCV. If you want, you can specify the threshold,
whether non-maximum suppression is to be applied or not, the neighborhood to be used, etc.
For the neighborhood, three flags are defined, cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. Below is a
For the neighborhood, three flags are defined, cv.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv.FAST_FEATURE_DETECTOR_TYPE_9_16. Below is a
simple code on how to detect and draw the FAST feature points.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('simple.jpg',0)
img = cv.imread('simple.jpg',0)
# Initiate FAST object with default values
fast = cv2.FastFeatureDetector_create()
fast = cv.FastFeatureDetector_create()
# find and draw the keypoints
kp = fast.detect(img,None)
img2 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
img2 = cv.drawKeypoints(img, kp, None, color=(255,0,0))
# Print all default params
print( "Threshold: {}".format(fast.getThreshold()) )
@ -113,7 +113,7 @@ print( "nonmaxSuppression:{}".format(fast.getNonmaxSuppression()) )
print( "neighborhood: {}".format(fast.getType()) )
print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )
cv2.imwrite('fast_true.png',img2)
cv.imwrite('fast_true.png',img2)
# Disable nonmaxSuppression
fast.setNonmaxSuppression(0)
@ -121,9 +121,9 @@ kp = fast.detect(img,None)
print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )
img3 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
img3 = cv.drawKeypoints(img, kp, None, color=(255,0,0))
cv2.imwrite('fast_false.png',img3)
cv.imwrite('fast_false.png',img3)
@endcode
See the results. The first image shows FAST with nonmaxSuppression and the second one without
nonmaxSuppression:
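The three neighborhood flags mentioned above can be passed when creating the detector. A minimal sketch, assuming the same `img` as above (the parameter names mirror the C++ create() signature and should be treated as an assumption):
@code{.py}
# pick the 5_8 neighborhood instead of the default 9_16
fast58 = cv.FastFeatureDetector_create(threshold=10, nonmaxSuppression=True,
                                       type=cv.FAST_FEATURE_DETECTOR_TYPE_5_8)
kp58 = fast58.detect(img, None)
print( "Keypoints with TYPE_5_8: {}".format(len(kp58)) )
@endcode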


@ -16,15 +16,15 @@ another trainImage, found the features in that image too and we found the best m
In short, we found locations of some parts of an object in another cluttered image. This information
is sufficient to find the object exactly on the trainImage.
For that, we can use a function from the calib3d module, i.e. **cv2.findHomography()**. If we pass the set
For that, we can use a function from the calib3d module, i.e. **cv.findHomography()**. If we pass the set
of points from both the images, it will find the perspective transformation of that object. Then we
can use **cv2.perspectiveTransform()** to find the object. It needs at least four correct points to
can use **cv.perspectiveTransform()** to find the object. It needs at least four correct points to
find the transformation.
We have seen that there can be some possible errors while matching which may affect the result. To
solve this problem, the algorithm uses RANSAC or LEAST_MEDIAN (which can be decided by the flags). Good
matches which provide a correct estimation are called inliers, and the remaining ones are called outliers.
**cv2.findHomography()** returns a mask which specifies the inlier and outlier points.
**cv.findHomography()** returns a mask which specifies the inlier and outlier points.
So let's do it!
@ -35,16 +35,16 @@ First, as usual, let's find SIFT features in images and apply the ratio test to
matches.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('box_in_scene.png',0) # trainImage
img1 = cv.imread('box.png',0) # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
sift = cv.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
@ -54,7 +54,7 @@ FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
@ -75,14 +75,14 @@ if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()
    h,w,d = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)
    dst = cv.perspectiveTransform(pts,M)
    img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
    img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
@ -95,7 +95,7 @@ draw_params = dict(matchColor = (0,255,0), # draw matches in green color
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.imshow(img3, 'gray'),plt.show()
@endcode


@ -7,7 +7,7 @@ Goal
In this chapter,
- We will understand the concepts behind Harris Corner Detection.
- We will see the functions: **cv2.cornerHarris()**, **cv2.cornerSubPix()**
- We will see the functions: **cv.cornerHarris()**, **cv.cornerSubPix()**
Theory
------
@ -35,7 +35,7 @@ where
I_x I_y & I_y I_y \end{bmatrix}\f]
Here, \f$I_x\f$ and \f$I_y\f$ are the image derivatives in the x and y directions respectively. (They can be easily found
using **cv2.Sobel()**.)
using **cv.Sobel()**.)
Then comes the main part. After this, they created a score, basically an equation, which
determines whether a window can contain a corner or not.
@ -65,7 +65,7 @@ suitable give you the corners in the image. We will do it with a simple image.
Harris Corner Detector in OpenCV
--------------------------------
OpenCV has the function **cv2.cornerHarris()** for this purpose. Its arguments are:
OpenCV has the function **cv.cornerHarris()** for this purpose. Its arguments are:
- **img** - Input image, it should be grayscale and float32 type.
- **blockSize** - It is the size of neighbourhood considered for corner detection
@ -74,25 +74,25 @@ OpenCV has the function **cv2.cornerHarris()** for this purpose. Its arguments a
See the example below:
@code{.py}
import cv2
import numpy as np
import cv2 as cv
filename = 'chessboard.png'
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv.imread(filename)
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
dst = cv.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
dst = cv.dilate(dst,None)
# Threshold for an optimal value, it may vary depending on the image.
img[dst>0.01*dst.max()]=[0,0,255]
cv2.imshow('dst',img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
cv.imshow('dst',img)
if cv.waitKey(0) & 0xff == 27:
    cv.destroyAllWindows()
@endcode
Below are the three results:
@ -102,7 +102,7 @@ Corner with SubPixel Accuracy
-----------------------------
Sometimes, you may need to find the corners with maximum accuracy. OpenCV comes with a function,
**cv2.cornerSubPix()**, which further refines the detected corners with sub-pixel accuracy. Below is
**cv.cornerSubPix()**, which further refines the detected corners with sub-pixel accuracy. Below is
an example. As usual, we need to find the Harris corners first. Then we pass the centroids of these
corners (there may be a bunch of pixels at a corner; we take their centroid) to refine them. Harris
corners are marked in red pixels and refined corners are marked in green pixels. For this function,
@ -110,26 +110,26 @@ we have to define the criteria when to stop the iteration. We stop it after a sp
iterations or a certain accuracy is achieved, whichever occurs first. We also need to define the size
of the neighbourhood in which it searches for corners.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
filename = 'chessboard2.jpg'
img = cv2.imread(filename)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv.imread(filename)
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# find Harris corners
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
dst = cv2.dilate(dst,None)
ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0)
dst = cv.cornerHarris(gray,2,3,0.04)
dst = cv.dilate(dst,None)
ret, dst = cv.threshold(dst,0.01*dst.max(),255,0)
dst = np.uint8(dst)
# find centroids
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
# define the criteria to stop and refine the corners
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
# Now draw them
res = np.hstack((centroids,corners))
@ -137,7 +137,7 @@ res = np.int0(res)
img[res[:,1],res[:,0]]=[0,0,255]
img[res[:,3],res[:,2]] = [0,255,0]
cv2.imwrite('subpixel5.png',img)
cv.imwrite('subpixel5.png',img)
@endcode
Below is the result, where some important locations are shown in zoomed window to visualize:


@ -15,11 +15,11 @@ Brute-Force matcher is simple. It takes the descriptor of one feature in first s
with all other features in the second set using some distance calculation. And the closest one is
returned.
For the BF matcher, first we have to create the BFMatcher object using **cv2.BFMatcher()**. It takes two
For the BF matcher, first we have to create the BFMatcher object using **cv.BFMatcher()**. It takes two
optional params. The first one is normType. It specifies the distance measurement to be used. By
default, it is cv2.NORM_L2. It is good for SIFT, SURF, etc. (cv2.NORM_L1 is also there). For binary
string based descriptors like ORB, BRIEF, BRISK, etc., cv2.NORM_HAMMING should be used, which uses
Hamming distance as the measurement. If ORB is using WTA_K == 3 or 4, cv2.NORM_HAMMING2 should be
default, it is cv.NORM_L2. It is good for SIFT, SURF, etc. (cv.NORM_L1 is also there). For binary
string based descriptors like ORB, BRIEF, BRISK, etc., cv.NORM_HAMMING should be used, which uses
Hamming distance as the measurement. If ORB is using WTA_K == 3 or 4, cv.NORM_HAMMING2 should be
used.
The second param is a boolean variable, crossCheck, which is false by default. If it is true, the Matcher
@ -32,9 +32,9 @@ Once it is created, two important methods are *BFMatcher.match()* and *BFMatcher
one returns the best match. The second method returns the k best matches, where k is specified by the user.
It may be useful when we need to do additional work on them.
Just like we used cv2.drawKeypoints() to draw keypoints, **cv2.drawMatches()** helps us draw the
Just like we used cv.drawKeypoints() to draw keypoints, **cv.drawMatches()** helps us draw the
matches. It stacks two images horizontally and draws lines from the first image to the second showing the
best matches. There is also **cv2.drawMatchesKnn**, which draws all the k best matches. If k=2, it
best matches. There is also **cv.drawMatchesKnn**, which draws all the k best matches. If k=2, it
will draw two match-lines for each keypoint. So we have to pass a mask if we want to draw them
selectively.
@ -50,27 +50,27 @@ We are using ORB descriptors to match features. So let's start with loading imag
descriptors etc.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
import matplotlib.pyplot as plt
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('box_in_scene.png',0) # trainImage
img1 = cv.imread('box.png',0) # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
# Initiate ORB detector
orb = cv2.ORB_create()
orb = cv.ORB_create()
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
@endcode
Next we create a BFMatcher object with the distance measurement cv2.NORM_HAMMING (since we are using
Next we create a BFMatcher object with the distance measurement cv.NORM_HAMMING (since we are using
ORB) and crossCheck switched on for better results. Then we use the Matcher.match() method to get the
best matches between the two images. We sort them in ascending order of their distances so that the best
matches (with low distance) come to the front. Then we draw only the first 10 matches (just for the sake of
visibility; you can increase it as you like).
@code{.py}
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1,des2)
@ -79,7 +79,7 @@ matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
# Draw first 10 matches.
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2)
img3 = cv.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2)
plt.imshow(img3),plt.show()
@endcode
@ -103,21 +103,21 @@ This time, we will use BFMatcher.knnMatch() to get k best matches. In this examp
so that we can apply the ratio test explained by D. Lowe in his paper.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('box_in_scene.png',0) # trainImage
img1 = cv.imread('box.png',0) # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
# Initiate SIFT detector
sift = cv2.SIFT()
sift = cv.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# BFMatcher with default params
bf = cv2.BFMatcher()
bf = cv.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test
@ -126,8 +126,8 @@ for m,n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])
# cv2.drawMatchesKnn expects list of lists as matches.
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,flags=2)
# cv.drawMatchesKnn expects list of lists as matches.
img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,good,flags=2)
plt.imshow(img3),plt.show()
@endcode
@ -167,14 +167,14 @@ you want to change the value, pass search_params = dict(checks=100).
With this information, we are good to go.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('box_in_scene.png',0) # trainImage
img1 = cv.imread('box.png',0) # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
# Initiate SIFT detector
sift = cv2.SIFT()
sift = cv.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
@ -185,7 +185,7 @@ FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
flann = cv.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
@ -202,7 +202,7 @@ draw_params = dict(matchColor = (0,255,0),
matchesMask = matchesMask,
flags = 0)
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
plt.imshow(img3,),plt.show()
@endcode


@ -52,7 +52,7 @@ choice in low-power devices for panorama stitching etc.
ORB in OpenCV
-------------
As usual, we have to create an ORB object with the function **cv2.ORB()** or through the feature2d common
As usual, we have to create an ORB object with the function **cv.ORB()** or through the feature2d common
interface. It has a number of optional parameters. The most useful ones are nFeatures, which denotes the
maximum number of features to be retained (500 by default), and scoreType, which denotes whether the Harris
score or the FAST score is used to rank the features (Harris score by default). Another parameter, WTA_K
@ -64,13 +64,13 @@ is defined by NORM_HAMMING2.
Below is simple code showing the use of ORB.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('simple.jpg',0)
img = cv.imread('simple.jpg',0)
# Initiate ORB detector
orb = cv2.ORB_create()
orb = cv.ORB_create()
# find the keypoints with ORB
kp = orb.detect(img,None)
@ -79,7 +79,7 @@ kp = orb.detect(img,None)
kp, des = orb.compute(img, kp)
# draw only keypoints location,not size and orientation
img2 = cv2.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)
img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)
plt.imshow(img2), plt.show()
@endcode
See the result below:


@ -7,7 +7,7 @@ Goal
In this chapter,
- We will learn about another corner detector: the Shi-Tomasi Corner Detector
- We will see the function: **cv2.goodFeaturesToTrack()**
- We will see the function: **cv.goodFeaturesToTrack()**
Theory
------
@ -33,7 +33,7 @@ From the figure, you can see that only when \f$\lambda_1\f$ and \f$\lambda_2\f$
Code
----
OpenCV has a function, **cv2.goodFeaturesToTrack()**. It finds the N strongest corners in the image by the
OpenCV has a function, **cv.goodFeaturesToTrack()**. It finds the N strongest corners in the image by the
Shi-Tomasi method (or Harris Corner Detection, if you specify it). As usual, the image should be a
grayscale image. Then you specify the number of corners you want to find. Then you specify the quality
level, a value between 0 and 1, which denotes the minimum quality of corner below which
@ -47,18 +47,18 @@ minimum distance and returns N strongest corners.
In the example below, we will try to find the 25 best corners:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('blox.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv.imread('blox.jpg')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,25,0.01,10)
corners = cv.goodFeaturesToTrack(gray,25,0.01,10)
corners = np.int0(corners)
for i in corners:
    x,y = i.ravel()
    cv2.circle(img,(x,y),3,255,-1)
    cv.circle(img,(x,y),3,255,-1)
plt.imshow(img),plt.show()
@endcode


@ -113,30 +113,30 @@ So now let's see SIFT functionalities available in OpenCV. Let's start with keyp
draw them. First we have to construct a SIFT object. We can pass different parameters to it, which
are optional and well explained in the docs.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
img = cv2.imread('home.jpg')
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv.imread('home.jpg')
gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
sift = cv.xfeatures2d.SIFT_create()
kp = sift.detect(gray,None)
img=cv2.drawKeypoints(gray,kp,img)
img=cv.drawKeypoints(gray,kp,img)
cv2.imwrite('sift_keypoints.jpg',img)
cv.imwrite('sift_keypoints.jpg',img)
@endcode
The **sift.detect()** function finds keypoints in the image. You can pass a mask if you want to
search only a part of the image. Each keypoint is a special structure which has many attributes, like its
(x,y) coordinates, the size of the meaningful neighbourhood, the angle which specifies its orientation, the
response that specifies the strength of the keypoint, etc.
OpenCV also provides the **cv2.drawKeypoints()** function, which draws small circles at the locations
of keypoints. If you pass the flag **cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS** to it, it will
OpenCV also provides the **cv.drawKeypoints()** function, which draws small circles at the locations
of keypoints. If you pass the flag **cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS** to it, it will
draw a circle with the size of the keypoint and even show its orientation. See the example below.
@code{.py}
img=cv2.drawKeypoints(gray,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('sift_keypoints.jpg',img)
img=cv.drawKeypoints(gray,kp,img,flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv.imwrite('sift_keypoints.jpg',img)
@endcode
See the two results below:
@ -151,7 +151,7 @@ Now to calculate the descriptor, OpenCV provides two methods.
We will see the second method:
@code{.py}
sift = cv2.xfeatures2d.SIFT_create()
sift = cv.xfeatures2d.SIFT_create()
kp, des = sift.detectAndCompute(gray,None)
@endcode
Here kp will be a list of keypoints and des is a numpy array of shape (number of keypoints) × 128.
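A quick way to confirm that shape (a sketch continuing the snippet above):
@code{.py}
print( len(kp), des.shape )   # des has one 128-dimensional row per keypoint
@endcode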


@ -76,11 +76,11 @@ and descriptors.
First we will see a simple demo on how to find SURF keypoints and descriptors and draw them. All
examples are shown in a Python terminal, since it is just the same as SIFT.
@code{.py}
>>> img = cv2.imread('fly.png',0)
>>> img = cv.imread('fly.png',0)
# Create SURF object. You can specify params here or later.
# Here I set Hessian Threshold to 400
>>> surf = cv2.xfeatures2d.SURF_create(400)
>>> surf = cv.xfeatures2d.SURF_create(400)
# Find keypoints and descriptors directly
>>> kp, des = surf.detectAndCompute(img,None)
@ -107,7 +107,7 @@ While matching, we may need all those features, but not now. So we increase the
@endcode
It is less than 50. Let's draw them on the image.
@code{.py}
>>> img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4)
>>> img2 = cv.drawKeypoints(img,kp,None,(255,0,0),4)
>>> plt.imshow(img2),plt.show()
@endcode
@ -126,7 +126,7 @@ False
# Recompute the feature points and draw it
>>> kp = surf.detect(img,None)
>>> img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4)
>>> img2 = cv.drawKeypoints(img,kp,None,(255,0,0),4)
>>> plt.imshow(img2),plt.show()
@endcode


@ -5,8 +5,8 @@ Goal
----
- Learn to draw different geometric shapes with OpenCV
- You will learn these functions : **cv2.line()**, **cv2.circle()** , **cv2.rectangle()**,
**cv2.ellipse()**, **cv2.putText()** etc.
- You will learn these functions : **cv.line()**, **cv.circle()** , **cv.rectangle()**,
**cv.ellipse()**, **cv.putText()** etc.
Code
----
@ -19,7 +19,7 @@ In all the above functions, you will see some common arguments as given below:
- thickness : Thickness of the line or circle, etc. If **-1** is passed for closed figures like
circles, it will fill the shape. *default thickness = 1*
- lineType : Type of line, whether 8-connected, anti-aliased, etc. *By default, it is
8-connected.* cv2.LINE_AA gives an anti-aliased line which looks great for curves.
8-connected.* cv.LINE_AA gives an anti-aliased line which looks great for curves.
### Drawing Line
@ -27,27 +27,27 @@ To draw a line, you need to pass starting and ending coordinates of line. We wil
image and draw a blue line on it from top-left to bottom-right corners.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
# Create a black image
img = np.zeros((512,512,3), np.uint8)
# Draw a diagonal blue line with thickness of 5 px
cv2.line(img,(0,0),(511,511),(255,0,0),5)
cv.line(img,(0,0),(511,511),(255,0,0),5)
@endcode
### Drawing Rectangle
To draw a rectangle, you need the top-left corner and the bottom-right corner of the rectangle. This time we
will draw a green rectangle at the top-right corner of the image.
@code{.py}
cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
cv.rectangle(img,(384,0),(510,128),(0,255,0),3)
@endcode
### Drawing Circle
To draw a circle, you need its center coordinates and radius. We will draw a circle inside the
rectangle drawn above.
@code{.py}
cv2.circle(img,(447,63), 63, (0,0,255), -1)
cv.circle(img,(447,63), 63, (0,0,255), -1)
@endcode
### Drawing Ellipse
@ -55,10 +55,10 @@ To draw the ellipse, we need to pass several arguments. One argument is the cent
The next argument is the axes lengths (major axis length, minor axis length). angle is the angle of rotation
of the ellipse in the anti-clockwise direction. startAngle and endAngle denote the starting and ending of the
ellipse arc measured in the clockwise direction from the major axis, i.e. giving values 0 and 360 gives the
full ellipse. For more details, check the documentation of **cv2.ellipse()**. The example below draws a
full ellipse. For more details, check the documentation of **cv.ellipse()**. The example below draws a
half ellipse at the center of the image.
@code{.py}
cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
cv.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
@endcode
### Drawing Polygon
@ -68,30 +68,30 @@ polygon of with four vertices in yellow color.
@code{.py}
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,255))
cv.polylines(img,[pts],True,(0,255,255))
@endcode
@note If the third argument is False, you will get polylines joining all the points, not a closed
shape.
@note cv2.polylines() can be used to draw multiple lines. Just create a list of all the lines you
@note cv.polylines() can be used to draw multiple lines. Just create a list of all the lines you
want to draw and pass it to the function. All lines will be drawn individually. It is a much better
and faster way to draw a group of lines than calling cv2.line() for each line; see the sketch below.
and faster way to draw a group of lines than calling cv.line() for each line; see the sketch below.
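A minimal sketch of that multi-line usage (the two segments are arbitrary examples, drawn on the same `img` as above):
@code{.py}
# two separate segments drawn in a single polylines() call
line1 = np.array([[100,100],[200,100]], np.int32).reshape((-1,1,2))
line2 = np.array([[100,200],[200,250]], np.int32).reshape((-1,1,2))
cv.polylines(img, [line1, line2], False, (0,255,255))
@endcode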
### Adding Text to Images:
To put text in images, you need to specify the following things.
- Text data that you want to write
- Position coordinates of where you want to put it (i.e. the bottom-left corner where the data starts).
- Font type (Check **cv2.putText()** docs for supported fonts)
- Font type (Check **cv.putText()** docs for supported fonts)
- Font Scale (specifies the size of font)
- regular things like color, thickness, lineType, etc. For a better look, lineType = cv2.LINE_AA
- regular things like color, thickness, lineType, etc. For a better look, lineType = cv.LINE_AA
is recommended.
We will write **OpenCV** on our image in white color.
@code{.py}
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv.LINE_AA)
@endcode
### Result


@ -5,7 +5,7 @@ Goals
-----
- Here, you will learn how to read an image, how to display it and how to save it back
- You will learn these functions : **cv2.imread()**, **cv2.imshow()** , **cv2.imwrite()**
- You will learn these functions : **cv.imread()**, **cv.imshow()** , **cv.imwrite()**
- Optionally, you will learn how to display images with Matplotlib
Using OpenCV
@ -13,25 +13,25 @@ Using OpenCV
### Read an image
Use the function **cv2.imread()** to read an image. The image should be in the working directory, or
Use the function **cv.imread()** to read an image. The image should be in the working directory, or
a full path to the image should be given.
The second argument is a flag which specifies the way the image should be read.
- cv2.IMREAD_COLOR : Loads a color image. Any transparency of the image will be neglected. It is the
- cv.IMREAD_COLOR : Loads a color image. Any transparency of the image will be neglected. It is the
default flag.
- cv2.IMREAD_GRAYSCALE : Loads the image in grayscale mode
- cv2.IMREAD_UNCHANGED : Loads the image as such, including the alpha channel
- cv.IMREAD_GRAYSCALE : Loads the image in grayscale mode
- cv.IMREAD_UNCHANGED : Loads the image as such, including the alpha channel
@note Instead of these three flags, you can simply pass integers 1, 0 or -1 respectively.
See the code below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
# Load a color image in grayscale
img = cv2.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg',0)
@endcode
**warning**
@ -40,21 +40,21 @@ Even if the image path is wrong, it won't throw any error, but `print img` will
### Display an image
Use the function **cv2.imshow()** to display an image in a window. The window automatically fits to
Use the function **cv.imshow()** to display an image in a window. The window automatically fits to
the image size.
The first argument is the window name, which is a string. The second argument is our image. You can create as
many windows as you wish, but with different window names.
@code{.py}
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
A screenshot of the window will look like this (in Fedora-Gnome machine):
![image](images/opencv_screenshot.jpg)
**cv2.waitKey()** is a keyboard binding function. Its argument is the time in milliseconds. The
**cv.waitKey()** is a keyboard binding function. Its argument is the time in milliseconds. The
function waits the specified milliseconds for any keyboard event. If you press any key in that time,
the program continues. If **0** is passed, it waits indefinitely for a key stroke. It can also be
set to detect specific key strokes, such as whether key 'a' is pressed, which we will discuss below.
@ -62,30 +62,30 @@ set to detect specific key strokes like, if key a is pressed etc which we will d
@note Besides binding keyboard events this function also processes many other GUI events, so you
MUST use it to actually display the image.
**cv2.destroyAllWindows()** simply destroys all the windows we created. If you want to destroy any
specific window, use the function **cv2.destroyWindow()** where you pass the exact window name as
**cv.destroyAllWindows()** simply destroys all the windows we created. If you want to destroy any
specific window, use the function **cv.destroyWindow()** where you pass the exact window name as
the argument.
@note There is a special case where you can create a window first and load an image into it later. In
that case, you can specify whether the window is resizable or not. It is done with the function
**cv2.namedWindow()**. By default, the flag is cv2.WINDOW_AUTOSIZE. But if you specify the flag to be
cv2.WINDOW_NORMAL, you can resize the window. It is helpful when an image is too large in dimensions
**cv.namedWindow()**. By default, the flag is cv.WINDOW_AUTOSIZE. But if you specify the flag to be
cv.WINDOW_NORMAL, you can resize the window. It is helpful when an image is too large in dimensions
or when adding track bars to windows.
See the code below:
@code{.py}
cv.namedWindow('image', cv.WINDOW_NORMAL)
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
### Write an image
Use the function **cv.imwrite()** to save an image.
The first argument is the file name; the second argument is the image you want to save.
@code{.py}
cv.imwrite('messigray.png',img)
@endcode
This will save the image in PNG format in the working directory.
The program below loads an image in grayscale and displays it; it saves the image and exits if you press 's', or
simply exits without saving if you press the ESC key.
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('messi5.jpg',0)
cv.imshow('image',img)
k = cv.waitKey(0)
if k == 27:         # wait for ESC key to exit
    cv.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
    cv.imwrite('messigray.png',img)
    cv.destroyAllWindows()
@endcode
**warning**

If you are using a 64-bit machine, you will have to modify the line `k = cv.waitKey(0)` as follows:
`k = cv.waitKey(0) & 0xFF`
Using Matplotlib
----------------
Matplotlib is a plotting library for Python which gives you a wide variety of plotting methods. You
will see them in coming articles. Here, you will learn how to display an image with Matplotlib: you can
zoom images, save them etc. using Matplotlib.
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('messi5.jpg',0)
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
@endcode
Goal
----
- Learn to handle mouse events in OpenCV
- You will learn these functions : **cv.setMouseCallback()**
Simple Demo
-----------
Here, we create a simple application which draws a circle on an image wherever we double-click.
A mouse event can be anything related to the mouse: left-button down, left-button up, left-button
double-click etc. It gives us the coordinates (x,y) for every mouse event. With this event and
location, we can do whatever we like. To list all available events, run the following code
in a Python terminal:
@code{.py}
import cv2 as cv
events = [i for i in dir(cv) if 'EVENT' in i]
print( events )
@endcode
Creating a mouse callback function has a specific format, which is the same everywhere; it differs only in
what the function does. So our mouse callback function does one thing: it draws a circle where we
double-click. See the code below; it is self-explanatory from the comments:
@code{.py}
import numpy as np
import cv2 as cv

# mouse callback function
def draw_circle(event,x,y,flags,param):
    if event == cv.EVENT_LBUTTONDBLCLK:
        cv.circle(img,(x,y),100,(255,0,0),-1)

# Create a black image, a window and bind the function to window
img = np.zeros((512,512,3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image',draw_circle)

while(1):
    cv.imshow('image',img)
    if cv.waitKey(20) & 0xFF == 27:
        break
cv.destroyAllWindows()
@endcode
More Advanced Demo
------------------
Now we go for a more advanced application, in which we draw either rectangles or circles (depending
on the mode we select) by dragging the mouse, like in a paint application. So our mouse callback
function has two parts: one to draw a rectangle and the other to draw circles. This specific example
will be really helpful in creating and understanding some interactive applications like object
tracking, image segmentation etc.
@code{.py}
import numpy as np
import cv2 as cv

drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle. Press 'm' to toggle to curve
ix,iy = -1,-1

# mouse callback function
def draw_circle(event,x,y,flags,param):
    global ix,iy,drawing,mode

    if event == cv.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y

    elif event == cv.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
            else:
                cv.circle(img,(x,y),5,(0,0,255),-1)

    elif event == cv.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
        else:
            cv.circle(img,(x,y),5,(0,0,255),-1)
@endcode
Next we have to bind this mouse callback function to OpenCV window. In the main loop, we should set
a keyboard binding for key 'm' to toggle between rectangle and circle.
@code{.py}
img = np.zeros((512,512,3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image',draw_circle)

while(1):
    cv.imshow('image',img)
    k = cv.waitKey(1) & 0xFF
    if k == ord('m'):
        mode = not mode
    elif k == 27:
        break

cv.destroyAllWindows()
@endcode
Additional Resources
--------------------
Goal
----
- Learn to bind trackbar to OpenCV windows
- You will learn these functions : **cv.getTrackbarPos()**, **cv.createTrackbar()** etc.
Code Demo
---------
Here we will create a simple application which shows the color you specify. You have a window which
shows the color, and three trackbars to specify each of the B, G, R colors. You slide the trackbars and
the window color changes correspondingly. By default, the initial color is set to black.
For the cv.createTrackbar() function, the first argument is the trackbar name, the second one is the window
name to which it is attached, the third argument is the default value, the fourth one is the maximum value,
and the fifth one is the callback function, which is executed every time the trackbar value changes. The
callback function always has a default argument, which is the trackbar position. In our case, the
function does nothing, so we simply pass. (To read a trackbar's current position, use cv.getTrackbarPos().)

Another important application of a trackbar is to use it as a button or switch. OpenCV, by default,
doesn't have button functionality, so you can use a trackbar to get it. In our
application, we have created one switch in which the application works only if the switch is ON;
otherwise the screen is always black.
@code{.py}
import numpy as np
import cv2 as cv

def nothing(x):
    pass

# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv.namedWindow('image')

# create trackbars for color change
cv.createTrackbar('R','image',0,255,nothing)
cv.createTrackbar('G','image',0,255,nothing)
cv.createTrackbar('B','image',0,255,nothing)

# create switch for ON/OFF functionality
switch = '0 : OFF \n1 : ON'
cv.createTrackbar(switch, 'image',0,1,nothing)

while(1):
    cv.imshow('image',img)
    k = cv.waitKey(1) & 0xFF
    if k == 27:
        break

    # get current positions of four trackbars
    r = cv.getTrackbarPos('R','image')
    g = cv.getTrackbarPos('G','image')
    b = cv.getTrackbarPos('B','image')
    s = cv.getTrackbarPos(switch,'image')

    if s == 0:
        img[:] = 0
    else:
        img[:] = [b,g,r]

cv.destroyAllWindows()
@endcode
The screenshot of the application looks like below:
Goal
----
- Learn to read video, display video and save video.
- Learn to capture from Camera and display it.
- You will learn these functions : **cv.VideoCapture()**, **cv.VideoWriter()**
Capture Video from Camera
-------------------------
To capture a video, you need to create a **VideoCapture** object. Its argument can be either the
device index or the name of a video file. A device index is just the number specifying which camera:
normally one camera will be connected, so I simply pass 0 (or -1). You can select
the second camera by passing 1, and so on. After that, you can capture frame-by-frame. But at the
end, don't forget to release the capture.
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv.imshow('frame',gray)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
@endcode
`cap.read()` returns a bool (`True`/`False`). If the frame is read correctly, it will be `True`, so you can
check for the end of the video by checking this return value.
You can also access some of the features of this video using the **cap.get(propId)** method, where propId
is a number from 0 to 18. Each number denotes a property of the video (if it is applicable to that
video), and full details can be seen here: cv::VideoCapture::get().
Some of these values can be modified using **cap.set(propId, value)**, where value is the new value you
want.
For example, I can check the frame width and height by `cap.get(cv.CAP_PROP_FRAME_WIDTH)` and `cap.get(cv.CAP_PROP_FRAME_HEIGHT)`. It gives me
640x480 by default. But I want to modify it to 320x240. Just use `ret = cap.set(cv.CAP_PROP_FRAME_WIDTH,320)` and
`ret = cap.set(cv.CAP_PROP_FRAME_HEIGHT,240)`.
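As a short sketch using the same property flags:
@code{.py}
cap = cv.VideoCapture(0)
print( cap.get(cv.CAP_PROP_FRAME_WIDTH), cap.get(cv.CAP_PROP_FRAME_HEIGHT) )  # e.g. 640.0 480.0
ret = cap.set(cv.CAP_PROP_FRAME_WIDTH,320)
ret = cap.set(cv.CAP_PROP_FRAME_HEIGHT,240)
@endcode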
@note If you are getting an error, make sure the camera is working fine with any other camera application
(like Cheese on Linux).
Playing Video from file
-----------------------
It is the same as capturing from a camera, just change the camera index to a video file name. Also, while
displaying the frame, use an appropriate time for `cv.waitKey()`. If it is too small, the video will be very
fast, and if it is too high, the video will be slow (well, that is how you can display videos in slow
motion). 25 milliseconds will be OK in normal cases.
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('vtest.avi')

while(cap.isOpened()):
    ret, frame = cap.read()

    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    cv.imshow('frame',gray)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv.destroyAllWindows()
@endcode
@note Make sure the proper versions of ffmpeg or gstreamer are installed. Sometimes working with
video capture is a headache, mostly due to a wrong installation of ffmpeg/gstreamer.

Saving a Video
--------------
So we capture a video, process it frame-by-frame and we want to save that video. For images, it is
very simple, just use `cv.imwrite()`. Here a little more work is required.
This time we create a **VideoWriter** object. We should specify the output file name (eg:
output.avi). Then we should specify the **FourCC** code (details in the next paragraph), the number of
frames per second (fps), and the frame size. The last argument is the isColor flag: if it is True, the
encoder expects color frames, otherwise it works with grayscale frames.

FourCC is a 4-byte code used to specify the video codec. It is platform dependent. The following
codecs work fine for me:
- In Windows: DIVX (More to be tested and added)
- In OSX: MJPG (.mp4), DIVX (.avi), X264 (.mkv).
FourCC code is passed as `cv.VideoWriter_fourcc('M','J','P','G')` or
`cv.VideoWriter_fourcc(*'MJPG')` for MJPG.
The code below captures from a camera, flips every frame in the vertical direction, and saves it.
@code{.py}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture(0)

# Define the codec and create VideoWriter object
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        frame = cv.flip(frame,0)

        # write the flipped frame
        out.write(frame)

        cv.imshow('frame',frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything if job is finished
cap.release()
out.release()
cv.destroyAllWindows()
@endcode
Additional Resources
--------------------
Goal
----
In this chapter, we will learn about
- Concept of Canny edge detection
- OpenCV functions for that : **cv.Canny()**
Theory
------
So what we finally get is strong edges in the image.
Canny Edge Detection in OpenCV
------------------------------
OpenCV puts all the above into a single function, **cv.Canny()**. We will see how to use it. The first
argument is our input image. The second and third arguments are our minVal and maxVal respectively. The
fourth argument is aperture_size, the size of the Sobel kernel used to find image gradients (by
default it is 3). The last argument is L2gradient, which specifies the equation for finding gradient
magnitude. If it is True, it uses the equation mentioned above, which is more accurate; otherwise it
uses this function: \f$Edge\_Gradient \; (G) = |G_x| + |G_y|\f$. By default, it is False.
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('messi5.jpg',0)
edges = cv.Canny(img,100,200)

plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
@endcode
Goal
----
- In this tutorial, you will learn how to convert images from one color-space to another, like
BGR \f$\leftrightarrow\f$ Gray, BGR \f$\leftrightarrow\f$ HSV etc.
- In addition to that, we will create an application which extracts a colored object in a video
- You will learn following functions : **cv.cvtColor()**, **cv.inRange()** etc.
Changing Color-space
--------------------
There are more than 150 color-space conversion methods available in OpenCV. But we will look into
only two which are most widely used ones, BGR \f$\leftrightarrow\f$ Gray and BGR \f$\leftrightarrow\f$ HSV.
For color conversion, we use the function cv.cvtColor(input_image, flag) where flag determines the
type of conversion.
For BGR \f$\rightarrow\f$ Gray conversion we use the flags cv.COLOR_BGR2GRAY. Similarly for BGR
\f$\rightarrow\f$ HSV, we use the flag cv.COLOR_BGR2HSV. To get other flags, just run following
commands in your Python terminal :
@code{.py}
>>> import cv2 as cv
>>> flags = [i for i in dir(cv) if i.startswith('COLOR_')]
>>> print( flags )
@endcode
@note For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
Now that we know how to convert a BGR image to HSV, we can use this to extract a colored object. In our
application, we will try to extract a blue colored object. So here is the method: take each frame of
the video, convert it from BGR to HSV color-space, threshold the HSV image for a range of blue, and
then extract the blue object alone. Below is the code, which is commented in detail:
@code{.py}
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)

while(1):

    # Take each frame
    _, frame = cap.read()

    # Convert BGR to HSV
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)

    # define range of blue color in HSV
    lower_blue = np.array([110,50,50])
    upper_blue = np.array([130,255,255])

    # Threshold the HSV image to get only blue colors
    mask = cv.inRange(hsv, lower_blue, upper_blue)

    # Bitwise-AND mask and original image
    res = cv.bitwise_and(frame,frame, mask= mask)

    cv.imshow('frame',frame)
    cv.imshow('mask',mask)
    cv.imshow('res',res)
    k = cv.waitKey(5) & 0xFF
    if k == 27:
        break

cv.destroyAllWindows()
@endcode
The image below shows the tracking of the blue object:
How to find HSV values to track?
--------------------------------
This is a common question found in [stackoverflow.com](http://www.stackoverflow.com). It is very simple and
you can use the same function, cv.cvtColor(). Instead of passing an image, you just pass the BGR
values you want. For example, to find the HSV value of Green, try following commands in Python
terminal:
@code{.py}
>>> green = np.uint8([[[0,255,0 ]]])
>>> hsv_green = cv.cvtColor(green,cv.COLOR_BGR2HSV)
>>> print( hsv_green )
[[[ 60 255 255]]]
@endcode
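From this, a common rule of thumb (an assumption for illustration, not a fixed API) is to take
[H-10, 100, 100] and [H+10, 255, 255] as the lower and upper bounds for cv.inRange():
@code{.py}
# hue 60 for green, as printed above
lower_green = np.array([50,100,100])
upper_green = np.array([70,255,255])
@endcode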
1. Moments
----------

Image moments help you to calculate some features like the center of mass of the object, area of the
object etc. Check out the wikipedia page on [Image
Moments](http://en.wikipedia.org/wiki/Image_moment)
The function **cv.moments()** gives a dictionary of all moment values calculated. See below:
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('star.jpg',0)
ret,thresh = cv.threshold(img,127,255,0)
im2,contours,hierarchy = cv.findContours(thresh, 1, 2)

cnt = contours[0]
M = cv.moments(cnt)
print( M )
@endcode
From these moments, you can extract useful data like area, centroid etc. The centroid is given by the
relations \f$C_x = \frac{M_{10}}{M_{00}}\f$ and \f$C_y = \frac{M_{01}}{M_{00}}\f$.
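In code, with M as computed above:
@code{.py}
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
@endcode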
2. Contour Area
---------------
Contour area is given by the function **cv.contourArea()** or from moments, **M['m00']**.
@code{.py}
area = cv.contourArea(cnt)
@endcode
3. Contour Perimeter
--------------------
It is also called arc length. It can be found using the **cv.arcLength()** function. The second
argument specifies whether the shape is a closed contour (pass True) or just a curve.
@code{.py}
perimeter = cv.arcLength(cnt,True)
@endcode
4. Contour Approximation
It approximates a contour shape to another shape with fewer vertices, depending upon the precision
we specify; you can use this function to approximate the shape. The second argument is called epsilon,
which is the maximum distance from the contour to the approximated contour. It is an accuracy parameter, and a wise
selection of epsilon is needed to get the correct output.
@code{.py}
epsilon = 0.1*cv.arcLength(cnt,True)
approx = cv.approxPolyDP(cnt,epsilon,True)
@endcode
Below, in the second image, the green line shows the approximated curve for epsilon = 10% of the arc length.
The third image shows the same for epsilon = 1% of the arc length. The third argument specifies whether the
curve is closed or not.

5. Convex Hull
--------------
Convex Hull will look similar to contour approximation, but it is not (Both may provide same results
in some cases). Here, **cv.convexHull()** function checks a curve for convexity defects and
corrects it. Generally speaking, convex curves are curves which always bulge outward, or are
at least flat; if a curve bulges inward, that is called a convexity defect. For example, check the
below image of a hand. The red line shows the convex hull of the hand, and the double-sided arrow marks show the
convexity defects, which are the local maximum deviations of the hull from the contour.

There are a few things to discuss about its syntax:
@code{.py}
hull = cv.convexHull(points[, hull[, clockwise[, returnPoints]]])
@endcode
Arguments details:

- *points* are the contours we pass into.
- *hull* is the output; normally we avoid it.
- *clockwise*: orientation flag. If it is True, the output convex hull is oriented clockwise;
  otherwise, it is oriented counter-clockwise.
- *returnPoints*: by default True, in which case it returns the coordinates of the hull points. If
  False, it returns the indices of the contour points corresponding to the hull points.
So to get a convex hull as in above image, following is sufficient:
@code{.py}
hull = cv.convexHull(cnt)
@endcode
But if you want to find convexity defects, you need to pass returnPoints = False. To understand this,
take the rectangle image above: first find its contour as cnt. With returnPoints = True the hull
contains the corner points of the rectangle, while with returnPoints = False it contains the indices of
those points in the contour. You will see this again when we discuss convexity defects.
6. Checking Convexity
---------------------
There is a function to check if a curve is convex or not, **cv.isContourConvex()**. It just returns
True or False. Not a big deal.
@code{.py}
k = cv.isContourConvex(cnt)
@endcode
7. Bounding Rectangle
---------------------

There are two types of bounding rectangles.
### 7.a. Straight Bounding Rectangle
It is a straight rectangle, it doesn't consider the rotation of the object. So area of the bounding
rectangle won't be minimum. It is found by the function **cv.boundingRect()**.
Let (x,y) be the top-left coordinate of the rectangle and (w,h) be its width and height.
@code{.py}
x,y,w,h = cv.boundingRect(cnt)
cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
@endcode
### 7.b. Rotated Rectangle
Here, the bounding rectangle is drawn with minimum area, so it considers the rotation also. The function
used is **cv.minAreaRect()**. It returns a Box2D structure which contains the following details: (
center (x,y), (width, height), angle of rotation ). But to draw this rectangle, we need the 4 corners of
the rectangle. They are obtained by the function **cv.boxPoints()**.
@code{.py}
rect = cv.minAreaRect(cnt)
box = cv.boxPoints(rect)
box = np.int0(box)
cv.drawContours(img,[box],0,(0,0,255),2)
@endcode
Both the rectangles are shown in a single image. Green rectangle shows the normal bounding rect. Red
rectangle is the rotated rect.
8. Minimum Enclosing Circle
---------------------------
Next we find the circumcircle of an object using the function **cv.minEnclosingCircle()**. It is a
circle which completely covers the object with minimum area.
@code{.py}
(x,y),radius = cv.minEnclosingCircle(cnt)
center = (int(x),int(y))
radius = int(radius)
cv.circle(img,center,radius,(0,255,0),2)
@endcode
![image](images/circumcircle.png)
9. Fitting an Ellipse
---------------------
Next one is to fit an ellipse to an object. It returns the rotated rectangle in which the ellipse is
inscribed.
@code{.py}
ellipse = cv.fitEllipse(cnt)
cv.ellipse(img,ellipse,(0,255,0),2)
@endcode
![image](images/fitellipse.png)
10. Fitting a Line
------------------

Similarly we can fit a line to a set of points. The image below contains a set of white points. We can
approximate a straight line to it.
@code{.py}
rows,cols = img.shape[:2]
[vx,vy,x,y] = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01)
lefty = int((-x*vy/vx) + y)
righty = int(((cols-x)*vy/vx)+y)
cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)
@endcode
![image](images/fitline.jpg)
1. Aspect Ratio
---------------

It is the ratio of width to height of the bounding rect of the object.
\f[Aspect \; Ratio = \frac{Width}{Height}\f]
@code{.py}
x,y,w,h = cv.boundingRect(cnt)
aspect_ratio = float(w)/h
@endcode
2. Extent
---------

Extent is the ratio of contour area to bounding rectangle area.
\f[Extent = \frac{Object \; Area}{Bounding \; Rectangle \; Area}\f]
@code{.py}
area = cv.contourArea(cnt)
x,y,w,h = cv.boundingRect(cnt)
rect_area = w*h
extent = float(area)/rect_area
@endcode
3. Solidity
-----------

Solidity is the ratio of contour area to its convex hull area.
\f[Solidity = \frac{Contour \; Area}{Convex \; Hull \; Area}\f]
@code{.py}
area = cv.contourArea(cnt)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
@endcode
4. Equivalent Diameter
----------------------

Equivalent Diameter is the diameter of the circle whose area is the same as the contour area.
\f[Equivalent \; Diameter = \sqrt{\frac{4 \times Contour \; Area}{\pi}}\f]
@code{.py}
area = cv.contourArea(cnt)
equi_diameter = np.sqrt(4*area/np.pi)
@endcode
5. Orientation
--------------
Orientation is the angle at which the object is directed. The following method also gives the Major Axis and
Minor Axis lengths.
@code{.py}
(x,y),(MA,ma),angle = cv.fitEllipse(cnt)
@endcode
6. Mask and Pixel Points
------------------------
In some cases, we may need all the points which comprise that object. It can be done as follows:
@code{.py}
mask = np.zeros(imgray.shape,np.uint8)
cv.drawContours(mask,[cnt],0,255,-1)
pixelpoints = np.transpose(np.nonzero(mask))
#pixelpoints = cv.findNonZero(mask)
@endcode
Here, two methods, one using Numpy functions, the other using an OpenCV function (the last commented line),
are given to do the same thing. The results are the same, but with a slight difference: Numpy gives
coordinates in **(row, column)** format, while OpenCV gives coordinates in **(x,y)** format, so
basically the answers are interchanged. Note that **row = x** and **column = y**.

7. Maximum Value, Minimum Value and their locations
---------------------------------------------------

We can find these parameters using a mask image.
@code{.py}
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(imgray,mask = mask)
@endcode
8. Mean Color or Mean Intensity
-------------------------------
Here, we can find the average color of an object, or the average intensity of the object in
grayscale mode. We again use the same mask to do it.
@code{.py}
mean_val = cv.mean(im,mask = mask)
@endcode
9. Extreme Points
-----------------
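Extreme points mean the topmost, bottommost, rightmost and leftmost points of the object. A minimal
sketch for a contour cnt, using the usual Numpy argmin/argmax idiom (treat it as an illustration):
@code{.py}
leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
@endcode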
Goal
----
- Understand what contours are.
- Learn to find contours, draw contours etc
- You will see these functions : **cv.findContours()**, **cv.drawContours()**
What are contours?
------------------
Contours can be explained simply as a curve joining all the continuous points (along the boundary)
having the same color or intensity. Contours are a useful tool for shape analysis and object
detection and recognition.
Let's see how to find contours of a binary image:
@code{.py}
import numpy as np
import cv2 as cv

im = cv.imread('test.jpg')
imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(imgray, 127, 255, 0)
im2, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
@endcode
@endcode
See, there are three arguments in **cv.findContours()** function, first one is source image, second
is the contour retrieval mode, and the third is the contour approximation method. It outputs a modified image, the contours, and
the hierarchy. contours is a Python list of all the contours in the image. Each individual contour is a
Numpy array of (x,y) coordinates of the boundary points of the object.

@note We will discuss the second and third arguments, and the hierarchy, in detail later. Until then,
the values given to them in the code sample will work fine for all images.
How to draw the contours?
-------------------------
To draw the contours, cv.drawContours function is used. It can also be used to draw any shape
provided you have its boundary points. Its first argument is the source image, the second argument is the
contours, which should be passed as a Python list, the third argument is the index of the contour (useful when
drawing an individual contour; to draw all contours, pass -1), and the remaining arguments are color,
thickness etc.
* To draw all the contours in an image:
@code{.py}
cv.drawContours(img, contours, -1, (0,255,0), 3)
@endcode
* To draw an individual contour, say 4th contour:
@code{.py}
cv.drawContours(img, contours, 3, (0,255,0), 3)
@endcode
* But most of the time, below method will be useful:
@code{.py}
cnt = contours[4]
cv.drawContours(img, [cnt], 0, (0,255,0), 3)
@endcode
@note Last two methods are same, but when you go forward, you will see last one is more useful.
Contour Approximation Method
============================
This is the third argument in cv.findContours function. What does it denote actually?
Above, we said that contours are the boundaries of a shape with the same intensity. It stores the (x,y)
coordinates of the boundary of a shape. But does it store all the coordinates? That is specified by
this contour approximation method.
If you pass cv.CHAIN_APPROX_NONE, all the boundary points are stored. But actually do we need all
the points? For example, you found the contour of a straight line. Do you need all the points on the line
to represent that line? No, we need just the two end points of that line. This is what
cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby
saving memory.
The image below of a rectangle demonstrates this technique. Just draw a circle on all the coordinates in
the contour array (drawn in blue color). The first image shows the points I got with cv.CHAIN_APPROX_NONE
(734 points), and the second image shows the one with cv.CHAIN_APPROX_SIMPLE (only 4 points). See how
much memory it saves!
![image](images/none.jpg)
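A quick way to see the compression, reusing the variables from the earlier example (the exact counts
depend on your image):
@code{.py}
im2, c_none, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
im2, c_simple, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
print( len(c_none[0]), len(c_simple[0]) )   # many points vs only a few
@endcode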
Theory
------
In the last few articles on contours, we have worked with several functions related to contours
provided by OpenCV. But when we found the contours in an image using the **cv.findContours()** function,
we passed an argument, **Contour Retrieval Mode**. We usually passed **cv.RETR_LIST** or
**cv.RETR_TREE**, and it worked nicely. But what does it actually mean?
Also, in the output, we got three arrays: the first is the image, the second is our contours, and one more
output which we named **hierarchy** (please check the code in the previous articles). But we
never used this hierarchy anywhere. So what is this hierarchy, and what is it for? What is its
relationship with the previously mentioned function argument? That is what we are going to deal with
in this article.
### What is Hierarchy?
Normally we use the **cv.findContours()** function to detect objects in an image, right ? Sometimes
objects are in different locations. But in some cases, some shapes are inside other shapes, just
like nested figures. In this case, we call the outer one the **parent** and the inner one the **child**. This
way, contours in an image have some relationship to each other. And we can specify how one contour is
connected to another: for example, is it a child of some other contour, or is it a parent? The
representation of this relationship is called the **hierarchy**.
@note If there is no child or parent, that field is taken as -1
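A small sketch to inspect it, assuming a thresholded image thresh as in the previous articles (each
hierarchy row holds the four fields [Next, Previous, First_Child, Parent]):
@code{.py}
im2, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
print( hierarchy )   # one [Next, Previous, First_Child, Parent] row per contour
@endcode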
So now that we know about the hierarchy style used in OpenCV, we can check into the Contour Retrieval Modes
in OpenCV with the help of the same image given above, ie what do flags like cv.RETR_LIST,
cv.RETR_TREE, cv.RETR_CCOMP, cv.RETR_EXTERNAL etc. mean?
Contour Retrieval Mode
----------------------
### 4. RETR_TREE
And this is the final guy, Mr.Perfect. It retrieves all the contours and creates a full family
hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**.
For example, I took the above image, rewrote the code for cv.RETR_TREE, reordered the contours as per the
result given by OpenCV, and analyzed it. Again, red letters give the contour number and green letters
give the hierarchy order.
Theory and Code
---------------

### 1. Convexity Defects

We saw what a convex hull is in the second chapter about contours. Any deviation of the object from this
hull can be considered a convexity defect.
OpenCV comes with a ready-made function to find this, **cv.convexityDefects()**. A basic function
call would look like below:
@code{.py}
hull = cv.convexHull(cnt,returnPoints = False)
defects = cv.convexityDefects(cnt,hull)
@endcode
@note Remember we have to pass returnPoints = False while finding the convex hull, in order to find
convexity defects.

It returns an array where each row contains these values: [start point, end point, farthest point,
approximate distance to farthest point]. We can visualize it using an image: we draw a line joining the
start point and end point, then draw a circle at the farthest point. Remember that the first
three values returned are indices of cnt, so we have to bring those values from cnt.
@code{.py}
import cv2 as cv
import numpy as np

img = cv.imread('star.jpg')
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(img_gray, 127, 255,0)
im2,contours,hierarchy = cv.findContours(thresh,2,1)
cnt = contours[0]

hull = cv.convexHull(cnt,returnPoints = False)
defects = cv.convexityDefects(cnt,hull)

for i in range(defects.shape[0]):
    s,e,f,d = defects[i,0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv.line(img,start,end,[0,255,0],2)
    cv.circle(img,far,5,[0,0,255],-1)

cv.imshow('img',img)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
And see the result:
### 2. Point Polygon Test

This function finds the shortest distance between a point in the image and a contour. It returns a
distance which is negative when the point is outside the contour, positive when the point is inside, and zero
if the point is on the contour.

For example, we can check the point (50,50) as follows:
@code{.py}
dist = cv.pointPolygonTest(cnt,(50,50),True)
@endcode
In the function, the third argument is measureDist. If it is True, it finds the signed distance. If
False, it finds whether the point is inside, outside, or on the contour (it returns +1, -1, or 0
respectively).

@note If you don't need the distance, make sure the third argument is False, because computing it is a
time consuming process; making it False gives about a 2-3X speedup.
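For instance, the inside/outside-only variant is just:
@code{.py}
retval = cv.pointPolygonTest(cnt,(50,50),False)   # returns +1, -1 or 0; no distance computed
@endcode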
### 3. Match Shapes
OpenCV comes with a function **cv.matchShapes()** which enables us to compare two shapes, or two
contours and returns a metric showing the similarity. The lower the result, the better match it is.
It is calculated based on the hu-moment values. Different measurement methods are explained in the
docs.
@code{.py}
import cv2 as cv
import numpy as np

img1 = cv.imread('star.jpg',0)
img2 = cv.imread('star2.jpg',0)

ret, thresh = cv.threshold(img1, 127, 255,0)
ret, thresh2 = cv.threshold(img2, 127, 255,0)
im2,contours,hierarchy = cv.findContours(thresh,2,1)
cnt1 = contours[0]
im2,contours,hierarchy = cv.findContours(thresh2,2,1)
cnt2 = contours[0]

ret = cv.matchShapes(cnt1,cnt2,1,0.0)
print( ret )
@endcode
I tried matching shapes with different shapes given below:

See, even image rotation doesn't affect this comparison much.
@sa [Hu-Moments](http://en.wikipedia.org/wiki/Image_moment#Rotation_invariant_moments) are seven
moments invariant to translation, rotation and scale. Seventh one is skew-invariant. Those values
can be found using **cv.HuMoments()** function.
Additional Resources
====================
Exercises
---------
-# Check the documentation for **cv.pointPolygonTest()**; you can find a nice image in red and
blue color. It represents the distance from all pixels to the white curve on it. All pixels
inside the curve are blue, depending on the distance; similarly, outside points are red. Contour edges
are marked with white. So the problem is simple: write code to create such a representation of
distance.
-# Compare images of digits or letters using **cv.matchShapes()**. (That would be a simple step
towards OCR.)
2D Convolution ( Image Filtering )
----------------------------------

As in one-dimensional signals, images can also be filtered with various low-pass filters (LPF),
high-pass filters (HPF), etc. An LPF helps in removing noise and blurring images, while an HPF helps
in finding edges in images.
OpenCV provides a function **cv.filter2D()** to convolve a kernel with an image. As an example, we
will try an averaging filter on an image. A 5x5 averaging filter kernel will look like below:
\f[K = \frac{1}{25} \begin{bmatrix} 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \end{bmatrix}\f]
The operation is like this: keep this kernel above a pixel, add all the 25 pixels below this kernel,
take the average, and replace the central pixel with the new average value. This operation is continued
for all the pixels in the image. Try this code and check the result:
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('opencv_logo.png')

kernel = np.ones((5,5),np.float32)/25
dst = cv.filter2D(img,-1,kernel)

plt.subplot(121),plt.imshow(img),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(dst),plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.show()
@endcode
Image Blurring (Image Smoothing)
--------------------------------

Image blurring is achieved by convolving the image with a low-pass filter kernel. It is useful for
removing noise: it actually removes high frequency content (eg: noise, edges) from the image (it can
blur the edges too). OpenCV provides mainly four types of blurring techniques.

### 1. Averaging
This is done by convolving the image with a normalized box filter. It simply takes the average of all
the pixels under the kernel area and replaces the central element. This is done by the function
**cv.blur()** or **cv.boxFilter()**. Check the docs for more details about the kernel. We should
specify the width and height of the kernel. A 3x3 normalized box filter would look like below:
\f[K = \frac{1}{9} \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{bmatrix}\f]
@note If you don't want to use normalized box filter, use **cv.boxFilter()**. Pass an argument
normalize=False to the function.
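A minimal sketch of the unnormalized variant (it sums the pixels under the kernel instead of averaging
them):
@code{.py}
dst = cv.boxFilter(img, -1, (5,5), normalize=False)
@endcode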
Check a sample demo below with a kernel of 5x5 size:
@code{.py}
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img = cv.imread('opencv-logo-white.png')

blur = cv.blur(img,(5,5))

plt.subplot(121),plt.imshow(img),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(blur),plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()
@endcode
Result:
### 2. Gaussian Blurring
In this method, instead of a box filter, a Gaussian kernel is used. It is done with the function
**cv.GaussianBlur()**. We should specify the width and height of the kernel, which should be positive
and odd. We also should specify the standard deviation in the X and Y directions, sigmaX and sigmaY
respectively. If only sigmaX is specified, sigmaY is taken to be the same as sigmaX. If both are given as
zeros, they are calculated from the kernel size. Gaussian blurring is highly effective in removing
Gaussian noise from an image.
If you want, you can create a Gaussian kernel with the function, **cv.getGaussianKernel()**.
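A small sketch (the outer-product line is an illustration of separability, not code from this page):
@code{.py}
k = cv.getGaussianKernel(5,0)   # ksize=5; a non-positive sigma is computed from ksize
kernel2d = k * k.T              # separable filter: outer product gives the 5x5 kernel
@endcode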
The above code can be modified for Gaussian blurring:
@code{.py}
blur = cv.GaussianBlur(img,(5,5),0)
@endcode
Result:
### 3. Median Blurring
Here, the function **cv.medianBlur()** takes the median of all the pixels under the kernel area, and the
central element is replaced with this median value. This is highly effective against salt-and-pepper noise
in images. Interestingly, in the above filters, the central element is a newly
calculated value, which may be a pixel value in the image or a new value. But in median blurring,
the central element is always replaced by some pixel value in the image, which reduces the noise
effectively. Its kernel size should be a positive odd integer.
In this demo, I added 50% noise to our original image and applied median blurring. Check the result:
@code{.py}
median = cv.medianBlur(img,5)
@endcode
Result:
### 4. Bilateral Filtering
**cv.bilateralFilter()** is highly effective at noise removal while keeping edges sharp, but the
operation is slower compared to other filters. We already saw that a Gaussian filter takes a
neighbourhood around the pixel and finds its Gaussian weighted average. This Gaussian filter is a
function of space alone, that is, nearby pixels are considered while filtering. It doesn't consider
whether pixels have almost the same intensity, or whether a pixel is an edge pixel, so it blurs edges
too, which we don't want. The bilateral filter also uses a Gaussian filter in space, but adds one more
Gaussian filter which is a function of pixel intensity difference. The Gaussian function of space makes
sure only nearby pixels are considered for blurring, while the Gaussian function of intensity difference
makes sure only pixels with intensities similar to the central pixel are considered. So it preserves
edges, since pixels at edges will have large intensity variation.
The sample below shows the use of a bilateral filter (for details on arguments, visit the docs).
@code{.py}
blur = cv.bilateralFilter(img,9,75,75)
@endcode
Result:
Goals
-----
- Learn to apply different geometric transformations to images, like translation, rotation, affine
transformation etc.
- You will see these functions: **cv.getPerspectiveTransform**
Transformations
---------------
OpenCV provides two transformation functions, **cv.warpAffine** and **cv.warpPerspective**, with
which you can have all kinds of transformations. **cv.warpAffine** takes a 2x3 transformation
matrix while **cv.warpPerspective** takes a 3x3 transformation matrix as input.
### Scaling
Scaling is just resizing of the image. OpenCV comes with a function **cv.resize()** for this
purpose. The size of the image can be specified manually, or you can specify the scaling factor.
Different interpolation methods are used. Preferable interpolation methods are **cv.INTER_AREA**
for shrinking and **cv.INTER_CUBIC** (slow) & **cv.INTER_LINEAR** for zooming. By default,
interpolation method used is **cv.INTER_LINEAR** for all resizing purposes. You can resize an
input image with either of the following methods:
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('messi5.jpg')

res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC)

#OR

height, width = img.shape[:2]
res = cv.resize(img,(2*width, 2*height), interpolation = cv.INTER_CUBIC)
@endcode
### Translation
Translation is the shifting of an object's location. If you know the shift in the (x,y) direction, let it
be \f$(t_x,t_y)\f$, you can create the transformation matrix \f$\textbf{M}\f$ as follows:
\f[M = \begin{bmatrix} 1 & 0 & t_x \\ 0 & 1 & t_y \end{bmatrix}\f]
You can make it into a Numpy array of type np.float32 and pass it into the **cv.warpAffine()**
function. See the example below for a shift of (100,50):
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('messi5.jpg',0)
rows,cols = img.shape

M = np.float32([[1,0,100],[0,1,50]])
dst = cv.warpAffine(img,M,(cols,rows))

cv.imshow('img',dst)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
**warning**
Third argument of the **cv.warpAffine()** function is the size of the output image, which should
be in the form of **(width, height)**. Remember width = number of columns, and height = number of
rows.
### Rotation

OpenCV provides scaled rotation with an adjustable center of rotation, so that you can rotate about any
location you prefer. The modified transformation matrix is given by

\f[\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot center.x - \beta \cdot center.y \\ - \beta & \alpha & \beta \cdot center.x + (1- \alpha ) \cdot center.y \end{bmatrix}\f]

where:
\f[\begin{array}{l} \alpha = scale \cdot \cos \theta , \\ \beta = scale \cdot \sin \theta \end{array}\f]
To find this transformation matrix, OpenCV provides a function, **cv.getRotationMatrix2D**. Check the
example below, which rotates the image by 90 degrees with respect to the center, without any scaling.
@code{.py}
img = cv.imread('messi5.jpg',0)
rows,cols = img.shape

M = cv.getRotationMatrix2D((cols/2,rows/2),90,1)
dst = cv.warpAffine(img,M,(cols,rows))
@endcode
See the result:
### Affine Transformation
In affine transformation, all parallel lines in the original image will still be parallel in the
output image. To find the transformation matrix, we need three points from input image and their
corresponding locations in output image. Then **cv.getAffineTransform** will create a 2x3 matrix
which is to be passed to **cv.warpAffine**.
Check below example, and also look at the points I selected (which are marked in Green color):
@code{.py}
img = cv.imread('drawing.png')
rows,cols,ch = img.shape

pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2 = np.float32([[10,100],[200,50],[100,250]])

M = cv.getAffineTransform(pts1,pts2)

dst = cv.warpAffine(img,M,(cols,rows))

plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
@endcode
### Perspective Transformation

For perspective transformation, you need a 3x3 transformation matrix. Straight lines will remain
straight even after the transformation. To find this transformation matrix, you need 4 points on the
input image and the corresponding points on the output image. Among these 4 points, 3 of them should not
be collinear. The transformation matrix can then be found by the function
**cv.getPerspectiveTransform**. Then apply **cv.warpPerspective** with this 3x3 transformation
matrix.
See the code below:
@code{.py}
img = cv.imread('sudoku.png')
rows,cols,ch = img.shape

pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])

M = cv.getPerspectiveTransform(pts1,pts2)

dst = cv.warpPerspective(img,M,(300,300))

plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
@endcode
It is illustrated in the image below (Image Courtesy: <http://www.cs.ru.ac.za/resear…>)
Demo
----
Now we go for grabcut algorithm with OpenCV. OpenCV has the function, **cv.grabCut()** for this. We
will see its arguments first:
- *img* - Input image
- *mask* - It is a mask image where we specify which areas are background, foreground or
probable background/foreground etc. It is done by the following flags, **cv.GC_BGD,
cv.GC_FGD, cv.GC_PR_BGD, cv.GC_PR_FGD**, or simply pass 0,1,2,3 to image.
- *rect* - It is the coordinates of a rectangle which includes the foreground object in the
format (x,y,w,h)
- *bgdModel*, *fgdModel* - These are arrays used by the algorithm internally. You just create
two np.float64 type zero arrays of size (1,65).
- *iterCount* - Number of iterations the algorithm should run.
- *mode* - It should be **cv.GC_INIT_WITH_RECT** or **cv.GC_INIT_WITH_MASK** or combined
which decides whether we are drawing rectangle or final touchup strokes.
First let's see with the rectangular mode. We load the image and create a similar mask image. We create
*fgdModel* and *bgdModel*. We give the rectangle parameters. It's all straightforward. Let the
algorithm run for 5 iterations. The mode should be *cv.GC_INIT_WITH_RECT* since we are using a
rectangle. Then run the grabcut. It modifies the mask image. In the new mask image, pixels will be
marked with four flags denoting background/foreground as specified above. So we modify the mask such
that all 0-pixels and 2-pixels are put to 0 (ie background) and all 1-pixels and 3-pixels are put to
1 (ie foreground pixels). Now our final mask is ready; just multiply it with the input image to get the
segmented image.
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('messi5.jpg')
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (50,50,450,290)
cv.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
@endcode

*Then I marked the missed foreground in white and the unwanted background in black on a new layer,
filled the remaining background with gray, loaded that mask image in OpenCV, and edited the original
mask image we got with the corresponding values in the newly added mask image. Check the code below:*
@code{.py}
# newmask is the mask image I manually labelled
newmask = cv.imread('newmask.png',0)
# whereever it is marked white (sure foreground), change mask=1
# whereever it is marked black (sure background), change mask=0
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv.grabCut(img,mask,None,bgdModel,fgdModel,5,cv.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask[:,:,np.newaxis]
@endcode
Goal
----
In this chapter, we will learn to:
- Find Image gradients, edges etc
- We will see following functions : **cv.Sobel()**, **cv.Scharr()**, **cv.Laplacian()** etc
Theory
------
OpenCV provides three types of gradient filters, or high-pass filters: Sobel, Scharr and Laplacian.
We will see each one of them.

Code
----
The code below shows all the operators in a single diagram. All kernels are of 5x5 size. The depth of the
output image is passed as -1 to get the result in np.uint8 type.
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('dave.jpg',0)

laplacian = cv.Laplacian(img,cv.CV_64F)
sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
sobely = cv.Sobel(img,cv.CV_64F,0,1,ksize=5)

plt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
plt.show()
@endcode

Result:
One Important Matter!
---------------------
In our last example, output datatype is cv.CV_8U or np.uint8. But there is a slight problem with
that. Black-to-White transition is taken as Positive slope (it has a positive value) while
White-to-Black transition is taken as a Negative slope (It has negative value). So when you convert
data to np.uint8, all negative slopes are made zero. In simple words, you miss that edge.
If you want to detect both edges, better option is to keep the output datatype to some higher forms,
like cv.CV_16S, cv.CV_64F etc, take its absolute value and then convert back to cv.CV_8U.
The code below demonstrates this procedure for a horizontal Sobel filter and the difference in results.
@code{.py}
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('box.png',0)

# Output dtype = cv.CV_8U
sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5)

# Output dtype = cv.CV_64F. Then take its absolute and convert to cv.CV_8U
sobelx64f = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
abs_sobel64f = np.absolute(sobelx64f)
sobel_8u = np.uint8(abs_sobel64f)
@endcode
This will be useful in understanding further topics like Histogram Back-Projection.
2D Histogram in OpenCV
----------------------
It is quite simple and calculated using the same function, **cv.calcHist()**. For color histograms,
we need to convert the image from BGR to HSV. (Remember, for 1D histogram, we converted from BGR to
Grayscale). For 2D histograms, its parameters will be modified as follows:
@ -34,13 +34,13 @@ Grayscale). For 2D histograms, its parameters will be modified as follows:
Now check the code below:
@code{.py}
import cv2
import numpy as np
import cv2 as cv
img = cv2.imread('home.jpg')
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
img = cv.imread('home.jpg')
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
@endcode
That's it.
@ -50,12 +50,12 @@ That's it.
Numpy also provides a specific function for this : **np.histogram2d()**. (Remember, for 1D histogram
we used **np.histogram()** ).
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('home.jpg')
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
img = cv.imread('home.jpg')
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
h, s, v = cv.split(hsv)
hist, xbins, ybins = np.histogram2d(h.ravel(), s.ravel(), [180, 256], [[0, 180], [0, 256]])
@endcode
@ -67,10 +67,10 @@ Now we can check how to plot this color histogram.
Plotting 2D Histograms
----------------------
### Method - 1 : Using cv2.imshow()
### Method - 1 : Using cv.imshow()
The result we get is a two-dimensional array of size 180x256. So we can show it as we normally do,
using cv2.imshow() function. It will be a grayscale image and it won't give much idea what colors
using the cv.imshow() function. It will be a grayscale image, and it won't give much idea of what
colors are there unless you know the Hue values of the different colors.
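For instance, a minimal sketch of this approach (assuming the same 'home.jpg' image as above; the
counts are scaled into the displayable range first):
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('home.jpg')
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

# scale the bin counts to 0-255 so the array is displayable as a grayscale image
vis = cv.normalize(hist, None, 0, 255, cv.NORM_MINMAX).astype(np.uint8)

cv.imshow('2D Histogram', vis)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode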
### Method - 2 : Using Matplotlib
@ -84,13 +84,13 @@ I prefer this method. It is simple and better.
Consider code:
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('home.jpg')
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hist = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
img = cv.imread('home.jpg')
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
plt.imshow(hist,interpolation = 'nearest')
plt.show()

View File

@ -33,82 +33,81 @@ Algorithm in Numpy
-# First we need to calculate the color histogram of both the object we need to find (let it be
'M') and the image where we are going to search (let it be 'I').
@code{.py}
import cv2
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from matplotlib import pyplot as plt
#roi is the object or region of object we need to find
roi = cv2.imread('rose_red.png')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
roi = cv.imread('rose_red.png')
hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
#target is the image we search in
target = cv2.imread('rose.png')
hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
target = cv.imread('rose.png')
hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# Find the histograms using calcHist. Can be done with np.histogram2d also
M = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
I = cv2.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
M = cv.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
I = cv.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
@endcode
2. Find the ratio \f$R = \frac{M}{I}\f$. Then backproject R, i.e. use R as a palette and create a new image
with every pixel as its corresponding probability of being the target, i.e. B(x,y) = R[h(x,y),s(x,y)]
where h is the hue and s is the saturation of the pixel at (x,y). After that, apply the condition
\f$B(x,y) = min[B(x,y), 1]\f$.
@code{.py}
h,s,v = cv2.split(hsvt)
R = M/I   # the ratio histogram described above
h,s,v = cv.split(hsvt)
B = R[h.ravel(),s.ravel()]
B = np.minimum(B,1)
B = B.reshape(hsvt.shape[:2])
@endcode
3. Now apply a convolution with a circular disc, \f$B = D \ast B\f$, where D is the disc kernel.
@code{.py}
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(B,-1,disc,B)
disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
cv.filter2D(B,-1,disc,B)
B = np.uint8(B)
cv2.normalize(B,B,0,255,cv2.NORM_MINMAX)
cv.normalize(B,B,0,255,cv.NORM_MINMAX)
@endcode
4. Now the location of the maximum intensity gives us the location of the object. If we are expecting a
region in the image, thresholding for a suitable value gives a nice result.
@code{.py}
ret,thresh = cv2.threshold(B,50,255,0)
ret,thresh = cv.threshold(B,50,255,0)
@endcode
That's it !!
Backprojection in OpenCV
------------------------
OpenCV provides an inbuilt function **cv2.calcBackProject()**. Its parameters are almost same as the
**cv2.calcHist()** function. One of its parameter is histogram which is histogram of the object and
OpenCV provides an inbuilt function **cv.calcBackProject()**. Its parameters are almost the same as those of the
**cv.calcHist()** function. One of its parameters is the histogram of the object, which we have to find.
Also, the object histogram should be normalized before being passed to the
backproject function. It returns the probability image. Then we convolve the image with a disc
kernel and apply a threshold. Below is my code and output :
@code{.py}
import cv2
import numpy as np
import cv2 as cv
roi = cv2.imread('rose_red.png')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
roi = cv.imread('rose_red.png')
hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
target = cv2.imread('rose.png')
hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
target = cv.imread('rose.png')
hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# calculating object histogram
roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
roihist = cv.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# normalize histogram and apply backprojection
cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
cv.normalize(roihist,roihist,0,255,cv.NORM_MINMAX)
dst = cv.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
# Now convolute with circular disc
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
cv.filter2D(dst,-1,disc,dst)
# threshold and binary AND
ret,thresh = cv2.threshold(dst,50,255,0)
thresh = cv2.merge((thresh,thresh,thresh))
res = cv2.bitwise_and(target,thresh)
ret,thresh = cv.threshold(dst,50,255,0)
thresh = cv.merge((thresh,thresh,thresh))
res = cv.bitwise_and(target,thresh)
res = np.vstack((target,thresh,res))
cv2.imwrite('res.jpg',res)
cv.imwrite('res.jpg',res)
@endcode
Below is one example I worked with. I used the region inside the blue rectangle as the sample object,
and I wanted to extract the full ground.

View File

@ -7,7 +7,7 @@ Goal
Learn to
- Find histograms, using both OpenCV and Numpy functions
- Plot histograms, using OpenCV and Matplotlib functions
- You will see these functions : **cv2.calcHist()**, **np.histogram()** etc.
- You will see these functions : **cv.calcHist()**, **np.histogram()** etc.
Theory
------
@ -57,10 +57,10 @@ intensity values.
### 1. Histogram Calculation in OpenCV
So now we use **cv2.calcHist()** function to find the histogram. Let's familiarize with the function
So now we use the **cv.calcHist()** function to find the histogram. Let's familiarize ourselves with the function
and its parameters :
<center><em>cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])</em></center>
<center><em>cv.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])</em></center>
-# images : it is the source image of type uint8 or float32. It should be given in square brackets,
ie, "[img]".
@ -78,8 +78,8 @@ and its parameters :
So let's start with a sample image. Simply load an image in grayscale mode and find its full
histogram.
@code{.py}
img = cv2.imread('home.jpg',0)
hist = cv2.calcHist([img],[0],None,[256],[0,256])
img = cv.imread('home.jpg',0)
hist = cv.calcHist([img],[0],None,[256],[0,256])
@endcode
hist is a 256x1 array; each value corresponds to the number of pixels in that image with the
corresponding pixel value.
@ -118,11 +118,11 @@ Matplotlib comes with a histogram plotting function : matplotlib.pyplot.hist()
It directly finds the histogram and plots it. You don't need to use calcHist() or the np.histogram()
function to find the histogram. See the code below:
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('home.jpg',0)
img = cv.imread('home.jpg',0)
plt.hist(img.ravel(),256,[0,256]); plt.show()
@endcode
You will get a plot as below :
@ -132,14 +132,14 @@ You will get a plot as below :
Or you can use the normal plot of Matplotlib, which is good for a BGR plot. For that, you need to
find the histogram data first. Try the code below:
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('home.jpg')
img = cv.imread('home.jpg')
color = ('b','g','r')
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
histr = cv.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
@ -154,28 +154,28 @@ should be due to the sky)
### 2. Using OpenCV
Well, here you adjust the values of the histogram along with its bin values to look like x,y
coordinates so that you can draw it using cv2.line() or cv2.polyline() function to generate same
coordinates, so that you can draw it using the cv.line() or cv.polylines() function to generate the same
image as above. This is already available in the OpenCV-Python official samples. Check the
code at samples/python/hist.py.
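A condensed sketch of that idea (not the actual sample code, just an illustration of the same
technique): the bin counts are normalized and drawn as one polyline per channel.
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('home.jpg')
canvas = np.zeros((256, 256, 3), np.uint8)

for ch, col in enumerate(((255, 0, 0), (0, 255, 0), (0, 0, 255))):
    hist = cv.calcHist([img], [ch], None, [256], [0, 256])
    cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
    # x runs over the 256 bins, y is the (flipped) normalized bin count
    pts = np.int32(np.column_stack((np.arange(256), 255 - hist.ravel())))
    cv.polylines(canvas, [pts], False, col)

cv.imshow('histogram', canvas)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode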
Application of Mask
-------------------
We used cv2.calcHist() to find the histogram of the full image. What if you want to find histograms
We used cv.calcHist() to find the histogram of the full image. What if you want to find histograms
of some regions of an image? Just create a mask image with white color on the region where you want
to find the histogram, and black otherwise. Then pass this as the mask.
@code{.py}
img = cv2.imread('home.jpg',0)
img = cv.imread('home.jpg',0)
# create a mask
mask = np.zeros(img.shape[:2], np.uint8)
mask[100:300, 100:400] = 255
masked_img = cv2.bitwise_and(img,img,mask = mask)
masked_img = cv.bitwise_and(img,img,mask = mask)
# Calculate histogram with mask and without mask
# Check third argument for mask
hist_full = cv2.calcHist([img],[0],None,[256],[0,256])
hist_mask = cv2.calcHist([img],[0],mask,[256],[0,256])
hist_full = cv.calcHist([img],[0],None,[256],[0,256])
hist_mask = cv.calcHist([img],[0],mask,[256],[0,256])
plt.subplot(221), plt.imshow(img, 'gray')
plt.subplot(222), plt.imshow(mask,'gray')

View File

@ -26,11 +26,11 @@ a very good explanation with worked out examples, so that you would understand a
after reading that. Instead, here we will see its Numpy implementation. After that, we will see
the OpenCV function.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('wiki.jpg',0)
img = cv.imread('wiki.jpg',0)
hist,bins = np.histogram(img.flatten(),256,[0,256])
@ -76,15 +76,15 @@ histogram equalized to make them all with same lighting conditions.
Histograms Equalization in OpenCV
---------------------------------
OpenCV has a function to do this, **cv2.equalizeHist()**. Its input is just grayscale image and
OpenCV has a function to do this, **cv.equalizeHist()**. Its input is just a grayscale image and
its output is our histogram-equalized image.
Below is a simple code snippet showing its usage for the same image we used :
@code{.py}
img = cv2.imread('wiki.jpg',0)
equ = cv2.equalizeHist(img)
img = cv.imread('wiki.jpg',0)
equ = cv.equalizeHist(img)
res = np.hstack((img,equ)) #stacking images side-by-side
cv2.imwrite('res.png',res)
cv.imwrite('res.png',res)
@endcode
![image](images/equalization_opencv.jpg)
@ -122,15 +122,15 @@ applied.
The code snippet below shows how to apply CLAHE in OpenCV:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
img = cv2.imread('tsukuba_l.png',0)
img = cv.imread('tsukuba_l.png',0)
# create a CLAHE object (Arguments are optional).
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(img)
cv2.imwrite('clahe_2.jpg',cl1)
cv.imwrite('clahe_2.jpg',cl1)
@endcode
See the result below and compare it with results above, especially the statue region:

View File

@ -6,7 +6,7 @@ Goal
In this chapter,
- We will learn to use Hough Transform to find circles in an image.
- We will see these functions: **cv2.HoughCircles()**
- We will see these functions: **cv.HoughCircles()**
Theory
------
@ -17,29 +17,29 @@ equation, we can see we have 3 parameters, so we need a 3D accumulator for hough
would be highly ineffective. So OpenCV uses a trickier method, the **Hough Gradient Method**, which
uses the gradient information of edges.
The function we use here is **cv2.HoughCircles()**. It has plenty of arguments which are well
The function we use here is **cv.HoughCircles()**. It has plenty of arguments which are well
explained in the documentation, so we will go directly to the code.
@code{.py}
import cv2
import numpy as np
import cv2 as cv
img = cv2.imread('opencv-logo-white.png',0)
img = cv2.medianBlur(img,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
img = cv.imread('opencv-logo-white.png',0)
img = cv.medianBlur(img,5)
cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,
circles = cv.HoughCircles(img,cv.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
cv.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('detected circles',cimg)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
Result is shown below:

View File

@ -7,7 +7,7 @@ Goal
In this chapter,
- We will understand the concept of the Hough Transform.
- We will see how to use it to detect lines in an image.
- We will see the following functions: **cv2.HoughLines()**, **cv2.HoughLinesP()**
- We will see the following functions: **cv.HoughLines()**, **cv.HoughLinesP()**
Theory
------
@ -62,7 +62,7 @@ denote they are the parameters of possible lines in the image. (Image courtesy:
Hough Transform in OpenCV
=========================
Everything explained above is encapsulated in the OpenCV function, **cv.HoughLines()**. It simply returns an array of \f$(\rho,
\theta)\f$ values. \f$\rho\f$ is measured in pixels and \f$\theta\f$ is measured in radians. The first parameter,
the input image, should be a binary image, so apply a threshold or use canny edge detection before
applying the hough transform. The second and third parameters are the \f$\rho\f$ and \f$\theta\f$ accuracies
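For reference, a minimal usage sketch (the 'sudoku.png' file name and the parameter values are only
illustrative):
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('sudoku.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150, apertureSize=3)

# rho accuracy of 1 pixel, theta accuracy of 1 degree, 200-vote threshold
lines = cv.HoughLines(edges, 1, np.pi/180, 200)
for rho, theta in lines[:, 0]:
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a*rho, b*rho
    # extend the infinite (rho, theta) line far in both directions for drawing
    pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*a))
    pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*a))
    cv.line(img, pt1, pt2, (0, 0, 255), 2)

cv.imwrite('houghlines.jpg', img)
@endcode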
@ -88,7 +88,7 @@ Hough Transform and Probabilistic Hough Transform in Hough space. (Image Courtes
OpenCV implementation is based on Robust Detection of Lines Using the Progressive Probabilistic
Hough Transform by Matas, J. and Galambos, C. and Kittler, J.V. @cite Matas00. The function used is
**cv2.HoughLinesP()**. It has two new arguments.
**cv.HoughLinesP()**. It has two new arguments.
- **minLineLength** - Minimum length of line. Line segments shorter than this are rejected.
- **maxLineGap** - Maximum allowed gap between line segments to treat them as a single line.
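A short sketch of how these arguments are typically passed (again, the file name and values are only
illustrative):
@code{.py}
import numpy as np
import cv2 as cv

img = cv.imread('sudoku.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150, apertureSize=3)

# returns line segments directly as (x1, y1, x2, y2) endpoints
lines = cv.HoughLinesP(edges, 1, np.pi/180, 100,
                       minLineLength=100, maxLineGap=10)
for x1, y1, x2, y2 in lines[:, 0]:
    cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv.imwrite('houghlinesp.jpg', img)
@endcode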

View File

@ -7,8 +7,8 @@ Goal
In this chapter,
- We will learn different morphological operations like Erosion, Dilation, Opening, Closing
etc.
- We will see different functions like : **cv2.erode()**, **cv2.dilate()**,
**cv2.morphologyEx()** etc.
- We will see different functions like : **cv.erode()**, **cv.dilate()**,
**cv.morphologyEx()** etc.
Theory
------
@ -35,12 +35,12 @@ detach two connected objects etc.
Here, as an example, I will use a 5x5 kernel full of ones. Let's see how it works:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
img = cv2.imread('j.png',0)
img = cv.imread('j.png',0)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(img,kernel,iterations = 1)
erosion = cv.erode(img,kernel,iterations = 1)
@endcode
Result:
@ -54,7 +54,7 @@ Normally, in cases like noise removal, erosion is followed by dilation. Because,
white noise, but it also shrinks our object. So we dilate it. Since the noise is gone, it won't come
back, but our object area increases. It is also useful in joining broken parts of an object.
@code{.py}
dilation = cv2.dilate(img,kernel,iterations = 1)
dilation = cv.dilate(img,kernel,iterations = 1)
@endcode
Result:
@ -63,9 +63,9 @@ Result:
### 3. Opening
Opening is just another name for **erosion followed by dilation**. It is useful in removing noise, as
we explained above. Here we use the function, **cv2.morphologyEx()**
we explained above. Here we use the function, **cv.morphologyEx()**
@code{.py}
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
opening = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
@endcode
Result:
@ -76,7 +76,7 @@ Result:
Closing is the reverse of Opening, **Dilation followed by Erosion**. It is useful in closing small holes
inside the foreground objects, or small black points on the object.
@code{.py}
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
closing = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)
@endcode
Result:
@ -88,7 +88,7 @@ It is the difference between dilation and erosion of an image.
The result will look like the outline of the object.
@code{.py}
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
gradient = cv.morphologyEx(img, cv.MORPH_GRADIENT, kernel)
@endcode
Result:
@ -99,7 +99,7 @@ Result:
It is the difference between the input image and the Opening of the image. The example below is done
with a 9x9 kernel.
@code{.py}
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
tophat = cv.morphologyEx(img, cv.MORPH_TOPHAT, kernel)
@endcode
Result:
@ -109,7 +109,7 @@ Result:
It is the difference between the closing of the input image and the input image.
@code{.py}
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
blackhat = cv.morphologyEx(img, cv.MORPH_BLACKHAT, kernel)
@endcode
Result:
@ -120,11 +120,11 @@ Structuring Element
We manually created structuring elements in the previous examples with the help of Numpy. They are
rectangular in shape. But in some cases, you may need elliptical/circular shaped kernels. So for this
purpose, OpenCV has a function, **cv2.getStructuringElement()**. You just pass the shape and size of
purpose, OpenCV has a function, **cv.getStructuringElement()**. You just pass the shape and size of
the kernel, and you get the desired kernel.
@code{.py}
# Rectangular Kernel
>>> cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
>>> cv.getStructuringElement(cv.MORPH_RECT,(5,5))
array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
@ -132,7 +132,7 @@ array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]], dtype=uint8)
# Elliptical Kernel
>>> cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
>>> cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
array([[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
@ -140,7 +140,7 @@ array([[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=uint8)
# Cross-shaped Kernel
>>> cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
>>> cv.getStructuringElement(cv.MORPH_CROSS,(5,5))
array([[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],

View File

@ -7,7 +7,7 @@ Goal
In this chapter,
- We will learn about Image Pyramids
- We will use Image pyramids to create a new fruit, "Orapple"
- We will see these functions: **cv2.pyrUp()**, **cv2.pyrDown()**
- We will see these functions: **cv.pyrUp()**, **cv.pyrDown()**
Theory
------
@ -28,18 +28,18 @@ contribution from 5 pixels in underlying level with gaussian weights. By doing s
image becomes an \f$M/2 \times N/2\f$ image. So the area reduces to one-fourth of the original area. It is called
an Octave. The same pattern continues as we go higher in the pyramid (ie, resolution decreases).
Similarly, while expanding, the area becomes 4 times larger at each level. We can find Gaussian pyramids using the
**cv2.pyrDown()** and **cv2.pyrUp()** functions.
**cv.pyrDown()** and **cv.pyrUp()** functions.
@code{.py}
img = cv2.imread('messi5.jpg')
lower_reso = cv2.pyrDown(higher_reso)
img = cv.imread('messi5.jpg')
lower_reso = cv.pyrDown(higher_reso)
@endcode
Below are the 4 levels in an image pyramid.
![image](images/messipyr.jpg)
Now you can go down the image pyramid with **cv2.pyrUp()** function.
Now you can go down the image pyramid with the **cv.pyrUp()** function.
@code{.py}
higher_reso2 = cv2.pyrUp(lower_reso)
higher_reso2 = cv.pyrUp(lower_reso)
@endcode
Remember, higher_reso2 is not equal to higher_reso, because once you decrease the resolution, you
lose the information. The image below is 3 levels down the pyramid created from the smallest image in
@ -79,38 +79,38 @@ blending, Laplacian Pyramids etc. Simply it is done as follows:
Below is the full code. (For the sake of simplicity, each step is done separately, which may take more
memory. You can optimize it if you want.)
@code{.py}
import cv2
import cv2 as cv
import numpy as np,sys
A = cv2.imread('apple.jpg')
B = cv2.imread('orange.jpg')
A = cv.imread('apple.jpg')
B = cv.imread('orange.jpg')
# generate Gaussian pyramid for A
G = A.copy()
gpA = [G]
for i in range(6):
G = cv2.pyrDown(G)
G = cv.pyrDown(G)
gpA.append(G)
# generate Gaussian pyramid for B
G = B.copy()
gpB = [G]
for i in range(6):
G = cv2.pyrDown(G)
G = cv.pyrDown(G)
gpB.append(G)
# generate Laplacian Pyramid for A
lpA = [gpA[5]]
for i in range(5,0,-1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i-1],GE)
GE = cv.pyrUp(gpA[i])
L = cv.subtract(gpA[i-1],GE)
lpA.append(L)
# generate Laplacian Pyramid for B
lpB = [gpB[5]]
for i in range(5,0,-1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i-1],GE)
GE = cv.pyrUp(gpB[i])
L = cv.subtract(gpB[i-1],GE)
lpB.append(L)
# Now add left and right halves of images in each level
@ -123,14 +123,14 @@ for la,lb in zip(lpA,lpB):
# now reconstruct
ls_ = LS[0]
for i in range(1,6):
ls_ = cv2.pyrUp(ls_)
ls_ = cv2.add(ls_, LS[i])
ls_ = cv.pyrUp(ls_)
ls_ = cv.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:,:cols//2],B[:,cols//2:]))
cv2.imwrite('Pyramid_blending2.jpg',ls_)
cv2.imwrite('Direct_blending.jpg',real)
cv.imwrite('Pyramid_blending2.jpg',ls_)
cv.imwrite('Direct_blending.jpg',real)
@endcode
Additional Resources
--------------------

View File

@ -6,24 +6,24 @@ Goals
In this chapter, you will learn
- To find objects in an image using Template Matching
- You will see these functions : **cv2.matchTemplate()**, **cv2.minMaxLoc()**
- You will see these functions : **cv.matchTemplate()**, **cv.minMaxLoc()**
Theory
------
Template Matching is a method for searching and finding the location of a template image in a larger
image. OpenCV comes with a function **cv2.matchTemplate()** for this purpose. It simply slides the
image. OpenCV comes with a function **cv.matchTemplate()** for this purpose. It simply slides the
template image over the input image (as in 2D convolution) and compares the template and patch of
input image under the template image. Several comparison methods are implemented in OpenCV. (You can
check the docs for more details). It returns a grayscale image, where each pixel denotes how well
the neighbourhood of that pixel matches the template.
If the input image is of size (WxH) and the template image is of size (wxh), the output image will have a size
of (W-w+1, H-h+1). Once you got the result, you can use **cv2.minMaxLoc()** function to find where
of (W-w+1, H-h+1). Once you get the result, you can use the **cv.minMaxLoc()** function to find where
the maximum/minimum value is. Take it as the top-left corner of the rectangle and take (w,h) as the width
and height of the rectangle. That rectangle is your region of the template.
@note If you are using cv2.TM_SQDIFF as comparison method, minimum value gives the best match.
@note If you are using cv.TM_SQDIFF as comparison method, minimum value gives the best match.
Template Matching in OpenCV
---------------------------
@ -34,35 +34,35 @@ Here, as an example, we will search for Messi's face in his photo. So I created
We will try all the comparison methods so that we can see what their results look like:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg',0)
img2 = img.copy()
template = cv2.imread('template.jpg',0)
template = cv.imread('template.jpg',0)
w, h = template.shape[::-1]
# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
for meth in methods:
img = img2.copy()
method = eval(meth)
# Apply template Matching
res = cv2.matchTemplate(img,template,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
res = cv.matchTemplate(img,template,method)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
cv.rectangle(img,top_left, bottom_right, 255, 2)
plt.subplot(121),plt.imshow(res,cmap = 'gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
@ -74,56 +74,56 @@ for meth in methods:
@endcode
See the results below:
- cv2.TM_CCOEFF
- cv.TM_CCOEFF
![image](images/template_ccoeff_1.jpg)
- cv2.TM_CCOEFF_NORMED
- cv.TM_CCOEFF_NORMED
![image](images/template_ccoeffn_2.jpg)
- cv2.TM_CCORR
- cv.TM_CCORR
![image](images/template_ccorr_3.jpg)
- cv2.TM_CCORR_NORMED
- cv.TM_CCORR_NORMED
![image](images/template_ccorrn_4.jpg)
- cv2.TM_SQDIFF
- cv.TM_SQDIFF
![image](images/template_sqdiff_5.jpg)
- cv2.TM_SQDIFF_NORMED
- cv.TM_SQDIFF_NORMED
![image](images/template_sqdiffn_6.jpg)
You can see that the result using **cv2.TM_CCORR** is not good as we expected.
You can see that the result using **cv.TM_CCORR** is not as good as we expected.
Template Matching with Multiple Objects
---------------------------------------
In the previous section, we searched the image for Messi's face, which occurs only once in the image.
Suppose you are searching for an object which has multiple occurances, **cv2.minMaxLoc()** won't
Suppose you are searching for an object which has multiple occurrences; **cv.minMaxLoc()** won't
give you all the locations. In that case, we will use thresholding. So in this example, we will use
a screenshot of the famous game **Mario** and we will find the coins in it.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img_rgb = cv2.imread('mario.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('mario_coin.png',0)
img_rgb = cv.imread('mario.png')
img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)
template = cv.imread('mario_coin.png',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imwrite('res.png',img_rgb)
cv.imwrite('res.png',img_rgb)
@endcode
Result:

View File

@ -6,24 +6,24 @@ Goal
- In this tutorial, you will learn Simple thresholding, Adaptive thresholding, Otsu's thresholding
etc.
- You will learn these functions : **cv2.threshold**, **cv2.adaptiveThreshold** etc.
- You will learn these functions : **cv.threshold**, **cv.adaptiveThreshold** etc.
Simple Thresholding
-------------------
Here, the matter is straightforward. If the pixel value is greater than a threshold value, it is
assigned one value (maybe white), else it is assigned another value (maybe black). The function
used is **cv2.threshold**. First argument is the source image, which **should be a grayscale
used is **cv.threshold**. First argument is the source image, which **should be a grayscale
image**. The second argument is the threshold value which is used to classify the pixel values. The third
argument is the maxVal, which represents the value to be given if the pixel value is more than (sometimes
less than) the threshold value. OpenCV provides different styles of thresholding, and the style is decided
by the fourth parameter of the function. Different types are:
- cv2.THRESH_BINARY
- cv2.THRESH_BINARY_INV
- cv2.THRESH_TRUNC
- cv2.THRESH_TOZERO
- cv2.THRESH_TOZERO_INV
- cv.THRESH_BINARY
- cv.THRESH_BINARY_INV
- cv.THRESH_TRUNC
- cv.THRESH_TOZERO
- cv.THRESH_TOZERO_INV
The documentation clearly explains what each type is meant for. Please check it out.
@ -32,16 +32,16 @@ our **thresholded image**.
Code :
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('gradient.png',0)
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
img = cv.imread('gradient.png',0)
ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)
ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC)
ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO)
ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV)
titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
@ -72,8 +72,8 @@ results for images with varying illumination.
It has three special input parameters and only one output argument.
**Adaptive Method** - It decides how the thresholding value is calculated.
- cv2.ADAPTIVE_THRESH_MEAN_C : threshold value is the mean of neighbourhood area.
- cv2.ADAPTIVE_THRESH_GAUSSIAN_C : threshold value is the weighted sum of neighbourhood
- cv.ADAPTIVE_THRESH_MEAN_C : threshold value is the mean of neighbourhood area.
- cv.ADAPTIVE_THRESH_GAUSSIAN_C : threshold value is the weighted sum of neighbourhood
values where weights are a gaussian window.
**Block Size** - It decides the size of the neighbourhood area.
@ -83,18 +83,18 @@ It has three special input params and only one output argument.
The piece of code below compares global thresholding and adaptive thresholding for an image with varying
illumination:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('sudoku.png',0)
img = cv2.medianBlur(img,5)
img = cv.imread('sudoku.png',0)
img = cv.medianBlur(img,5)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
th2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,11,2)
th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv.THRESH_BINARY,11,2)
titles = ['Original Image', 'Global Thresholding (v = 127)',
'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
@ -124,7 +124,7 @@ That is what Otsu binarization does. So in simple words, it automatically calcul
value from the image histogram for a bimodal image. (For images which are not bimodal, binarization
won't be accurate.)
For this, our cv2.threshold() function is used, but pass an extra flag, cv2.THRESH_OTSU. **For
For this, our cv.threshold() function is used, but we pass an extra flag, cv.THRESH_OTSU. **For the
threshold value, simply pass zero**. Then the algorithm finds the optimal threshold value and
returns it as the second output, retVal. If Otsu thresholding is not used, retVal is the same as the
threshold value you used.
@ -134,21 +134,21 @@ for a value of 127. In second case, I applied Otsus thresholding directly. In
filtered the image with a 5x5 gaussian kernel to remove the noise, then applied Otsu thresholding. See
how noise filtering improves the result.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('noisy2.png',0)
img = cv.imread('noisy2.png',0)
# global thresholding
ret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
# Otsu's thresholding
ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img,(5,5),0)
ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
blur = cv.GaussianBlur(img,(5,5),0)
ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1,
@ -188,11 +188,11 @@ where
It actually finds a value of t which lies between the two peaks such that the variances of both classes
are minimal. It can be implemented simply in Python as follows:
@code{.py}
img = cv2.imread('noisy2.png',0)
blur = cv2.GaussianBlur(img,(5,5),0)
img = cv.imread('noisy2.png',0)
blur = cv.GaussianBlur(img,(5,5),0)
# find normalized_histogram, and its cumulative distribution function
hist = cv2.calcHist([blur],[0],None,[256],[0,256])
hist = cv.calcHist([blur],[0],None,[256],[0,256])
hist_norm = hist.ravel()/hist.max()
Q = hist_norm.cumsum()
@ -217,7 +217,7 @@ for i in xrange(1,256):
thresh = i
# find otsu's threshold value with OpenCV function
ret, otsu = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
print( "{} {}".format(thresh,ret) )
@endcode
*(Some of the functions may be new here, but we will cover them in coming chapters)*

View File

@ -8,7 +8,7 @@ In this section, we will learn
- To find the Fourier Transform of images using OpenCV
- To utilize the FFT functions available in Numpy
- Some applications of Fourier Transform
- We will see following functions : **cv2.dft()**, **cv2.idft()** etc
- We will see following functions : **cv.dft()**, **cv.idft()** etc
Theory
------
@ -50,11 +50,11 @@ you want to bring it to center, you need to shift the result by \f$\frac{N}{2}\f
directions. This is simply done by the function, **np.fft.fftshift()**. (It makes the result easier to
analyze). Once you have found the frequency transform, you can find the magnitude spectrum.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg',0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
@ -112,21 +112,21 @@ Better option is Gaussian Windows.
Fourier Transform in OpenCV
---------------------------
OpenCV provides the functions **cv2.dft()** and **cv2.idft()** for this. It returns the same result
OpenCV provides the functions **cv.dft()** and **cv.idft()** for this. It returns the same result
as before, but with two channels. The first channel will have the real part of the result, and the second
channel will have the imaginary part of the result. The input image should be converted to
np.float32 first. We will see how to do it.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg',0)
img = cv.imread('messi5.jpg',0)
dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
magnitude_spectrum = 20*np.log(cv.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
@ -135,7 +135,7 @@ plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
@endcode
@note You can also use **cv2.cartToPolar()** which returns both magnitude and phase in a single shot
@note You can also use **cv.cartToPolar()** which returns both magnitude and phase in a single shot
So, now we have to do the inverse DFT. In the previous session, we created a HPF; this time we will see
how to remove high frequency contents in the image, ie we apply a LPF to the image. It actually blurs the
@ -153,8 +153,8 @@ mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# apply mask and inverse DFT
fshift = dft_shift*mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])
img_back = cv.idft(f_ishift)
img_back = cv.magnitude(img_back[:,:,0],img_back[:,:,1])
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
@ -166,7 +166,7 @@ See the result:
![image](images/fft4.jpg)
@note As usual, OpenCV functions **cv2.dft()** and **cv2.idft()** are faster than Numpy
@note As usual, OpenCV functions **cv.dft()** and **cv.idft()** are faster than Numpy
counterparts. But Numpy functions are more user-friendly. For more details about performance issues,
see below section.
@ -180,23 +180,23 @@ the array to any optimal size (by padding zeros) before finding DFT. For OpenCV,
manually pad zeros. But for Numpy, you specify the new size of FFT calculation, and it will
automatically pad zeros for you.
So how do we find this optimal size ? OpenCV provides a function, **cv2.getOptimalDFTSize()** for
this. It is applicable to both **cv2.dft()** and **np.fft.fft2()**. Let's check their performance
So how do we find this optimal size? OpenCV provides a function, **cv.getOptimalDFTSize()**, for
this. It is applicable to both **cv.dft()** and **np.fft.fft2()**. Let's check their performance
using the IPython magic command %timeit.
@code{.py}
In [16]: img = cv2.imread('messi5.jpg',0)
In [16]: img = cv.imread('messi5.jpg',0)
In [17]: rows,cols = img.shape
In [18]: print("{} {}".format(rows,cols))
342 548
In [19]: nrows = cv2.getOptimalDFTSize(rows)
In [20]: ncols = cv2.getOptimalDFTSize(cols)
In [19]: nrows = cv.getOptimalDFTSize(rows)
In [20]: ncols = cv.getOptimalDFTSize(cols)
In [21]: print("{} {}".format(nrows,ncols))
360 576
@endcode
See, the size (342,548) is modified to (360, 576). Now let's pad it with zeros (for OpenCV) and find
their DFT calculation performance. You can do it by creating a new big zero array and copying the data
to it, or use **cv2.copyMakeBorder()**.
to it, or use **cv.copyMakeBorder()**.
@code{.py}
nimg = np.zeros((nrows,ncols))
nimg[:rows,:cols] = img
@ -205,8 +205,8 @@ OR:
@code{.py}
right = ncols - cols
bottom = nrows - rows
bordertype = cv2.BORDER_CONSTANT #just to avoid line breakup in PDF file
nimg = cv2.copyMakeBorder(img,0,bottom,0,right,bordertype, value = 0)
bordertype = cv.BORDER_CONSTANT #just to avoid line breakup in PDF file
nimg = cv.copyMakeBorder(img,0,bottom,0,right,bordertype, value = 0)
@endcode
Now we calculate the DFT performance comparison of Numpy function:
@code{.py}
@ -217,9 +217,9 @@ In [23]: %timeit fft2 = np.fft.fft2(img,[nrows,ncols])
@endcode
It shows a 4x speedup. Now we will try the same with OpenCV functions.
@code{.py}
In [24]: %timeit dft1= cv2.dft(np.float32(img),flags=cv2.DFT_COMPLEX_OUTPUT)
In [24]: %timeit dft1= cv.dft(np.float32(img),flags=cv.DFT_COMPLEX_OUTPUT)
100 loops, best of 3: 13.5 ms per loop
In [27]: %timeit dft2= cv2.dft(np.float32(nimg),flags=cv2.DFT_COMPLEX_OUTPUT)
In [27]: %timeit dft2= cv.dft(np.float32(nimg),flags=cv.DFT_COMPLEX_OUTPUT)
100 loops, best of 3: 3.11 ms per loop
@endcode
It also shows a 4x speed-up. You can also see that OpenCV functions are around 3x faster than Numpy
@ -232,7 +232,7 @@ A similar question was asked in a forum. The question is, why Laplacian is a hig
Sobel is a HPF? etc. And the first answer given was in terms of the Fourier Transform. Just take
the Fourier transform of the Laplacian for some higher size of FFT and analyze it:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
@ -240,7 +240,7 @@ from matplotlib import pyplot as plt
mean_filter = np.ones((3,3))
# creating a gaussian filter
x = cv2.getGaussianKernel(5,10)
x = cv.getGaussianKernel(5,10)
gaussian = x*x.T
# different edge detecting filters

View File

@ -6,7 +6,7 @@ Goal
In this chapter,
- We will learn to use marker-based image segmentation using watershed algorithm
- We will see: **cv2.watershed()**
- We will see: **cv.watershed()**
Theory
------
@ -45,12 +45,12 @@ We start with finding an approximate estimate of the coins. For that, we can use
binarization.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('coins.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
img = cv.imread('coins.png')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
@endcode
Result:
@ -78,18 +78,18 @@ obtained from subtracting sure_fg area from sure_bg area.
@code{.py}
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
@endcode
See the result. In the thresholded image, we get some regions which we are sure are coins, and now
they are detached. (In some cases, you may be interested in only foreground segmentation,
@ -103,7 +103,7 @@ Now we know for sure which are region of coins, which are background and all. So
(it is an array of the same size as the original image, but with int32 datatype) and label the
regions inside it. The regions we know for sure (whether foreground or background) are labelled with
any positive integers, but different integers, and the areas we don't know for sure are just left as
zero. For this we use **cv2.connectedComponents()**. It labels background of the image with 0, then
zero. For this we use **cv.connectedComponents()**. It labels the background of the image with 0; then
other objects are labelled with integers starting from 1.
But we know that if the background is marked with 0, watershed will consider it as an unknown area. So we
@ -111,7 +111,7 @@ want to mark it with different integer. Instead, we will mark unknown region, de
with 0.
@code{.py}
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
@ -128,7 +128,7 @@ compared to unknown region.
Now our marker is ready. It is time for the final step: apply watershed. Then the marker image will be
modified. The boundary region will be marked with -1.
@code{.py}
markers = cv2.watershed(img,markers)
markers = cv.watershed(img,markers)
img[markers == -1] = [255,0,0]
@endcode
See the result below. For some coins, the regions where they touch are segmented properly, and for

View File

@ -4,7 +4,7 @@ K-Means Clustering in OpenCV {#tutorial_py_kmeans_opencv}
Goal
----
- Learn to use **cv2.kmeans()** function in OpenCV for data clustering
- Learn to use **cv.kmeans()** function in OpenCV for data clustering
Understanding Parameters
------------------------
@ -16,9 +16,9 @@ Understanding Parameters
-# **nclusters(K)** : Number of clusters required at end
-# **criteria** : It is the iteration termination criteria. When this criteria is satisfied, algorithm iteration stops. Actually, it should be a tuple of 3 parameters. They are \`( type, max_iter, epsilon )\`:
-# type of termination criteria. It has 3 flags as below:
- **cv2.TERM_CRITERIA_EPS** - stop the algorithm iteration if specified accuracy, *epsilon*, is reached.
- **cv2.TERM_CRITERIA_MAX_ITER** - stop the algorithm after the specified number of iterations, *max_iter*.
- **cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER** - stop the iteration when any of the above condition is met.
- **cv.TERM_CRITERIA_EPS** - stop the algorithm iteration if specified accuracy, *epsilon*, is reached.
- **cv.TERM_CRITERIA_MAX_ITER** - stop the algorithm after the specified number of iterations, *max_iter*.
- **cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER** - stop the iteration when any of the above condition is met.
-# max_iter - An integer specifying maximum number of iterations.
-# epsilon - Required accuracy
@ -26,7 +26,7 @@ Understanding Parameters
initial labellings. The algorithm returns the labels that yield the best compactness. This
compactness is returned as output.
-# **flags** : This flag is used to specify how initial centers are taken. Normally two flags are
used for this : **cv2.KMEANS_PP_CENTERS** and **cv2.KMEANS_RANDOM_CENTERS**.
used for this : **cv.KMEANS_PP_CENTERS** and **cv.KMEANS_RANDOM_CENTERS**.
### Output parameters
@ -47,7 +47,7 @@ t-shirt problem where you use only height of people to decide the size of t-shir
So we start by creating some data and plotting it in Matplotlib
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
x = np.random.randint(25,100,25)
@ -70,13 +70,13 @@ that, whenever 10 iterations of algorithm is ran, or an accuracy of epsilon = 1.
the algorithm and return the answer.
@code{.py}
# Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# Set flags (Just to avoid line break in the code)
flags = cv2.KMEANS_RANDOM_CENTERS
flags = cv.KMEANS_RANDOM_CENTERS
# Apply KMeans
compactness,labels,centers = cv2.kmeans(z,2,None,criteria,10,flags)
compactness,labels,centers = cv.kmeans(z,2,None,criteria,10,flags)
@endcode
This gives us the compactness, labels and centers. In this case, I got centers as 60 and 207. Labels
will have the same size as that of the test data, where each data point will be labelled '0','1','2' etc.
@ -117,7 +117,7 @@ Check image below:
Now I am directly moving to the code:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
X = np.random.randint(25,50,(25,2))
@ -128,8 +128,8 @@ Z = np.vstack((X,Y))
Z = np.float32(Z)
# define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv2.kmeans(Z,2,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv.kmeans(Z,2,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# Now separate the data, Note the flatten()
A = Z[label.ravel()==0]
@ -161,27 +161,27 @@ specified number of colors. And again we need to reshape it back to the shape of
Below is the code:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
img = cv2.imread('home.jpg')
img = cv.imread('home.jpg')
Z = img.reshape((-1,3))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 8
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
ret,label,center=cv.kmeans(Z,K,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
cv2.imshow('res2',res2)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('res2',res2)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
See the result below for K=8:

View File

@ -20,11 +20,11 @@ pixels. It is the simplest feature set we can create. We use first 250 samples o
train_data, and the next 250 samples as test_data. So let's prepare them first.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv.imread('digits.png')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
@ -42,8 +42,8 @@ train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
knn = cv.ml.KNearest_create()
knn.train(train, cv.ml.ROW_SAMPLE, train_labels)
ret,result,neighbours,dist = knn.findNearest(test,k=5)
# Now we check the accuracy of classification
@ -87,7 +87,7 @@ There are 20000 samples available, so we take first 10000 data as training sampl
10000 as test samples. We should change the letters to ASCII codes, because we can't work with
letters directly.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
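# a sketch of loading the data (assuming the UCI 'letter-recognition.data' file,
# whose first comma-separated field is the letter and whose remaining 16 fields
# are features); the letter is mapped to its ASCII code so the array stays numeric
data = np.loadtxt('letter-recognition.data', dtype='float32', delimiter=',',
                    converters={0: lambda ch: ord(ch) - ord('A')})

# split the data into two halves: 10000 for training, 10000 for testing
train, test = np.vsplit(data, 2)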
@ -103,8 +103,8 @@ responses, trainData = np.hsplit(train,[1])
labels, testData = np.hsplit(test,[1])
# Initiate the kNN, classify, measure accuracy.
knn = cv2.ml.KNearest_create()
knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
ret, result, neighbours, dist = knn.findNearest(testData, k=5)
correct = np.count_nonzero(result == labels)

View File

@ -73,7 +73,7 @@ We do all these with the help of Random Number Generator in Numpy.
Then we plot it with the help of Matplotlib. Red families are shown as Red Triangles and Blue
families are shown as Blue Squares.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
@ -114,8 +114,8 @@ So let's see how it works. New comer is marked in green color.
newcomer = np.random.randint(0,100,(1,2)).astype(np.float32)
plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
knn = cv2.ml.KNearest_create()
knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
ret, results, neighbours ,dist = knn.findNearest(newcomer, 3)
print( "result: {}\n".format(results) )

View File

@ -94,13 +94,13 @@ First we need to load the required XML classifiers. Then load our input image (o
grayscale mode.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('sachin.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv.imread('sachin.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
@endcode
Now we find the faces in the image. If faces are found, it returns the positions of detected faces
as Rect(x,y,w,h). Once we get these locations, we can create a ROI for the face and apply eye
@ -108,16 +108,16 @@ detection on this ROI (since eyes are always on the face !!! ).
@code{.py}
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('img',img)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
The result looks like this:

View File

@ -53,12 +53,12 @@ Pay attention for the data types, as the images should be 1-channel or 3-channel
8-bit (np.uint8) and the exposure times need to be float32 and in seconds.
@code{.py}
import cv2
import cv2 as cv
import numpy as np
# Loading exposure images into a list
img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"]
img_list = [cv2.imread(fn) for fn in img_fn]
img_list = [cv.imread(fn) for fn in img_fn]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
@endcode
@ -71,9 +71,9 @@ full dynamic range of all exposure images.
@code{.py}
# Merge exposures to HDR image
merge_debvec = cv2.createMergeDebevec()
merge_debvec = cv.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
merge_robertson = cv2.createMergeRobertson()
merge_robertson = cv.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())
@endcode
@ -85,9 +85,9 @@ we will later have to clip the data in order to avoid overflow.
@code{.py}
# Tonemap HDR image
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
tonemap1 = cv.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(hdr_debvec.copy())
tonemap2 = cv2.createTonemapDurand(gamma=1.3)
tonemap2 = cv.createTonemapDurand(gamma=1.3)
res_robertson = tonemap2.process(hdr_robertson.copy())
@endcode
@ -100,7 +100,7 @@ range of [0..1].
@code{.py}
# Exposure fusion using Mertens
merge_mertens = cv2.createMergeMertens()
merge_mertens = cv.createMergeMertens()
res_mertens = merge_mertens.process(img_list)
@endcode
@ -115,9 +115,9 @@ res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit)
cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit)
cv2.imwrite("fusion_mertens.jpg", res_mertens_8bit)
cv.imwrite("ldr_debvec.jpg", res_debvec_8bit)
cv.imwrite("ldr_robertson.jpg", res_robertson_8bit)
cv.imwrite("fusion_mertens.jpg", res_mertens_8bit)
@endcode
Results
@ -150,10 +150,10 @@ function and use it for the HDR merge.
@code{.py}
# Estimate camera response function (CRF)
cal_debvec = cv2.createCalibrateDebevec()
cal_debvec = cv.createCalibrateDebevec()
crf_debvec = cal_debvec.process(img_list, times=exposure_times)
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy(), response=crf_debvec.copy())
cal_robertson = cv2.createCalibrateRobertson()
cal_robertson = cv.createCalibrateRobertson()
crf_robertson = cal_robertson.process(img_list, times=exposure_times)
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy(), response=crf_robertson.copy())
@endcode

View File

@ -22,7 +22,7 @@ shown below (taken from [Wikipedia](http://en.wikipedia.org/wiki/Inpainting)):
![image](images/inpaint_basics.jpg)
Several algorithms were designed for this purpose and OpenCV provides two of them. Both can be
accessed by the same function, **cv2.inpaint()**
accessed by the same function, **cv.inpaint()**
The first algorithm is based on the paper **"An Image Inpainting Technique Based on the Fast Marching
Method"** by Alexandru Telea in 2004. It is based on the Fast Marching Method. Consider a region in the
@ -33,7 +33,7 @@ known pixels in the neigbourhood. Selection of the weights is an important matte
given to those pixels lying near to the point, near to the normal of the boundary and those lying on
the boundary contours. Once a pixel is inpainted, it moves to the next nearest pixel using the Fast Marching
Method. FMM ensures that pixels near the known pixels are inpainted first, so that it works just
like a manual heuristic operation. This algorithm is enabled by using the flag, cv2.INPAINT_TELEA.
like a manual heuristic operation. This algorithm is enabled by using the flag, cv.INPAINT_TELEA.
The second algorithm is based on the paper **"Navier-Stokes, Fluid Dynamics, and Image and Video
Inpainting"** by Bertalmio, Marcelo, Andrea L. Bertozzi, and Guillermo Sapiro in 2001. This
@ -43,7 +43,7 @@ are meant to be continuous). It continues isophotes (lines joining points with s
like contours join points with the same elevation) while matching gradient vectors at the boundary of
the inpainting region. For this, some methods from fluid dynamics are used. Once they are obtained,
color is filled in to minimize the variance in that area. This algorithm is enabled by using the
flag, cv2.INPAINT_NS.
flag, cv.INPAINT_NS.
Code
----
@ -53,16 +53,16 @@ the area which is to be inpainted. Everything else is simple. My image is degrad
strokes (which I added manually). I created a corresponding mask of the strokes with the Paint tool.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
img = cv2.imread('messi_2.jpg')
mask = cv2.imread('mask2.png',0)
img = cv.imread('messi_2.jpg')
mask = cv.imread('mask2.png',0)
dst = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow('dst',dst)
cv.waitKey(0)
cv.destroyAllWindows()
@endcode
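To try the second algorithm instead, only the flag changes — a one-line sketch:
@code{.py}
dst2 = cv.inpaint(img, mask, 3, cv.INPAINT_NS)
@endcode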
See the result below. The first image shows the degraded input, the second image is the mask, the third
image is the result of the first algorithm, and the last image is the result of the second algorithm.

View File

@ -7,8 +7,8 @@ Goal
In this chapter,
- You will learn about Non-local Means Denoising algorithm to remove noise in the image.
- You will see different functions like **cv2.fastNlMeansDenoising()**,
**cv2.fastNlMeansDenoisingColored()** etc.
- You will see different functions like **cv.fastNlMeansDenoising()**,
**cv.fastNlMeansDenoisingColored()** etc.
Theory
------
@ -52,11 +52,11 @@ Image Denoising in OpenCV
OpenCV provides four variations of this technique.
-# **cv2.fastNlMeansDenoising()** - works with a single grayscale images
2. **cv2.fastNlMeansDenoisingColored()** - works with a color image.
3. **cv2.fastNlMeansDenoisingMulti()** - works with image sequence captured in short period of time
-# **cv.fastNlMeansDenoising()** - works with a single grayscale image
2. **cv.fastNlMeansDenoisingColored()** - works with a color image.
3. **cv.fastNlMeansDenoisingMulti()** - works with image sequence captured in short period of time
(grayscale images)
4. **cv2.fastNlMeansDenoisingColoredMulti()** - same as above, but for color images.
4. **cv.fastNlMeansDenoisingColoredMulti()** - same as above, but for color images.
Common arguments are:
- h : parameter deciding filter strength. Higher h value removes noise better, but removes
@ -69,18 +69,18 @@ Please visit first link in additional resources for more details on these parame
We will demonstrate 2 and 3 here. The rest is left for you.
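For reference, the two variants not demonstrated follow the same calling pattern — a sketch with hypothetical inputs (a grayscale image gray_img and a list of color frames color_frames):
@code{.py}
dst_gray = cv.fastNlMeansDenoising(gray_img, None, 10, 7, 21)
dst_cmulti = cv.fastNlMeansDenoisingColoredMulti(color_frames, 2, 5, None, 10, 10, 7, 21)
@endcode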
### 1. cv2.fastNlMeansDenoisingColored()
### 1. cv.fastNlMeansDenoisingColored()
As mentioned above, it is used to remove noise from color images. (The noise is expected to be Gaussian.)
See the example below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
img = cv2.imread('die.png')
img = cv.imread('die.png')
dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
dst = cv.fastNlMeansDenoisingColored(img,None,10,10,7,21)
plt.subplot(121),plt.imshow(img)
plt.subplot(122),plt.imshow(dst)
@ -91,7 +91,7 @@ result:
![image](images/nlm_result1.jpg)
### 2. cv2.fastNlMeansDenoisingMulti()
### 2. cv.fastNlMeansDenoisingMulti()
Now we will apply the same method to a video. The first argument is the list of noisy frames. The second
argument, imgToDenoiseIndex, specifies which frame we need to denoise; for that, we pass the index of
@ -102,16 +102,16 @@ input. Let imgToDenoiseIndex = 2 and temporalWindowSize = 3. Then frame-1, frame
used to denoise frame-2. Let's see an example.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
cap = cv2.VideoCapture('vtest.avi')
cap = cv.VideoCapture('vtest.avi')
# create a list of first 5 frames
img = [cap.read()[1] for i in range(5)]
# convert all to grayscale
gray = [cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) for i in img]
gray = [cv.cvtColor(i, cv.COLOR_BGR2GRAY) for i in img]
# convert all to float64
gray = [np.float64(i) for i in gray]
@ -126,7 +126,7 @@ noisy = [i+noise for i in gray]
noisy = [np.uint8(np.clip(i,0,255)) for i in noisy]
# Denoise 3rd frame considering all the 5 frames
dst = cv2.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35)
dst = cv.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35)
plt.subplot(131),plt.imshow(gray[2],'gray')
plt.subplot(132),plt.imshow(noisy[2],'gray')

View File

@ -29,8 +29,8 @@ $ yum install numpy opencv*
@endcode
Open Python IDLE (or IPython) and type the following code in the Python terminal.
@code{.py}
>>> import cv2
>>> print( cv2.__version__ )
>>> import cv2 as cv
>>> print( cv.__version__ )
@endcode
If the results are printed out without any errors, congratulations !!! You have installed
OpenCV-Python successfully.
@ -230,7 +230,7 @@ But you will have to do this every time you install OpenCV.
@code{.sh}
export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python2.7/site-packages
@endcode
Thus OpenCV installation is finished. Open a terminal and try import cv2.
Thus OpenCV installation is finished. Open a terminal and try 'import cv2 as cv'.
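A quick check, mirroring the snippet used earlier:
@code{.py}
>>> import cv2 as cv
>>> print( cv.__version__ )
@endcode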
To build the documentation, just enter the following commands:
@code{.sh}

View File

@ -31,8 +31,8 @@ $ sudo apt-get install python-opencv
Open Python IDLE (or IPython) and type the following code in the Python terminal.
```
import cv2
print(cv2.__version__)
import cv2 as cv
print(cv.__version__)
```
If the results are printed out without any errors, congratulations !!!
@ -160,6 +160,6 @@ All files are installed in "/usr/local/" folder.
Open a terminal and try import "cv2".
```
import cv2
print(cv2.__version__)
import cv2 as cv
print(cv.__version__)
```

View File

@ -35,8 +35,8 @@ Installing OpenCV from prebuilt binaries
-# Open Python IDLE and type the following code in the Python terminal.
@code
>>> import cv2
>>> print( cv2.__version__ )
>>> import cv2 as cv
>>> print( cv.__version__ )
@endcode
If the results are printed out without any errors, congratulations !!! You have installed
@ -136,7 +136,7 @@ Building OpenCV from source
![image](images/Capture8.png)
-# Open Python IDLE and enter import cv2. If no error, it is installed correctly.
-# Open Python IDLE and enter 'import cv2 as cv'. If no error, it is installed correctly.
@note We have installed OpenCV with no other support like TBB, Eigen, Qt, documentation etc. It would be
difficult to explain all that here. A more detailed video will be added soon, or you can just hack around.

View File

@ -37,31 +37,31 @@ the time proportions that those colours stay in the scene. The probable backgrou
ones which stay longer and more static.
While coding, we need to create a background object using the function,
**cv2.createBackgroundSubtractorMOG()**. It has some optional parameters like length of history,
**cv.createBackgroundSubtractorMOG()**. It has some optional parameters like length of history,
number of Gaussian mixtures, threshold etc. They are all set to some default values. Then, inside the
video loop, use the backgroundsubtractor.apply() method to get the foreground mask.
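These defaults can be overridden at construction time — a minimal sketch, assuming the factory accepts these keyword arguments:
@code{.py}
fgbg = cv.createBackgroundSubtractorMOG(history=200, nmixtures=5, backgroundRatio=0.7)
@endcode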
See a simple example below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('vtest.avi')
cap = cv.VideoCapture('vtest.avi')
fgbg = cv2.createBackgroundSubtractorMOG()
fgbg = cv.createBackgroundSubtractorMOG()
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
cv.imshow('frame',fgmask)
k = cv.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
( All the results are shown at the end for comparison).
@ -80,24 +80,24 @@ detecting shadows or not. If detectShadows = True (which is so by default), it
detects and marks shadows, but decreases the speed. Shadows will be marked in gray color.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('vtest.avi')
cap = cv.VideoCapture('vtest.avi')
fgbg = cv2.createBackgroundSubtractorMOG2()
fgbg = cv.createBackgroundSubtractorMOG2()
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
cv.imshow('frame',fgmask)
k = cv.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
(Results given at the end)
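If the gray shadow marks are not wanted, shadow detection can be disabled when the subtractor is created — a one-line sketch:
@code{.py}
fgbg = cv.createBackgroundSubtractorMOG2(detectShadows=False)
@endcode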
@ -120,26 +120,26 @@ frames.
It would be better to apply morphological opening to the result to remove the noise.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('vtest.avi')
cap = cv.VideoCapture('vtest.avi')
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
fgbg = cv2.createBackgroundSubtractorGMG()
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))
fgbg = cv.createBackgroundSubtractorGMG()
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
cv.imshow('frame',fgmask)
k = cv.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
Results
-------

View File

@ -7,7 +7,7 @@ Goal
In this chapter,
- We will understand the concepts of optical flow and its estimation using Lucas-Kanade
method.
- We will use functions like **cv2.calcOpticalFlowPyrLK()** to track feature points in a
- We will use functions like **cv.calcOpticalFlowPyrLK()** to track feature points in a
video.
Optical Flow
@ -84,19 +84,19 @@ Lucas-Kanade there, we get optical flow along with the scale.
Lucas-Kanade Optical Flow in OpenCV
-----------------------------------
OpenCV provides all these in a single function, **cv2.calcOpticalFlowPyrLK()**. Here, we create a
OpenCV provides all these in a single function, **cv.calcOpticalFlowPyrLK()**. Here, we create a
simple application which tracks some points in a video. To decide the points, we use
**cv2.goodFeaturesToTrack()**. We take the first frame, detect some Shi-Tomasi corner points in it,
**cv.goodFeaturesToTrack()**. We take the first frame, detect some Shi-Tomasi corner points in it,
then we iteratively track those points using Lucas-Kanade optical flow. For the function
**cv2.calcOpticalFlowPyrLK()** we pass the previous frame, previous points and next frame. It
**cv.calcOpticalFlowPyrLK()** we pass the previous frame, previous points and next frame. It
returns the next points along with some status numbers which have a value of 1 if the next point is found,
else zero. We iteratively pass these next points as the previous points in the next step. See the code
below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('slow.flv')
cap = cv.VideoCapture('slow.flv')
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
@ -107,25 +107,25 @@ feature_params = dict( maxCorners = 100,
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while(1):
ret,frame = cap.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
@ -135,12 +135,12 @@ while(1):
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
img = cv2.add(frame,mask)
mask = cv.line(mask, (a,b),(c,d), color[i].tolist(), 2)
frame = cv.circle(frame,(a,b),5,color[i].tolist(),-1)
img = cv.add(frame,mask)
cv2.imshow('frame',img)
k = cv2.waitKey(30) & 0xff
cv.imshow('frame',img)
k = cv.waitKey(30) & 0xff
if k == 27:
break
@ -148,7 +148,7 @@ while(1):
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
cv2.destroyAllWindows()
cv.destroyAllWindows()
cap.release()
@endcode
(This code doesn't check how correct the next keypoints are. So even if any feature point disappears
@ -176,37 +176,37 @@ array with optical flow vectors, \f$(u,v)\f$. We find their magnitude and direct
result for better visualization. Direction corresponds to Hue value of the image. Magnitude
corresponds to Value plane. See the code below:
@code{.py}
import cv2
import cv2 as cv
import numpy as np
cap = cv2.VideoCapture("vtest.avi")
cap = cv.VideoCapture("vtest.avi")
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
while(1):
ret, frame2 = cap.read()
next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
next = cv.cvtColor(frame2,cv.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
mag, ang = cv.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX)
bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR)
cv2.imshow('frame2',bgr)
k = cv2.waitKey(30) & 0xff
cv.imshow('frame2',bgr)
k = cv.waitKey(30) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('opticalfb.png',frame2)
cv2.imwrite('opticalhsv.png',bgr)
cv.imwrite('opticalfb.png',frame2)
cv.imwrite('opticalhsv.png',bgr)
prvs = next
cap.release()
cv2.destroyAllWindows()
cv.destroyAllWindows()
@endcode
See the result below:

View File

@ -39,12 +39,12 @@ algorithm moves our window to the new location with maximum density.
To use meanshift in OpenCV, first we need to setup the target, find its histogram so that we can
backproject the target on each frame for calculation of meanshift. We also need to provide initial
location of window. For histogram, only Hue is considered here. Also, to avoid false values due to
low light, low light values are discarded using **cv2.inRange()** function.
low light, low-light values are discarded using the **cv.inRange()** function.
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('slow.flv')
cap = cv.VideoCapture('slow.flv')
# take first frame of the video
ret,frame = cap.read()
@ -55,39 +55,39 @@ track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)
# Set up the termination criteria: either 10 iterations or a move by at least 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply meanshift to get the new location
ret, track_window = cv2.meanShift(dst, track_window, term_crit)
ret, track_window = cv.meanShift(dst, track_window, term_crit)
# Draw it on image
x,y,w,h = track_window
img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
cv2.imshow('img2',img2)
img2 = cv.rectangle(frame, (x,y), (x+w,y+h), 255,2)
cv.imshow('img2',img2)
k = cv2.waitKey(60) & 0xff
k = cv.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
cv.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()
cap.release()
@endcode
Three frames of the video I used are given below:
@ -116,9 +116,9 @@ It is almost same as meanshift, but it returns a rotated rectangle (that is our
parameters (which are passed as the search window in the next iteration). See the code below:
@code{.py}
import numpy as np
import cv2
import cv2 as cv
cap = cv2.VideoCapture('slow.flv')
cap = cv.VideoCapture('slow.flv')
# take first frame of the video
ret,frame = cap.read()
@ -129,40 +129,40 @@ track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)
# Set up the termination criteria: either 10 iterations or a move by at least 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply meanshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
ret, track_window = cv.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = cv.boxPoints(ret)
pts = np.int0(pts)
img2 = cv2.polylines(frame,[pts],True, 255,2)
cv2.imshow('img2',img2)
img2 = cv.polylines(frame,[pts],True, 255,2)
cv.imshow('img2',img2)
k = cv2.waitKey(60) & 0xff
k = cv.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
cv.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()
cap.release()
@endcode
Three frames of the result are shown below:

View File

@ -86,7 +86,7 @@ Now we need to generate the `g(x)` image. For this, the function **addWeighted()
@add_toggle_python
@snippet python/tutorial_code/core/AddingImages/adding_images.py blend_images
Numpy version of above line (but cv2 function is around 2x faster):
NumPy version of the above line (but the cv function is around 2x faster):
\code{.py}
dst = np.uint8(alpha*(img1)+beta*(img2))
\endcode
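Note that this plain NumPy cast wraps around on overflow, whereas **addWeighted()** saturates. A closer sketch that saturates as well (the gamma term assumed zero):
\code{.py}
dst = np.uint8(np.clip(alpha*img1 + beta*img2, 0, 255))
\endcode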

View File

@ -82,7 +82,7 @@ At first we make sure that the input images data in unsigned 8 bit format.
First we make sure that the input image data is in unsigned 8-bit format.
@code{.py}
my_image = cv2.cvtColor(my_image, cv2.CV_8U)
my_image = cv.cvtColor(my_image, cv.CV_8U)
@endcode
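A saturating dtype conversion gives the same guarantee — a minimal sketch using **convertScaleAbs()**:
@code{.py}
my_image = cv.convertScaleAbs(my_image)  # saturate to unsigned 8-bit
@endcode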
@end_toggle

View File

@ -3,18 +3,18 @@
'''
Algorithm serialization test
'''
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
class algorithm_rw_test(NewOpenCVTests):
def test_algorithm_rw(self):
# some arbitrary non-default parameters
gold = cv2.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
gold.write(cv2.FileStorage("params.yml", 1), "AKAZE")
gold = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
gold.write(cv.FileStorage("params.yml", 1), "AKAZE")  # 1 == cv.FILE_STORAGE_WRITE
fs = cv2.FileStorage("params.yml", 0)
algorithm = cv2.AKAZE_create()
fs = cv.FileStorage("params.yml", 0)
algorithm = cv.AKAZE_create()
algorithm.read(fs.getNode("AKAZE"))
self.assertEqual(algorithm.getDescriptorSize(), 1)

View File

@ -9,7 +9,7 @@ reads distorted images, calculates the calibration and write undistorted images
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -38,10 +38,10 @@ class calibration_test(NewOpenCVTests):
continue
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, pattern_size)
found, corners = cv.findChessboardCorners(img, pattern_size)
if found:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if not found:
continue
@ -50,7 +50,7 @@ class calibration_test(NewOpenCVTests):
obj_points.append(pattern_points)
# calculate camera distortion
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)
eps = 0.01
normCamEps = 10.0
@ -64,8 +64,8 @@ class calibration_test(NewOpenCVTests):
1.21234330e-03, -1.40825372e-04, 1.54865844e-01]
self.assertLess(abs(rms - 0.196334638034), eps)
self.assertLess(cv2.norm(camera_matrix - cameraMatrixTest, cv2.NORM_L1), normCamEps)
self.assertLess(cv2.norm(dist_coefs - distCoeffsTest, cv2.NORM_L1), normDistEps)
self.assertLess(cv.norm(camera_matrix - cameraMatrixTest, cv.NORM_L1), normCamEps)
self.assertLess(cv.norm(dist_coefs - distCoeffsTest, cv.NORM_L1), normDistEps)

View File

@ -21,7 +21,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
from tst_scene_render import TestSceneRender
from tests_common import NewOpenCVTests, intersectionRate
@ -53,8 +53,8 @@ class camshift_test(NewOpenCVTests):
while True:
framesCounter += 1
self.frame = self.render.getNextFrame()
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, x1, y1 = self.render.getCurrentRect() + 50
@ -63,17 +63,17 @@ class camshift_test(NewOpenCVTests):
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
self.hist = hist.reshape(-1)
self.selection = False
if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
self.selection = None
prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
_track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
_track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)
trackingRect = np.array(self.track_window)
trackingRect[2] += trackingRect[0]

View File

@ -7,7 +7,7 @@ Test for discrete Fourier transform (dft)
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
@ -24,26 +24,26 @@ class dft_test(NewOpenCVTests):
refDftShift = np.fft.fftshift(refDft)
refMagnitide = np.log(1.0 + np.abs(refDftShift))
testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
testDft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
testDftShift = np.fft.fftshift(testDft)
testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))
testMagnitude = np.log(1.0 + cv.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))
refMagnitide = cv2.normalize(refMagnitide, 0.0, 1.0, cv2.NORM_MINMAX)
testMagnitude = cv2.normalize(testMagnitude, 0.0, 1.0, cv2.NORM_MINMAX)
refMagnitide = cv.normalize(refMagnitide, 0.0, 1.0, cv.NORM_MINMAX)
testMagnitude = cv.normalize(testMagnitude, 0.0, 1.0, cv.NORM_MINMAX)
self.assertLess(cv2.norm(refMagnitide - testMagnitude), eps)
self.assertLess(cv.norm(refMagnitide - testMagnitude), eps)
#test inverse transform
img_back = np.fft.ifft2(refDft)
img_back = np.abs(img_back)
img_backTest = cv2.idft(testDft)
img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])
img_backTest = cv.idft(testDft)
img_backTest = cv.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])
img_backTest = cv2.normalize(img_backTest, 0.0, 1.0, cv2.NORM_MINMAX)
img_back = cv2.normalize(img_back, 0.0, 1.0, cv2.NORM_MINMAX)
img_backTest = cv.normalize(img_backTest, 0.0, 1.0, cv.NORM_MINMAX)
img_back = cv.normalize(img_back, 0.0, 1.0, cv.NORM_MINMAX)
self.assertLess(cv2.norm(img_back - img_backTest), eps)
self.assertLess(cv.norm(img_back - img_backTest), eps)
if __name__ == '__main__':

View File

@ -28,7 +28,7 @@ from __future__ import print_function
# built-in modules
from multiprocessing.pool import ThreadPool
import cv2
import cv2 as cv
import numpy as np
from numpy.linalg import norm
@ -48,12 +48,12 @@ def split2d(img, cell_size, flatten=True):
return cells
def deskew(img):
m = cv2.moments(img)
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR)
return img
class StatModel(object):
@ -65,10 +65,10 @@ class StatModel(object):
class KNearest(StatModel):
def __init__(self, k = 3):
self.k = k
self.model = cv2.ml.KNearest_create()
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k)
@ -76,14 +76,14 @@ class KNearest(StatModel):
class SVM(StatModel):
def __init__(self, C = 1, gamma = 0.5):
self.model = cv2.ml.SVM_create()
self.model = cv.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv2.ml.SVM_RBF)
self.model.setType(cv2.ml.SVM_C_SVC)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setType(cv.ml.SVM_C_SVC)
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
return self.model.predict(samples)[1].ravel()
@ -105,9 +105,9 @@ def preprocess_simple(digits):
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
mag, ang = cv.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
@ -190,8 +190,8 @@ class digits_test(NewOpenCVTests):
[ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0],
[ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]]
self.assertLess(cv2.norm(confusionMatrixes[0] - confusionKNN, cv2.NORM_L1), normEps)
self.assertLess(cv2.norm(confusionMatrixes[1] - confusionSVM, cv2.NORM_L1), normEps)
self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps)
self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps)
self.assertLess(errors[0] - 0.034, eps)
self.assertLess(errors[1] - 0.018, eps)

View File

@ -8,11 +8,11 @@ face detection using haar cascades
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
flags=cv.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
@ -26,8 +26,8 @@ class facedetect_test(NewOpenCVTests):
cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
cascade = cv2.CascadeClassifier(cascade_fn)
nested = cv2.CascadeClassifier(nested_fn)
cascade = cv.CascadeClassifier(cascade_fn)
nested = cv.CascadeClassifier(nested_fn)
samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png']
@ -49,8 +49,8 @@ class facedetect_test(NewOpenCVTests):
for sample in samples:
img = self.get_sample( sample)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 5.1)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.GaussianBlur(gray, (5, 5), 5.1)
rects = detect(gray, cascade)
faces.append(rects)

View File

@ -13,7 +13,7 @@ PlaneTracker class in plane_tracker.py
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
PY3 = sys.version_info[0] == 3
@ -28,8 +28,8 @@ def intersectionRate(s1, s2):
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, _intersection = cv2.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))
area, _intersection = cv.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv.contourArea(s1) + cv.contourArea(np.array(s2)))
from tests_common import NewOpenCVTests
@ -92,8 +92,8 @@ TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv2.AKAZE_create(threshold = 0.003)
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.detector = cv.AKAZE_create(threshold = 0.003)
self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
@ -137,7 +137,7 @@ class PlaneTracker:
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
@ -145,7 +145,7 @@ class PlaneTracker:
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)

View File

@ -4,7 +4,7 @@
Robust line fitting.
==================
Example of using cv2.fitLine function for fitting line
Example of using cv.fitLine function for fitting line
to points in presence of outliers.
Switch through different M-estimator functions and see,
@ -19,7 +19,7 @@ import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -53,17 +53,17 @@ class fitline_test(NewOpenCVTests):
lines = []
for name in dist_func_names:
func = getattr(cv2, name)
vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
func = getattr(cv, name)
vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01)
line = [float(vx), float(vy), float(cx), float(cy)]
lines.append(line)
eps = 0.05
refVec = (np.float32(p1) - p0) / cv2.norm(np.float32(p1) - p0)
refVec = (np.float32(p1) - p0) / cv.norm(np.float32(p1) - p0)
for i in range(len(lines)):
self.assertLessEqual(cv2.norm(refVec - lines[i][0:2], cv2.NORM_L2), eps)
self.assertLessEqual(cv.norm(refVec - lines[i][0:2], cv.NORM_L2), eps)
if __name__ == '__main__':

View File

@ -10,7 +10,7 @@ if PY3:
import numpy as np
from numpy import random
import cv2
import cv2 as cv
def make_gaussians(cluster_n, img_size):
points = []
@ -38,9 +38,9 @@ class gaussian_mix_test(NewOpenCVTests):
points, ref_distrs = make_gaussians(cluster_n, img_size)
em = cv2.ml.EM_create()
em = cv.ml.EM_create()
em.setClustersNumber(cluster_n)
em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
em.trainEM(points)
means = em.getMeans()
covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232
@ -53,8 +53,8 @@ class gaussian_mix_test(NewOpenCVTests):
for i in range(cluster_n):
for j in range(cluster_n):
if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and
cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps):
if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and
cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps):
matches_count += 1
self.assertEqual(matches_count, cluster_n)

View File

@ -3,7 +3,7 @@
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
@ -15,16 +15,16 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests):
threshes = [ x / 100. for x in range(1,10) ]
numPoints = 20000
results = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
# Check that GoodFeaturesToTrack has not modified input image
self.assertTrue(arr.tostring() == original.tostring())
# Check for repeatability
for i in range(1):
results2 = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
for t in threshes:
self.assertTrue(len(results2[t]) == len(results[t]))
for i in range(len(results[t])):
self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0)
self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0)
for t0,t1 in zip(threshes, threshes[1:]):
r0 = results[t0]
@ -33,7 +33,7 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests):
self.assertTrue(len(r0) > len(r1))
# Increasing thresh should only truncate the result list
for i in range(len(r1)):
self.assertTrue(cv2.norm(r1[i][0] - r0[i][0])==0)
self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0)
if __name__ == '__main__':

View File

@ -9,7 +9,7 @@ Interactive Image Segmentation using GrabCut algorithm.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
from tests_common import NewOpenCVTests
@ -26,7 +26,7 @@ class grabcut_test(NewOpenCVTests):
def scaleMask(self, mask):
return np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD),255,0).astype('uint8')
return np.where((mask==cv.GC_FGD) + (mask==cv.GC_PR_FGD),255,0).astype('uint8')
def test_grabcut(self):
@ -42,27 +42,27 @@ class grabcut_test(NewOpenCVTests):
mask = np.zeros(img.shape[:2], dtype = np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_RECT)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_EVAL)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_RECT)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv.GC_EVAL)
if mask_prob is None:
mask_prob = mask.copy()
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob)
if exp_mask1 is None:
exp_mask1 = self.scaleMask(mask)
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1)
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask1), True)
mask = mask_prob
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_MASK)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_EVAL)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_MASK)
cv.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv.GC_EVAL)
if exp_mask2 is None:
exp_mask2 = self.scaleMask(mask)
cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)
cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2)
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask2), True)

View File

@ -1,13 +1,13 @@
#!/usr/bin/python
'''
This example illustrates how to use cv2.HoughCircles() function.
This example illustrates how to use cv.HoughCircles() function.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
from numpy import pi, sin, cos
@ -27,10 +27,10 @@ def circleApproximation(circle):
def convContoursIntersectiponRate(c1, c2):
s1 = cv2.contourArea(c1)
s2 = cv2.contourArea(c2)
s1 = cv.contourArea(c1)
s2 = cv.contourArea(c2)
s, _ = cv2.intersectConvexConvex(c1, c2)
s, _ = cv.intersectConvexConvex(c1, c2)
return 2*s/(s1+s2)
@ -41,10 +41,10 @@ class houghcircles_test(NewOpenCVTests):
fn = "samples/data/board.jpg"
src = self.get_sample(fn, 1)
img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 5)
img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
img = cv.medianBlur(img, 5)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
testCircles = [[38, 181, 17.6],
[99.7, 166, 13.12],

View File

@ -7,7 +7,7 @@ This example illustrates how to use Hough Transform to find lines
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import cv2 as cv
import numpy as np
import sys
import math
@ -16,9 +16,9 @@ from tests_common import NewOpenCVTests
def linesDiff(line1, line2):
norm1 = cv2.norm(line1 - line2, cv2.NORM_L2)
norm1 = cv.norm(line1 - line2, cv.NORM_L2)
line3 = line1[2:4] + line1[0:2]
norm2 = cv2.norm(line3 - line2, cv2.NORM_L2)
norm2 = cv.norm(line3 - line2, cv.NORM_L2)
return min(norm1, norm2)
@ -29,9 +29,9 @@ class houghlines_test(NewOpenCVTests):
fn = "/samples/data/pic1.png"
src = self.get_sample(fn)
dst = cv2.Canny(src, 50, 200)
dst = cv.Canny(src, 50, 200)
lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:]
lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:]
eps = 5
testLines = [

View File

@ -8,7 +8,7 @@ K-means clusterization test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from numpy import random
import sys
PY3 = sys.version_info[0] == 3
@ -58,8 +58,8 @@ class kmeans_test(NewOpenCVTests):
points, _, clusterSizes = make_gaussians(cluster_n, img_size)
term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
_ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0)
term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
_ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
self.assertEqual(len(centers), cluster_n)

View File

@ -2,7 +2,7 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -11,13 +11,13 @@ class Hackathon244Tests(NewOpenCVTests):
def test_int_array(self):
a = np.array([-1, 2, -3, 4, -5])
absa0 = np.abs(a)
self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15)
absa1 = cv2.absdiff(a, 0)
self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
self.assertTrue(cv.norm(a, cv.NORM_L1) == 15)
absa1 = cv.absdiff(a, 0)
self.assertEqual(cv.norm(absa1, absa0, cv.NORM_INF), 0)
def test_imencode(self):
a = np.zeros((480, 640), dtype=np.uint8)
flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
flag, ajpg = cv.imencode("img_q90.jpg", a, [cv.IMWRITE_JPEG_QUALITY, 90])
self.assertEqual(flag, True)
self.assertEqual(ajpg.dtype, np.uint8)
self.assertGreater(ajpg.shape[0], 1)
@ -25,8 +25,8 @@ class Hackathon244Tests(NewOpenCVTests):
def test_projectPoints(self):
objpt = np.float64([[1,2,3]])
imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
imgpt0, jac0 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
self.assertEqual(imgpt1.shape, imgpt0.shape)
self.assertEqual(jac0.shape, jac1.shape)
@ -37,17 +37,17 @@ class Hackathon244Tests(NewOpenCVTests):
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= 10
(retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
(retval, out, inliers) = cv.estimateAffine3D(pattern_points, pattern_points)
self.assertEqual(retval, 1)
if cv2.norm(out[2,:]) < 1e-3:
if cv.norm(out[2,:]) < 1e-3:
out[2,2]=1
self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
self.assertLess(cv.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
fd = cv2.FastFeatureDetector_create(30, True)
fd = cv.FastFeatureDetector_create(30, True)
img = self.get_sample("samples/data/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
img = cv.medianBlur(img, 3)
keypoints = fd.detect(img)
self.assertTrue(600 <= len(keypoints) <= 700)
for kpt in keypoints:
@ -71,9 +71,9 @@ class Hackathon244Tests(NewOpenCVTests):
np.random.seed(244)
a = np.random.randn(npt,2).astype('float32')*50 + 150
be = cv2.fitEllipse(a)
br = cv2.minAreaRect(a)
mc, mr = cv2.minEnclosingCircle(a)
be = cv.fitEllipse(a)
br = cv.minAreaRect(a)
mc, mr = cv.minEnclosingCircle(a)
be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)

View File

@ -24,7 +24,7 @@ and the remaining 10000 - to test the classifier.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def load_base(fn):
a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
@ -56,12 +56,12 @@ class LetterStatModel(object):
class RTrees(LetterStatModel):
def __init__(self):
self.model = cv2.ml.RTrees_create()
self.model = cv.ml.RTrees_create()
def train(self, samples, responses):
#sample_n, var_n = samples.shape
self.model.setMaxDepth(20)
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
@ -70,10 +70,10 @@ class RTrees(LetterStatModel):
class KNearest(LetterStatModel):
def __init__(self):
self.model = cv2.ml.KNearest_create()
self.model = cv.ml.KNearest_create()
def train(self, samples, responses):
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
self.model.train(samples, cv.ml.ROW_SAMPLE, responses)
def predict(self, samples):
_retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10)
@ -82,17 +82,17 @@ class KNearest(LetterStatModel):
class Boost(LetterStatModel):
def __init__(self):
self.model = cv2.ml.Boost_create()
self.model = cv.ml.Boost_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
new_samples = self.unroll_samples(samples)
new_responses = self.unroll_responses(responses)
var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8)
var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8)
self.model.setWeakCount(15)
self.model.setMaxDepth(10)
self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types))
def predict(self, samples):
new_samples = self.unroll_samples(samples)
@ -103,14 +103,14 @@ class Boost(LetterStatModel):
class SVM(LetterStatModel):
def __init__(self):
self.model = cv2.ml.SVM_create()
self.model = cv.ml.SVM_create()
def train(self, samples, responses):
self.model.setType(cv2.ml.SVM_C_SVC)
self.model.setType(cv.ml.SVM_C_SVC)
self.model.setC(1)
self.model.setKernel(cv2.ml.SVM_RBF)
self.model.setKernel(cv.ml.SVM_RBF)
self.model.setGamma(.1)
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int))
self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int))
def predict(self, samples):
_ret, resp = self.model.predict(samples)
@ -119,7 +119,7 @@ class SVM(LetterStatModel):
class MLP(LetterStatModel):
def __init__(self):
self.model = cv2.ml.ANN_MLP_create()
self.model = cv.ml.ANN_MLP_create()
def train(self, samples, responses):
_sample_n, var_n = samples.shape
@ -127,13 +127,13 @@ class MLP(LetterStatModel):
layer_sizes = np.int32([var_n, 100, 100, self.class_n])
self.model.setLayerSizes(layer_sizes)
self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP)
self.model.setBackpropMomentumScale(0)
self.model.setBackpropWeightScale(0.001)
self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01))
self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1)
self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses))
self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses))
def predict(self, samples):
_ret, resp = self.model.predict(samples)

View File

@ -11,7 +11,7 @@ between frames. Finds homography between reference and current views.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
@ -19,7 +19,7 @@ from tests_common import NewOpenCVTests, isPointInRect
lk_params = dict( winSize = (19, 19),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 1000,
qualityLevel = 0.01,
@ -27,8 +27,8 @@ feature_params = dict( maxCorners = 1000,
blockSize = 19 )
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
status = d < back_threshold
return p1, status
@ -48,9 +48,9 @@ class lk_homography_test(NewOpenCVTests):
self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0)
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
self.frame0 = frame.copy()
self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
isForegroundHomographyFound = False
@ -66,7 +66,7 @@ class lk_homography_test(NewOpenCVTests):
while self.framesCounter < 200:
self.framesCounter += 1
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if self.p0 is not None:
p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)
@ -77,7 +77,7 @@ class lk_homography_test(NewOpenCVTests):
if len(self.p0) < 4:
self.p0 = None
continue
_H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)
_H, status = cv.findHomography(self.p0, self.p1, cv.RANSAC, 5.0)
goodPointsInRect = 0
goodPointsOutsideRect = 0
@ -91,7 +91,7 @@ class lk_homography_test(NewOpenCVTests):
isForegroundHomographyFound = True
self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
else:
self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
self.assertEqual(isForegroundHomographyFound, True)

View File

@ -13,7 +13,7 @@ between frames.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
@ -21,7 +21,7 @@ from tests_common import NewOpenCVTests, intersectionRate, isPointInRect
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
@ -32,7 +32,7 @@ def getRectFromPoints(points):
distances = []
for point in points:
distances.append(cv2.norm(point, cv2.NORM_L2))
distances.append(cv.norm(point, cv.NORM_L2))
x0, y0 = points[np.argmin(distances)]
x1, y1 = points[np.argmax(distances)]
@ -58,13 +58,13 @@ class lk_track_test(NewOpenCVTests):
while True:
frame = self.render.getNextFrame()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
@ -98,8 +98,8 @@ class lk_track_test(NewOpenCVTests):
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
cv.circle(mask, (x, y), 5, 0, -1)
p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([[(x, y), self.frame_idx]])

View File

@ -2,18 +2,18 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
class Bindings(NewOpenCVTests):
def test_inheritance(self):
bm = cv2.StereoBM_create()
bm = cv.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from StereoMatcher
boost = cv2.ml.Boost_create()
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel

View File

@ -10,7 +10,7 @@ import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -43,8 +43,8 @@ class morphology_test(NewOpenCVTests):
str_name = 'MORPH_' + cur_str_mode.upper()
oper_name = 'MORPH_' + op.upper()
st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
return cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)
st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz))
return cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters)
for mode in modes:
res = update(mode)

View File

@ -7,7 +7,7 @@ MSER detector test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -33,7 +33,7 @@ class mser_test(NewOpenCVTests):
]
thresharr = [ 0, 70, 120, 180, 255 ]
kDelta = 5
mserExtractor = cv2.MSER_create()
mserExtractor = cv.MSER_create()
mserExtractor.setDelta(kDelta)
np.random.seed(10)
@ -53,11 +53,11 @@ class mser_test(NewOpenCVTests):
mserExtractor.setMinArea(kMinArea)
mserExtractor.setMaxArea(kMaxArea)
if invert:
cv2.bitwise_not(src, src)
cv.bitwise_not(src, src)
if binarize:
_, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY)
_, src = cv.threshold(src, thresh, 255, cv.THRESH_BINARY)
if blur:
src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5)
src = cv.GaussianBlur(src, (5, 5), 1.5, 1.5)
minRegs = 7 if use_big_image else 2
maxRegs = 1000 if use_big_image else 20
if binarize and (thresh == 0 or thresh == 255):

View File

@ -8,7 +8,7 @@ example to detect upright people in images using HOG features
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
def inside(r, q):
@ -21,8 +21,8 @@ from tests_common import NewOpenCVTests, intersectionRate
class peopledetect_test(NewOpenCVTests):
def test_peopledetect(self):
hog = cv2.HOGDescriptor()
hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )
hog = cv.HOGDescriptor()
hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() )
dirPath = 'samples/data/'
samples = ['basketball1.png', 'basketball2.png']

View File

@ -1,5 +1,5 @@
#!/usr/bin/env python
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -7,14 +7,14 @@ class shape_test(NewOpenCVTests):
def test_computeDistance(self):
a = self.get_sample('samples/data/shape_sample/1.png', cv2.IMREAD_GRAYSCALE)
b = self.get_sample('samples/data/shape_sample/2.png', cv2.IMREAD_GRAYSCALE)
a = self.get_sample('samples/data/shape_sample/1.png', cv.IMREAD_GRAYSCALE)
b = self.get_sample('samples/data/shape_sample/2.png', cv.IMREAD_GRAYSCALE)
_, ca, _ = cv2.findContours(a, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
_, cb, _ = cv2.findContours(b, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
_, ca, _ = cv.findContours(a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
_, cb, _ = cv.findContours(b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
hd = cv2.createHausdorffDistanceExtractor()
sd = cv2.createShapeContextDistanceExtractor()
hd = cv.createHausdorffDistanceExtractor()
sd = cv.createShapeContextDistanceExtractor()
d1 = hd.computeDistance(ca[0], cb[0])
d2 = sd.computeDistance(ca[0], cb[0])

View File

@ -14,7 +14,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
def angle_cos(p0, p1, p2):
@ -22,20 +22,20 @@ def angle_cos(p0, p1, p2):
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_squares(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for gray in cv.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
bin = cv.Canny(gray, 0, 50, apertureSize=5)
bin = cv.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
_retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY)
bin, contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
cnt_len = cv.arcLength(cnt, True)
cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1 and filterSquares(squares, cnt):
@ -44,8 +44,8 @@ def find_squares(img):
return squares
def intersectionRate(s1, s2):
area, _intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2))
return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2)))
area, _intersection = cv.intersectConvexConvex(np.array(s1), np.array(s2))
return 2 * area / (cv.contourArea(np.array(s1)) + cv.contourArea(np.array(s2)))
def filterSquares(squares, square):

View File

@ -1,5 +1,5 @@
#!/usr/bin/env python
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -10,11 +10,11 @@ class stitching_test(NewOpenCVTests):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv2.createStitcher(False)
stitcher = cv.createStitcher(False)
(_result, pano) = stitcher.stitch((img1, img2))
#cv2.imshow("pano", pano)
#cv2.waitKey()
#cv.imshow("pano", pano)
#cv.waitKey()
self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape))
self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape))

View File

@ -3,7 +3,7 @@
'''
Texture flow direction estimation.
Sample shows how cv2.cornerEigenValsAndVecs function can be used
Sample shows how cv.cornerEigenValsAndVecs function can be used
to estimate image texture flow direction.
'''
@ -11,7 +11,7 @@ to estimate image texture flow direction.
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import sys
from tests_common import NewOpenCVTests
@ -23,10 +23,10 @@ class texture_flow_test(NewOpenCVTests):
img = self.get_sample('samples/data/chessboard.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
h, w = img.shape[:2]
eigen = cv2.cornerEigenValsAndVecs(gray, 5, 3)
eigen = cv.cornerEigenValsAndVecs(gray, 5, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
flow = eigen[:,:,2]
@ -40,8 +40,8 @@ class texture_flow_test(NewOpenCVTests):
textureVectors.append(np.int32(flow[y, x]*d))
for i in range(len(textureVectors)):
self.assertTrue(cv2.norm(textureVectors[i], cv2.NORM_L2) < eps
or abs(cv2.norm(textureVectors[i], cv2.NORM_L2) - d) < eps)
self.assertTrue(cv.norm(textureVectors[i], cv.NORM_L2) < eps
or abs(cv.norm(textureVectors[i], cv.NORM_L2) - d) < eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
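For orientation, a minimal standalone sketch of the cv.cornerEigenValsAndVecs pattern the test above exercises ('chessboard.png' is an assumed local path, not part of this commit):
@code{.py}
# Minimal sketch: texture-flow direction from the structure-tensor eigenvectors.
import numpy as np
import cv2 as cv

gray = cv.imread('chessboard.png', cv.IMREAD_GRAYSCALE)
h, w = gray.shape[:2]
eigen = cv.cornerEigenValsAndVecs(gray, 5, 3)  # blockSize=5, Sobel ksize=3
eigen = eigen.reshape(h, w, 3, 2)              # [[e1, e2], v1, v2]
flow = eigen[:, :, 2]                          # per-pixel flow direction vectors
@endcode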

View File

@ -2,7 +2,7 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -11,39 +11,39 @@ class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv2.UMat(data) # from ndarray
data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256]) # from UMat
data_dst_um = cv2.UMat(128, 128, cv2.CV_64F) # from size/type
data_um = cv.UMat(data) # from ndarray
data_sub_um = cv.UMat(data_um, [128, 256], [128, 256]) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
# test continuous and submatrix flags
assert data_um.isContinuous() and not data_um.isSubmatrix()
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv2.multiply(data_sub_um, 2., dst=data_dst_um)
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
def test_umat_handle(self):
a_um = cv2.UMat(256, 256, cv2.CV_32F)
_ctx_handle = cv2.UMat.context() # obtain context handle
_queue_handle = cv2.UMat.queue() # obtain queue handle
_a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
a_um = cv.UMat(256, 256, cv.CV_32F)
_ctx_handle = cv.UMat.context() # obtain context handle
_queue_handle = cv.UMat.queue() # obtain queue handle
_a_handle = a_um.handle(cv.ACCESS_READ) # obtain buffer handle
_offset = a_um.offset # obtain buffer offset
def test_umat_matching(self):
img1 = self.get_sample("samples/data/right01.jpg")
img2 = self.get_sample("samples/data/right02.jpg")
orb = cv2.ORB_create()
orb = cv.ORB_create()
img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
img1, img2 = cv.UMat(img1), cv.UMat(img2)
ps1, descs_umat1 = orb.detectAndCompute(img1, None)
ps2, descs_umat2 = orb.detectAndCompute(img2, None)
self.assertIsInstance(descs_umat1, cv2.UMat)
self.assertIsInstance(descs_umat2, cv2.UMat)
self.assertIsInstance(descs_umat1, cv.UMat)
self.assertIsInstance(descs_umat2, cv.UMat)
self.assertGreater(len(ps1), 0)
self.assertGreater(len(ps2), 0)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
res_umats = bf.match(descs_umat1, descs_umat2)
res = bf.match(descs_umat1.get(), descs_umat2.get())
@ -52,8 +52,8 @@ class UMat(NewOpenCVTests):
self.assertEqual(len(res_umats), len(res))
def test_umat_optical_flow(self):
img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
# Note: to see a performance boost from the OCL implementation you need enough data.
# For example, you can increase the maxCorners param to 10000 and enlarge img1 and img2 like this:
# img = np.hstack([np.vstack([img] * 6)] * 6)
@ -63,19 +63,19 @@ class UMat(NewOpenCVTests):
minDistance=7,
blockSize=7)
p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
self.assertEqual(p0_umat.get().shape, p0.shape)
p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
self.assertTrue(np.allclose(p0_umat.get(), p0))
_p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)
_p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)
_p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
_p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
_p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))
_p1_mask_err_umat0 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
_p1_mask_err_umat1 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None))
_p1_mask_err_umat2 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None))
# # results of OCL optical flow differ from the CPU implementation, so the results cannot be easily compared
# for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
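The note above about seeing an OCL performance boost boils down to giving the UMat path enough work. A hedged sketch of that scaling trick (qualityLevel=0.3 is an assumed value; the test's own setting is not visible in this hunk):
@code{.py}
# Hedged sketch: tile the input 6x6 and raise maxCorners to 10000 so the
# OpenCL (UMat) code path has enough data to outperform the CPU path.
import numpy as np
import cv2 as cv

img = cv.imread('right01.jpg', cv.IMREAD_GRAYSCALE)
img = np.hstack([np.vstack([img] * 6)] * 6)
feature_params = dict(maxCorners=10000, qualityLevel=0.3,
                      minDistance=7, blockSize=7)
p0_umat = cv.goodFeaturesToTrack(cv.UMat(img), mask=None, **feature_params)
@endcode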

View File

@ -8,7 +8,7 @@ Watershed segmentation test
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from tests_common import NewOpenCVTests
@ -23,14 +23,14 @@ class watershed_test(NewOpenCVTests):
self.assertEqual(0, 1, 'Missing test data')
colors = np.int32( list(np.ndindex(3, 3, 3)) ) * 122
cv2.watershed(img, np.int32(markers))
cv.watershed(img, np.int32(markers))
segments = colors[np.maximum(markers, 0)]
if refSegments is None:
refSegments = segments.copy()
cv2.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)
cv.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments)
self.assertLess(cv2.norm(segments - refSegments, cv2.NORM_L1) / 255.0, 50)
self.assertLess(cv.norm(segments - refSegments, cv.NORM_L1) / 255.0, 50)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -10,7 +10,7 @@ import random
import argparse
import numpy as np
import cv2
import cv2 as cv
# Python 3 moved urlopen to urllib.request
try:
@ -26,7 +26,7 @@ class NewOpenCVTests(unittest.TestCase):
# github repository url
repoUrl = 'https://raw.github.com/opencv/opencv/master'
def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR):
def get_sample(self, filename, iscolor = cv.IMREAD_COLOR):
if not filename in self.image_cache:
filedata = None
if NewOpenCVTests.repoPath is not None:
@ -41,11 +41,11 @@ class NewOpenCVTests(unittest.TestCase):
filedata = f.read()
if filedata is None:
return None  # filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read()
self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
self.image_cache[filename] = cv.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
return self.image_cache[filename]
def setUp(self):
cv2.setRNGSeed(10)
cv.setRNGSeed(10)
self.image_cache = {}
def hashimg(self, im):
@ -73,7 +73,7 @@ class NewOpenCVTests(unittest.TestCase):
parser.add_argument('--data', help='<not used> use data files from local folder (path to folder), '
'if not set, data files will be downloaded from docs.opencv.org')
args, other = parser.parse_known_args()
print("Testing OpenCV", cv2.__version__)
print("Testing OpenCV", cv.__version__)
print("Local repo path:", args.repo)
NewOpenCVTests.repoPath = args.repo
try:
@ -93,8 +93,8 @@ def intersectionRate(s1, s2):
x1, y1, x2, y2 = s2
s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, _intersection = cv2.intersectConvexConvex(s1, s2)
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2))
area, _intersection = cv.intersectConvexConvex(s1, s2)
return 2 * area / (cv.contourArea(s1) + cv.contourArea(s2))
def isPointInRect(p, rect):
if rect[0] <= p[0] and rect[1] <=p[1] and p[0] <= rect[2] and p[1] <= rect[3]:
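The "Python 3 moved urlopen" comment near the top of this file refers to the usual compatibility shim; a minimal sketch, assuming only urlopen is needed:
@code{.py}
# Python 2/3 import shim for urlopen.
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2
@endcode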

View File

@ -7,7 +7,7 @@ from __future__ import print_function
import numpy as np
from numpy import pi, sin, cos
import cv2
import cv2 as cv
defaultSize = 512
@ -88,14 +88,14 @@ class TestSceneRender():
self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3))
if self.deformation:
self.currentRect[1:3] += int(self.h/20*cos(self.time))
cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255))
cv.fillConvexPoly(img, self.currentRect, (0, 0, 255))
self.time += self.timeStep
if self.noise:
noise = np.zeros(self.sceneBg.shape, np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
img = cv2.add(img, noise, dtype=cv2.CV_8UC3)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
img = cv.add(img, noise, dtype=cv.CV_8UC3)
return img
def resetTime(self):
@ -104,16 +104,16 @@ class TestSceneRender():
if __name__ == '__main__':
backGr = cv2.imread('../../../samples/data/lena.jpg')
backGr = cv.imread('../../../samples/data/lena.jpg')
render = TestSceneRender(backGr, noise = 0.5)
while True:
img = render.getNextFrame()
cv2.imshow('img', img)
cv.imshow('img', img)
ch = cv2.waitKey(3)
ch = cv.waitKey(3)
if ch == 27:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()

View File

@ -1,6 +1,6 @@
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
from cv2 import dnn
import timeit
@ -11,7 +11,7 @@ def get_class_list():
with open('synset_words.txt', 'rt') as f:
return [x[x.find(" ") + 1:] for x in f]
blob = dnn.blobFromImage(cv2.imread('space_shuttle.jpg'), 1, (224, 224), (104, 117, 123), False)
blob = dnn.blobFromImage(cv.imread('space_shuttle.jpg'), 1, (224, 224), (104, 117, 123), False)
print("Input:", blob.shape, blob.dtype)
net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')

View File

@ -8,11 +8,11 @@ Utility for measuring python opencv API coverage by samples.
from __future__ import print_function
from glob import glob
import cv2
import cv2 as cv
import re
if __name__ == '__main__':
cv2_callable = set(['cv2.'+name for name in dir(cv2) if callable( getattr(cv2, name) )])
cv2_callable = set(['cv.'+name for name in dir(cv) if callable( getattr(cv, name) )])
found = set()
for fn in glob('*.py'):
@ -26,4 +26,4 @@ if __name__ == '__main__':
f.write('\n'.join(sorted(cv2_unused)))
r = 1.0 * len(cv2_used) / len(cv2_callable)
print('\ncv2 api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 ))
print('\ncv api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 ))
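Conceptually, after the rename the utility just matches 'cv.<name>' tokens in the samples against the callable API surface. A hypothetical sketch of that step (the regex and file handling here are assumptions, not the script's exact code):
@code{.py}
# Hypothetical sketch: measure how many callable cv.* names the samples use.
import re
from glob import glob
import cv2 as cv

cv_callable = set('cv.' + name for name in dir(cv) if callable(getattr(cv, name)))
found = set()
for fn in glob('*.py'):
    with open(fn) as f:
        found |= set(re.findall(r'cv\.\w+', f.read()))
used = found & cv_callable
print('cv api coverage: %d / %d' % (len(used), len(cv_callable)))
@endcode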

View File

@ -23,7 +23,7 @@ USAGE
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
# built-in modules
import itertools as it
@ -51,18 +51,18 @@ def affine_skew(tilt, phi, img, mask=None):
A = np.float32([[c,-s], [ s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]]
tcorners = np.int32( np.dot(corners, A.T) )
x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
x, y, w, h = cv.boundingRect(tcorners.reshape(1,-1,2))
A = np.hstack([A, [[-x], [-y]]])
img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
img = cv.warpAffine(img, A, (w, h), flags=cv.INTER_LINEAR, borderMode=cv.BORDER_REPLICATE)
if tilt != 1.0:
s = 0.8*np.sqrt(tilt*tilt-1)
img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
img = cv.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv.INTER_NEAREST)
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2]
mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
Ai = cv2.invertAffineTransform(A)
mask = cv.warpAffine(mask, A, (w, h), flags=cv.INTER_NEAREST)
Ai = cv.invertAffineTransform(A)
return img, mask, Ai
@ -119,8 +119,8 @@ if __name__ == '__main__':
fn1 = '../data/aero1.jpg'
fn2 = '../data/aero3.jpg'
img1 = cv2.imread(fn1, 0)
img2 = cv2.imread(fn2, 0)
img1 = cv.imread(fn1, 0)
img2 = cv.imread(fn2, 0)
detector, matcher = init_feature(feature_name)
if img1 is None:
@ -137,7 +137,7 @@ if __name__ == '__main__':
print('using', feature_name)
pool=ThreadPool(processes = cv2.getNumberOfCPUs())
pool=ThreadPool(processes = cv.getNumberOfCPUs())
kp1, desc1 = affine_detect(detector, img1, pool=pool)
kp2, desc2 = affine_detect(detector, img2, pool=pool)
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
@ -147,7 +147,7 @@ if __name__ == '__main__':
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
@ -159,5 +159,5 @@ if __name__ == '__main__':
match_and_draw('affine find_obj')
cv2.waitKey()
cv2.destroyAllWindows()
cv.waitKey()
cv.destroyAllWindows()
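For reference, a hedged usage sketch of the affine_skew helper defined above (assumes affine_skew is in scope; the tilt/phi values are arbitrary illustrations):
@code{.py}
# Hedged usage sketch: simulate one tilt/rotation pair; Ai is the inverse
# affine map for projecting keypoints back into the original image.
import cv2 as cv

img = cv.imread('../data/aero1.jpg', 0)
timg, tmask, Ai = affine_skew(2.0, 30.0, img)   # tilt=2.0, phi=30 degrees
h, w = timg.shape[:2]
print('skewed size: %dx%d' % (w, h))
@endcode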

View File

@ -21,7 +21,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
# built-in modules
import sys
@ -34,7 +34,7 @@ if __name__ == '__main__':
if len(sys.argv) > 1:
fn = sys.argv[1]
print('loading %s ...' % fn)
img = cv2.imread(fn)
img = cv.imread(fn)
if img is None:
print('Failed to load fn:', fn)
sys.exit(1)
@ -45,21 +45,21 @@ if __name__ == '__main__':
img = np.zeros((sz, sz), np.uint8)
track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
track = np.int32(track*10 + (sz/2, sz/2))
cv2.polylines(img, [track], 0, 255, 1, cv2.LINE_AA)
cv.polylines(img, [track], 0, 255, 1, cv.LINE_AA)
small = img
for i in xrange(3):
small = cv2.pyrDown(small)
small = cv.pyrDown(small)
def onmouse(event, x, y, flags, param):
h, _w = img.shape[:2]
h1, _w1 = small.shape[:2]
x, y = 1.0*x*h/h1, 1.0*y*h/h1
zoom = cv2.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
cv2.imshow('zoom', zoom)
zoom = cv.getRectSubPix(img, (800, 600), (x+0.5, y+0.5))
cv.imshow('zoom', zoom)
cv2.imshow('preview', small)
cv2.setMouseCallback('preview', onmouse)
cv2.waitKey()
cv2.destroyAllWindows()
cv.imshow('preview', small)
cv.setMouseCallback('preview', onmouse)
cv.waitKey()
cv.destroyAllWindows()

View File

@ -17,7 +17,7 @@ default values:
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
# local modules
from common import splitfn
@ -53,27 +53,27 @@ if __name__ == '__main__':
obj_points = []
img_points = []
h, w = cv2.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results
h, w = cv.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results
def processImage(fn):
print('processing %s... ' % fn)
img = cv2.imread(fn, 0)
img = cv.imread(fn, 0)
if img is None:
print("Failed to load", fn)
return None
assert w == img.shape[1] and h == img.shape[0], ("size: %d x %d ... " % (img.shape[1], img.shape[0]))
found, corners = cv2.findChessboardCorners(img, pattern_size)
found, corners = cv.findChessboardCorners(img, pattern_size)
if found:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, pattern_size, corners, found)
vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
outfile = os.path.join(debug_dir, name + '_chess.png')
cv2.imwrite(outfile, vis)
cv.imwrite(outfile, vis)
if not found:
print('chessboard not found')
@ -97,7 +97,7 @@ if __name__ == '__main__':
obj_points.append(pattern_points)
# calculate camera distortion
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
@ -110,20 +110,20 @@ if __name__ == '__main__':
img_found = os.path.join(debug_dir, name + '_chess.png')
outfile = os.path.join(debug_dir, name + '_undistorted.png')
img = cv2.imread(img_found)
img = cv.imread(img_found)
if img is None:
continue
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))
newcameramtx, roi = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))
dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)
dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)
# crop and save the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
print('Undistorted image written to: %s' % outfile)
cv2.imwrite(outfile, dst)
cv.imwrite(outfile, dst)
cv2.destroyAllWindows()
cv.destroyAllWindows()
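A hedged companion step, standard OpenCV practice rather than part of this file's diff: estimating the mean reprojection error from the cv.calibrateCamera outputs above. It assumes the obj_points/img_points lists built in the loop; shapes may need adjusting to the script's exact layout.
@code{.py}
# Hedged sketch: reproject the object points with the recovered extrinsics and
# compare against the detected corners; lower is better.
total_error = 0
for i in range(len(obj_points)):
    proj, _ = cv.projectPoints(obj_points[i], rvecs[i], tvecs[i],
                               camera_matrix, dist_coefs)
    total_error += cv.norm(img_points[i], proj.reshape(-1, 2), cv.NORM_L2) / len(proj)
print("mean reprojection error:", total_error / len(obj_points))
@endcode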

View File

@ -31,7 +31,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
# local module
import video
@ -42,8 +42,8 @@ class App(object):
def __init__(self, video_src):
self.cam = video.create_capture(video_src, presets['cube'])
_ret, self.frame = self.cam.read()
cv2.namedWindow('camshift')
cv2.setMouseCallback('camshift', self.onmouse)
cv.namedWindow('camshift')
cv.setMouseCallback('camshift', self.onmouse)
self.selection = None
self.drag_start = None
@ -51,7 +51,7 @@ class App(object):
self.track_window = None
def onmouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
self.track_window = None
if self.drag_start:
@ -60,7 +60,7 @@ class App(object):
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax, ymax)
if event == cv2.EVENT_LBUTTONUP:
if event == cv.EVENT_LBUTTONUP:
self.drag_start = None
self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
@ -70,52 +70,52 @@ class App(object):
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
cv2.imshow('hist', img)
cv.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv.cvtColor(img, cv.COLOR_HSV2BGR)
cv.imshow('hist', img)
def run(self):
while True:
_ret, self.frame = self.cam.read()
vis = self.frame.copy()
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
if self.selection:
x0, y0, x1, y1 = self.selection
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
self.hist = hist.reshape(-1)
self.show_hist()
vis_roi = vis[y0:y1, x0:x1]
cv2.bitwise_not(vis_roi, vis_roi)
cv.bitwise_not(vis_roi, vis_roi)
vis[mask == 0] = 0
if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
self.selection = None
prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
prob &= mask
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)
if self.show_backproj:
vis[:] = prob[...,np.newaxis]
try:
cv2.ellipse(vis, track_box, (0, 0, 255), 2)
cv.ellipse(vis, track_box, (0, 0, 255), 2)
except:
print(track_box)
cv2.imshow('camshift', vis)
cv.imshow('camshift', vis)
ch = cv2.waitKey(5)
ch = cv.waitKey(5)
if ch == 27:
break
if ch == ord('b'):
self.show_backproj = not self.show_backproj
cv2.destroyAllWindows()
cv.destroyAllWindows()
if __name__ == '__main__':

View File

@ -18,7 +18,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
h, w = img.shape[:2]
@ -26,19 +26,19 @@ def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
for i in xrange(iter_n):
print(i)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
x, y = eigen[:,:,1,0], eigen[:,:,1,1]
gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma)
gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma)
gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma)
gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
m = gvv < 0
ero = cv2.erode(img, None)
dil = cv2.dilate(img, None)
ero = cv.erode(img, None)
dil = cv.dilate(img, None)
img1 = ero
img1[m] = dil[m]
img = np.uint8(img*(1.0 - blend) + img1*blend)
@ -53,33 +53,33 @@ if __name__ == '__main__':
except:
fn = '../data/baboon.jpg'
src = cv2.imread(fn)
src = cv.imread(fn)
def nothing(*argv):
pass
def update():
sigma = cv2.getTrackbarPos('sigma', 'control')*2+1
str_sigma = cv2.getTrackbarPos('str_sigma', 'control')*2+1
blend = cv2.getTrackbarPos('blend', 'control') / 10.0
sigma = cv.getTrackbarPos('sigma', 'control')*2+1
str_sigma = cv.getTrackbarPos('str_sigma', 'control')*2+1
blend = cv.getTrackbarPos('blend', 'control') / 10.0
print('sigma: %d str_sigma: %d blend_coef: %f' % (sigma, str_sigma, blend))
dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend)
cv2.imshow('dst', dst)
cv.imshow('dst', dst)
cv2.namedWindow('control', 0)
cv2.createTrackbar('sigma', 'control', 9, 15, nothing)
cv2.createTrackbar('blend', 'control', 7, 10, nothing)
cv2.createTrackbar('str_sigma', 'control', 9, 15, nothing)
cv.namedWindow('control', 0)
cv.createTrackbar('sigma', 'control', 9, 15, nothing)
cv.createTrackbar('blend', 'control', 7, 10, nothing)
cv.createTrackbar('str_sigma', 'control', 9, 15, nothing)
print('Press SPACE to update the image\n')
cv2.imshow('src', src)
cv.imshow('src', src)
update()
while True:
ch = cv2.waitKey()
ch = cv.waitKey()
if ch == ord(' '):
update()
if ch == 27:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()

View File

@ -9,7 +9,7 @@ Keys:
'''
import numpy as np
import cv2
import cv2 as cv
# built-in modules
import sys
@ -24,16 +24,16 @@ if __name__ == '__main__':
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
cv2.imshow('hsv_map', hsv_map)
hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
cv.imshow('hsv_map', hsv_map)
cv2.namedWindow('hist', 0)
cv.namedWindow('hist', 0)
hist_scale = 10
def set_scale(val):
global hist_scale
hist_scale = val
cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
cv.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
try:
fn = sys.argv[1]
@ -43,20 +43,20 @@ if __name__ == '__main__':
while True:
flag, frame = cam.read()
cv2.imshow('camera', frame)
cv.imshow('camera', frame)
small = cv2.pyrDown(frame)
small = cv.pyrDown(frame)
hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
dark = hsv[...,2] < 32
hsv[dark] = 0
h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h*0.005*hist_scale, 0, 1)
vis = hsv_map*h[:,:,np.newaxis] / 255.0
cv2.imshow('hist', vis)
cv.imshow('hist', vis)
ch = cv2.waitKey(1)
ch = cv.waitKey(1)
if ch == 27:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()

View File

@ -13,7 +13,7 @@ if PY3:
from functools import reduce
import numpy as np
import cv2
import cv2 as cv
# built-in modules
import os
@ -71,7 +71,7 @@ def lookat(eye, target, up = (0, 0, 1)):
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
@ -80,8 +80,8 @@ def mtx2rvec(R):
def draw_str(dst, target, s):
x, y = target
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
@ -91,21 +91,21 @@ class Sketcher:
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv2.EVENT_LBUTTONUP:
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
@ -140,7 +140,7 @@ def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
@ -166,16 +166,16 @@ class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
return
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
@ -192,7 +192,7 @@ class RectSelector:
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
@ -234,4 +234,4 @@ def mdot(*args):
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
cv.circle(vis, (int(x), int(y)), 2, color)

View File

@ -18,7 +18,7 @@ if PY3:
xrange = range
import numpy as np
import cv2
import cv2 as cv
def make_image():
img = np.zeros((500, 500), np.uint8)
@ -33,19 +33,19 @@ def make_image():
c, s = np.cos(angle), np.sin(angle)
x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
cv2.line(img, (x1, y1), (x2, y2), white)
cv.line(img, (x1, y1), (x2, y2), white)
cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
cv.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
cv.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
return img
if __name__ == '__main__':
@ -54,17 +54,17 @@ if __name__ == '__main__':
img = make_image()
h, w = img.shape[:2]
_, contours0, hierarchy = cv2.findContours( img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
_, contours0, hierarchy = cv.findContours( img.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contours = [cv.approxPolyDP(cnt, 3, True) for cnt in contours0]
def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = levels - 3
cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
3, cv2.LINE_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
cv.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
3, cv.LINE_AA, hierarchy, abs(levels) )
cv.imshow('contours', vis)
update(3)
cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()
cv.createTrackbar( "levels+3", "contours", 3, 7, update )
cv.imshow('image', img)
cv.waitKey()
cv.destroyAllWindows()

Some files were not shown because too many files have changed in this diff.