Updating Python tutorials + providing necessary data
@@ -173,7 +173,7 @@ from matplotlib import pyplot as plt

 BLUE = [255,0,0]

-img1 = cv2.imread('opencv_logo.png')
+img1 = cv2.imread('opencv-logo.png')

 replicate = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REPLICATE)
 reflect = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT)
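For context, the `BLUE` value kept above is defined in BGR order and is typically passed as the fill colour of a constant border later in this tutorial. A minimal sketch of that call, assuming the renamed `opencv-logo.png` is in the working directory:

@code{.py}
import cv2

BLUE = [255,0,0]   # BGR order, so this is pure blue
img1 = cv2.imread('opencv-logo.png')

# pad the image with a 10-pixel border on every side, filled with BLUE
constant = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE)
@endcode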
@@ -51,7 +51,7 @@ is given 0.3. cv2.addWeighted() applies following equation on the image.
 Here \f$\gamma\f$ is taken as zero.
 @code{.py}
 img1 = cv2.imread('ml.png')
-img2 = cv2.imread('opencv_logo.jpg')
+img2 = cv2.imread('opencv-logo.png')

 dst = cv2.addWeighted(img1,0.7,img2,0.3,0)

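The equation this hunk's context refers to is \f$dst = \alpha \cdot img1 + \beta \cdot img2 + \gamma\f$, so the call above computes 0.7*img1 + 0.3*img2 with \f$\gamma = 0\f$. A rough NumPy equivalent, assuming both images have the same shape and 8-bit depth:

@code{.py}
import numpy as np

# cv2.addWeighted saturates the result to the 0..255 range; this mimics that
dst_manual = np.clip(0.7*img1.astype(np.float32) + 0.3*img2.astype(np.float32),
                     0, 255).astype(np.uint8)
@endcode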
@@ -77,7 +77,7 @@ bitwise operations as below:
 @code{.py}
 # Load two images
 img1 = cv2.imread('messi5.jpg')
-img2 = cv2.imread('opencv_logo.png')
+img2 = cv2.imread('opencv-logo.png')

 # I want to put logo on top-left corner, So I create a ROI
 rows,cols,channels = img2.shape
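The "bitwise operations" mentioned in this hunk's context typically continue from the ROI set up above: build a mask of the logo, clear that region in the ROI, and add the logo pixels back. A sketch under those assumptions (the threshold value may need adjusting to the logo's background):

@code{.py}
roi = img1[0:rows, 0:cols]

# mask of the logo and its inverse
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

# black out the logo area in the ROI, keep only the logo pixels from img2
img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
img2_fg = cv2.bitwise_and(img2, img2, mask=mask)

# combine and write the result back into the original image
img1[0:rows, 0:cols] = cv2.add(img1_bg, img2_fg)
@endcode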
@@ -69,7 +69,7 @@ import cv2
 import numpy as np
 from matplotlib import pyplot as plt

-img = cv2.imread('opencv_logo.png')
+img = cv2.imread('opencv-logo-white.png')

 blur = cv2.blur(img,(5,5))

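cv2.blur(img,(5,5)) above averages each pixel over a 5x5 neighbourhood. An essentially equivalent explicit convolution, for readers who want to see the kernel:

@code{.py}
import numpy as np

kernel = np.ones((5,5), np.float32) / 25    # normalised 5x5 box filter
blur_manual = cv2.filter2D(img, -1, kernel)
@endcode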
@@ -135,7 +135,7 @@ matrix.

 See the code below:
 @code{.py}
-img = cv2.imread('sudokusmall.png')
+img = cv2.imread('sudoku.png')
 rows,cols,ch = img.shape

 pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
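The four points in `pts1` are corners picked from the sudoku image; the tutorial then maps them onto an axis-aligned square. A minimal sketch of those follow-up steps (the 300x300 output size is illustrative):

@code{.py}
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])

M = cv2.getPerspectiveTransform(pts1, pts2)   # 3x3 perspective matrix
dst = cv2.warpPerspective(img, M, (300,300))  # warped, fronto-parallel view
@endcode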
@@ -23,7 +23,7 @@ explained in the documentation. So we directly go to the code.
 import cv2
 import numpy as np

-img = cv2.imread('opencv_logo.png',0)
+img = cv2.imread('opencv-logo-white.png',0)
 img = cv2.medianBlur(img,5)
 cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)

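The blurred grayscale image and the colour copy `cimg` prepared above feed into cv2.HoughCircles further down in that tutorial. A sketch of a typical call, assuming OpenCV 3.x naming (`cv2.HOUGH_GRADIENT`); the parameter values are illustrative and usually need tuning:

@code{.py}
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=50, param2=30, minRadius=0, maxRadius=0)

if circles is not None:                      # HoughCircles returns None if nothing is found
    circles = np.uint16(np.around(circles))
    for x, y, r in circles[0, :]:
        cv2.circle(cimg, (x, y), r, (0, 255, 0), 2)   # detected circle
        cv2.circle(cimg, (x, y), 2, (0, 0, 255), 3)   # its centre
@endcode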
@@ -73,7 +73,7 @@ represents the minimum length of line that should be detected.
 import cv2
 import numpy as np

-img = cv2.imread('dave.jpg')
+img = cv2.imread('sudoku.png')
 gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
 edges = cv2.Canny(gray,50,150,apertureSize = 3)

@@ -121,7 +121,7 @@ the parameters of lines, and you had to find all the points. Here, everything is
 import cv2
 import numpy as np

-img = cv2.imread('dave.jpg')
+img = cv2.imread('sudoku.png')
 gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
 edges = cv2.Canny(gray,50,150,apertureSize = 3)
 lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
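cv2.HoughLinesP above returns one (x1,y1,x2,y2) segment per detected line. A short sketch of drawing them back onto the image, assuming OpenCV 3.x where the result has shape (N,1,4); the output filename is illustrative:

@code{.py}
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imwrite('houghlines.jpg', img)
@endcode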
@@ -87,7 +87,7 @@ import cv2
 import numpy as np
 from matplotlib import pyplot as plt

-img = cv2.imread('dave.jpg',0)
+img = cv2.imread('sudoku.png',0)
 img = cv2.medianBlur(img,5)

 ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
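The global threshold `th1` computed above is what the tutorial contrasts with adaptive thresholding on the unevenly lit sudoku image. A sketch of the adaptive variants; the block size and constant (11 and 2) are illustrative values commonly used in this context:

@code{.py}
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)
@endcode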
@@ -16,7 +16,7 @@ const char* keys =
 "{ help h | | print help message }"
 "{ image i | | specify input image}"
 "{ camera c | | enable camera capturing }"
-"{ video v | ../data/768x576.avi | use video as input }"
+"{ video v | ../data/vtest.avi | use video as input }"
 "{ directory d | | images directory}"
 };

@@ -79,7 +79,7 @@ int main(int argc, char** argv)
 namedWindow("people detector", 1);

 string pattern_glob = "";
-string video_filename = "../data/768x576.avi";
+string video_filename = "../data/vtest.avi";
 int camera_id = -1;
 if (parser.has("directory"))
 {
Binary sample data files:

 samples/data/apple.jpg              new file  (50 KiB)
 samples/data/gradient.png           new file  (55 KiB)
 samples/data/ml.png                 new file  (80 KiB)
 samples/data/opencv-logo-white.png  new file  (7.9 KiB)
 (modified binary image)             changed   (14 KiB before, 24 KiB after)
 samples/data/orange.jpg             new file  (49 KiB)
 samples/data/sudoku.png             new file  (245 KiB)
@@ -23,10 +23,10 @@ enum Method
 int main(int argc, const char** argv)
 {
 cv::CommandLineParser cmd(argc, argv,
 "{ c camera | | use camera }"
-"{ f file | ../data/768x576.avi | input video file }"
+"{ f file | ../data/vtest.avi | input video file }"
 "{ m method | mog | method (mog, mog2, gmg, fgd) }"
 "{ h help | | print help message }");

 if (cmd.has("help") || !cmd.check())
 {
@@ -1191,10 +1191,10 @@ TEST(GoodFeaturesToTrack)

 TEST(MOG)
 {
-const std::string inputFile = abspath("../data/768x576.avi");
+const std::string inputFile = abspath("../data/vtest.avi");

 cv::VideoCapture cap(inputFile);
-if (!cap.isOpened()) throw runtime_error("can't open ../data/768x576.avi");
+if (!cap.isOpened()) throw runtime_error("can't open ../data/vtest.avi");

 cv::Mat frame;
 cap >> frame;
@@ -17,11 +17,11 @@ using namespace cv;
 int main(int argc, const char** argv)
 {
 CommandLineParser cmd(argc, argv,
 "{ c camera | | use camera }"
-"{ f file | ../data/768x576.avi | input video file }"
+"{ f file | ../data/vtest.avi | input video file }"
 "{ t type | mog2 | method's type (knn, mog2) }"
 "{ h help | | print help message }"
 "{ m cpu_mode | false | press 'm' to switch OpenCL<->CPU}");

 if (cmd.has("help"))
 {
@@ -71,7 +71,7 @@ int main(int argc, char** argv)
 "{ h help | | print help message }"
 "{ i input | | specify input image}"
 "{ c camera | -1 | enable camera capturing }"
-"{ v video | ../data/768x576.avi | use video as input }"
+"{ v video | ../data/vtest.avi | use video as input }"
 "{ g gray | | convert image to gray one or not}"
 "{ s scale | 1.0 | resize the image before detect}"
 "{ o output | | specify output path when input is images}";