From c4c1e94088edeef3bee8fb5140656acfa2286054 Mon Sep 17 00:00:00 2001 From: tribta Date: Fri, 18 Aug 2017 15:06:05 +0100 Subject: [PATCH 1/4] Tutorial Adding Images --- .../core/adding_images/adding_images.markdown | 85 +++++++++++++++---- .../core/table_of_content_core.markdown | 2 + .../core/AddingImages/AddingImages.cpp | 3 +- .../core/AddingImages/AddingImages.java | 51 +++++++++++ .../core/AddingImages/adding_images.py | 35 ++++++++ 5 files changed, 158 insertions(+), 18 deletions(-) create mode 100644 samples/java/tutorial_code/core/AddingImages/AddingImages.java create mode 100644 samples/python/tutorial_code/core/AddingImages/adding_images.py diff --git a/doc/tutorials/core/adding_images/adding_images.markdown b/doc/tutorials/core/adding_images/adding_images.markdown index 012a2480fa..95cc19e124 100644 --- a/doc/tutorials/core/adding_images/adding_images.markdown +++ b/doc/tutorials/core/adding_images/adding_images.markdown @@ -1,13 +1,16 @@ Adding (blending) two images using OpenCV {#tutorial_adding_images} ========================================= +@prev_tutorial{tutorial_mat_operations} +@next_tutorial{tutorial_basic_linear_transform} + Goal ---- In this tutorial you will learn: - what is *linear blending* and why it is useful; -- how to add two images using @ref cv::addWeighted +- how to add two images using **addWeighted()** Theory ------ @@ -28,33 +31,83 @@ eh?) Source Code ----------- +@add_toggle_cpp Download the source code from -[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp). +[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp). @include cpp/tutorial_code/core/AddingImages/AddingImages.cpp +@end_toggle + +@add_toggle_java +Download the source code from +[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/AddingImages/AddingImages.java). +@include java/tutorial_code/core/AddingImages/AddingImages.java +@end_toggle + +@add_toggle_python +Download the source code from +[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/AddingImages/adding_images.py). +@include python/tutorial_code/core/AddingImages/adding_images.py +@end_toggle Explanation ----------- --# Since we are going to perform: +Since we are going to perform: - \f[g(x) = (1 - \alpha)f_{0}(x) + \alpha f_{1}(x)\f] +\f[g(x) = (1 - \alpha)f_{0}(x) + \alpha f_{1}(x)\f] - We need two source images (\f$f_{0}(x)\f$ and \f$f_{1}(x)\f$). So, we load them in the usual way: - @snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp load +We need two source images (\f$f_{0}(x)\f$ and \f$f_{1}(x)\f$). So, we load them in the usual way: +@add_toggle_cpp +@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp load +@end_toggle - **warning** +@add_toggle_java +@snippet java/tutorial_code/core/AddingImages/AddingImages.java load +@end_toggle - Since we are *adding* *src1* and *src2*, they both have to be of the same size (width and - height) and type. +@add_toggle_python +@snippet python/tutorial_code/core/AddingImages/adding_images.py load +@end_toggle --# Now we need to generate the `g(x)` image. For this, the function @ref cv::addWeighted comes quite handy: - @snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp blend_images - since @ref cv::addWeighted produces: - \f[dst = \alpha \cdot src1 + \beta \cdot src2 + \gamma\f] - In this case, `gamma` is the argument \f$0.0\f$ in the code above. 
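+For a quick end-to-end reference, the whole blend can also be written as a short standalone
+Python sketch (an illustrative example only, not one of the sample files; the image paths are
+placeholders and both images must have the same size and type):
+@code{.py}
+import cv2
+
+alpha = 0.3
+beta = 1.0 - alpha
+src1 = cv2.imread('LinuxLogo.jpg')      # f0(x)
+src2 = cv2.imread('WindowsLogo.jpg')    # f1(x)
+dst = cv2.addWeighted(src1, alpha, src2, beta, 0.0)   # dst = alpha*src1 + beta*src2 + gamma
+cv2.imshow('Linear Blend', dst)
+cv2.waitKey(0)
+@endcode
+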
+We used the following images: [LinuxLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/LinuxLogo.jpg) and [WindowsLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/WindowsLogo.jpg) --# Create windows, show the images and wait for the user to end the program. - @snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp display +@warning Since we are *adding* *src1* and *src2*, they both have to be of the same size +(width and height) and type. + +Now we need to generate the `g(x)` image. For this, the function **addWeighted()** comes quite handy: + +@add_toggle_cpp +@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp blend_images +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/AddingImages/AddingImages.java blend_images +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/AddingImages/adding_images.py blend_images +Numpy version of above line (but cv2 function is around 2x faster): +\code{.py} + dst = np.uint8(alpha*(img1)+beta*(img2)) +\endcode +@end_toggle + +since **addWeighted()** produces: +\f[dst = \alpha \cdot src1 + \beta \cdot src2 + \gamma\f] +In this case, `gamma` is the argument \f$0.0\f$ in the code above. + +Create windows, show the images and wait for the user to end the program. +@add_toggle_cpp +@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp display +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/AddingImages/AddingImages.java display +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/AddingImages/adding_images.py display +@end_toggle Result ------ diff --git a/doc/tutorials/core/table_of_content_core.markdown b/doc/tutorials/core/table_of_content_core.markdown index 2b9afb8b19..c0453dd611 100644 --- a/doc/tutorials/core/table_of_content_core.markdown +++ b/doc/tutorials/core/table_of_content_core.markdown @@ -40,6 +40,8 @@ understanding how to manipulate the images on a pixel level. 
- @subpage tutorial_adding_images + *Languages:* C++, Java, Python + *Compatibility:* \> OpenCV 2.0 *Author:* Ana Huamán diff --git a/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp b/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp index 4ddb6f0b02..1dd1d09546 100644 --- a/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp +++ b/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp @@ -3,7 +3,6 @@ * @brief Simple linear blender ( dst = alpha*src1 + beta*src2 ) * @author OpenCV team */ - #include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include @@ -24,7 +23,7 @@ int main( void ) /// Ask the user enter alpha cout << " Simple Linear Blender " << endl; cout << "-----------------------" << endl; - cout << "* Enter alpha [0-1]: "; + cout << "* Enter alpha [0.0-1.0]: "; cin >> input; // We use the alpha provided by the user if it is between 0 and 1 diff --git a/samples/java/tutorial_code/core/AddingImages/AddingImages.java b/samples/java/tutorial_code/core/AddingImages/AddingImages.java new file mode 100644 index 0000000000..238c5eeec4 --- /dev/null +++ b/samples/java/tutorial_code/core/AddingImages/AddingImages.java @@ -0,0 +1,51 @@ +import org.opencv.core.*; +import org.opencv.highgui.HighGui; +import org.opencv.imgcodecs.Imgcodecs; + +import java.util.Locale; +import java.util.Scanner; + +class AddingImagesRun{ + public void run() { + double alpha = 0.5; double beta; double input; + + Mat src1, src2, dst = new Mat(); + + System.out.println(" Simple Linear Blender "); + System.out.println("-----------------------"); + System.out.println("* Enter alpha [0.0-1.0]: "); + Scanner scan = new Scanner( System.in ).useLocale(Locale.US); + input = scan.nextDouble(); + + if( input >= 0.0 && input <= 1.0 ) + alpha = input; + + //! [load] + src1 = Imgcodecs.imread("../../images/LinuxLogo.jpg"); + src2 = Imgcodecs.imread("../../images/WindowsLogo.jpg"); + //! [load] + + if( src1.empty() == true ){ System.out.println("Error loading src1"); return;} + if( src2.empty() == true ){ System.out.println("Error loading src2"); return;} + + //! [blend_images] + beta = ( 1.0 - alpha ); + Core.addWeighted( src1, alpha, src2, beta, 0.0, dst); + //! [blend_images] + + //![display] + HighGui.imshow("Linear Blend", dst); + HighGui.waitKey(0); + //![display] + + System.exit(0); + } +} + +public class AddingImages { + public static void main(String[] args) { + // Load the native library. 
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME); + new AddingImagesRun().run(); + } +} diff --git a/samples/python/tutorial_code/core/AddingImages/adding_images.py b/samples/python/tutorial_code/core/AddingImages/adding_images.py new file mode 100644 index 0000000000..62abb3e4bb --- /dev/null +++ b/samples/python/tutorial_code/core/AddingImages/adding_images.py @@ -0,0 +1,35 @@ +from __future__ import print_function +import sys + +import cv2 + +alpha = 0.5 + +print(''' Simple Linear Blender +----------------------- +* Enter alpha [0.0-1.0]: ''') +if sys.version_info >= (3, 0): # If Python 3.x + input_alpha = float(input()) +else: + input_alpha = float(raw_input()) +if 0 <= alpha <= 1: + alpha = input_alpha +## [load] +src1 = cv2.imread('../../../../data/LinuxLogo.jpg') +src2 = cv2.imread('../../../../data/WindowsLogo.jpg') +## [load] +if src1 is None: + print ("Error loading src1") + exit(-1) +elif src2 is None: + print ("Error loading src2") + exit(-1) +## [blend_images] +beta = (1.0 - alpha) +dst = cv2.addWeighted(src1, alpha, src2, beta, 0.0) +## [blend_images] +## [display] +cv2.imshow('dst', dst) +cv2.waitKey(0) +## [display] +cv2.destroyAllWindows() From 13317bdfda691d25aef22376bfd9cbb87f244f4d Mon Sep 17 00:00:00 2001 From: tribta Date: Fri, 18 Aug 2017 21:33:28 +0100 Subject: [PATCH 2/4] Tutorial Basic Geometric Drawing --- .../basic_geometric_drawing.markdown | 261 +++++++++++++----- .../core/table_of_content_core.markdown | 2 + .../tutorial_code/core/Matrix/Drawing_1.cpp | 21 +- .../BasicGeometricDrawing.java | 186 +++++++++++++ .../basic_geometric_drawing.py | 115 ++++++++ 5 files changed, 511 insertions(+), 74 deletions(-) create mode 100644 samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java create mode 100644 samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py diff --git a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown index c27e3293a4..ec3a2ffadc 100644 --- a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown +++ b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown @@ -1,19 +1,21 @@ Basic Drawing {#tutorial_basic_geometric_drawing} ============= +@prev_tutorial{tutorial_basic_linear_transform} +@next_tutorial{tutorial_random_generator_and_text} + Goals ----- In this tutorial you will learn how to: -- Use @ref cv::Point to define 2D points in an image. 
-- Use @ref cv::Scalar and why it is useful -- Draw a **line** by using the OpenCV function @ref cv::line -- Draw an **ellipse** by using the OpenCV function @ref cv::ellipse -- Draw a **rectangle** by using the OpenCV function @ref cv::rectangle -- Draw a **circle** by using the OpenCV function @ref cv::circle -- Draw a **filled polygon** by using the OpenCV function @ref cv::fillPoly +- Draw a **line** by using the OpenCV function **line()** +- Draw an **ellipse** by using the OpenCV function **ellipse()** +- Draw a **rectangle** by using the OpenCV function **rectangle()** +- Draw a **circle** by using the OpenCV function **circle()** +- Draw a **filled polygon** by using the OpenCV function **fillPoly()** +@add_toggle_cpp OpenCV Theory ------------- @@ -42,86 +44,217 @@ Point pt = Point(10, 8); Scalar( a, b, c ) @endcode We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c* +@end_toggle + +@add_toggle_java +OpenCV Theory +------------- + +For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar : + +### Point + +It represents a 2D point, specified by its image coordinates \f$x\f$ and \f$y\f$. We can define it as: +@code{.java} +Point pt = new Point(); +pt.x = 10; +pt.y = 8; +@endcode +or +@code{.java} +Point pt = new Point(10, 8); +@endcode +### Scalar + +- Represents a 4-element vector. The type Scalar is widely used in OpenCV for passing pixel + values. +- In this tutorial, we will use it extensively to represent BGR color values (3 parameters). It is + not necessary to define the last argument if it is not going to be used. +- Let's see an example, if we are asked for a color argument and we give: + @code{.java} + Scalar( a, b, c ) + @endcode + We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c* +@end_toggle Code ---- +@add_toggle_cpp - This code is in your OpenCV sample folder. Otherwise you can grab it from - [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp) + [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp) @include samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp +@end_toggle + +@add_toggle_java +- This code is in your OpenCV sample folder. Otherwise you can grab it from + [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java) + @include samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java +@end_toggle + +@add_toggle_python +- This code is in your OpenCV sample folder. Otherwise you can grab it from + [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py) + @include samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py +@end_toggle Explanation ----------- --# Since we plan to draw two examples (an atom and a rook), we have to create two images and two - windows to display them. - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp create_images +Since we plan to draw two examples (an atom and a rook), we have to create two images and two +windows to display them. +@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp create_images +@end_toggle --# We created functions to draw different geometric shapes. 
For instance, to draw the atom we used - *MyEllipse* and *MyFilledCircle*: - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_atom +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java create_images +@end_toggle --# And to draw the rook we employed *MyLine*, *rectangle* and a *MyPolygon*: - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_rook +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py create_images +@end_toggle --# Let's check what is inside each of these functions: - - *MyLine* - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myline +We created functions to draw different geometric shapes. For instance, to draw the atom we used +**MyEllipse** and **MyFilledCircle**: +@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_atom +@end_toggle - As we can see, *MyLine* just call the function @ref cv::line , which does the following: +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java draw_atom +@end_toggle - - Draw a line from Point **start** to Point **end** - - The line is displayed in the image **img** - - The line color is defined by **Scalar( 0, 0, 0)** which is the RGB value correspondent - to **Black** - - The line thickness is set to **thickness** (in this case 2) - - The line is a 8-connected one (**lineType** = 8) - - *MyEllipse* - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myellipse +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py draw_atom +@end_toggle - From the code above, we can observe that the function @ref cv::ellipse draws an ellipse such - that: +And to draw the rook we employed **MyLine**, **rectangle** and a **MyPolygon**: +@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_rook +@end_toggle - - The ellipse is displayed in the image **img** - - The ellipse center is located in the point **(w/2, w/2)** and is enclosed in a box - of size **(w/4, w/16)** - - The ellipse is rotated **angle** degrees - - The ellipse extends an arc between **0** and **360** degrees - - The color of the figure will be **Scalar( 255, 0, 0)** which means blue in BGR value. - - The ellipse's **thickness** is 2. - - *MyFilledCircle* - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myfilledcircle +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java draw_rook +@end_toggle - Similar to the ellipse function, we can observe that *circle* receives as arguments: +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py draw_rook +@end_toggle - - The image where the circle will be displayed (**img**) - - The center of the circle denoted as the Point **center** - - The radius of the circle: **w/32** - - The color of the circle: **Scalar(0, 0, 255)** which means *Red* in BGR - - Since **thickness** = -1, the circle will be drawn filled. - - *MyPolygon* - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp mypolygon - To draw a filled polygon we use the function @ref cv::fillPoly . 
We note that: +Let's check what is inside each of these functions: +@add_toggle_cpp +@end_toggle - - The polygon will be drawn on **img** - - The vertices of the polygon are the set of points in **ppt** - - The total number of vertices to be drawn are **npt** - - The number of polygons to be drawn is only **1** - - The color of the polygon is defined by **Scalar( 255, 255, 255)**, which is the BGR - value for *white* - - *rectangle* - @snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp rectangle +

+#### MyLine

+@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp my_line +@end_toggle - Finally we have the @ref cv::rectangle function (we did not create a special function for - this guy). We note that: +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java my_line +@end_toggle - - The rectangle will be drawn on **rook_image** - - Two opposite vertices of the rectangle are defined by *\* Point( 0, 7*w/8 )*\* - andPoint( w, w)*\* - - The color of the rectangle is given by **Scalar(0, 255, 255)** which is the BGR value - for *yellow* - - Since the thickness value is given by **FILLED (-1)**, the rectangle will be filled. +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py my_line +@end_toggle + +- As we can see, **MyLine** just call the function **line()** , which does the following: + - Draw a line from Point **start** to Point **end** + - The line is displayed in the image **img** + - The line color is defined by ( 0, 0, 0 ) which is the RGB value correspondent + to **Black** + - The line thickness is set to **thickness** (in this case 2) + - The line is a 8-connected one (**lineType** = 8) + +
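+
+A minimal standalone sketch of the same call (for illustration only, not part of the sample
+files; the canvas size, coordinates and color below are arbitrary):
+@code{.py}
+import cv2
+import numpy as np
+
+img = np.zeros((400, 400, 3), dtype=np.uint8)               # black canvas
+# start point, end point, color (BGR), thickness, lineType
+cv2.line(img, (0, 350), (400, 350), (255, 255, 255), 2, 8)
+@endcode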

+#### MyEllipse

+@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp my_ellipse +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java my_ellipse +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py my_ellipse +@end_toggle + +- From the code above, we can observe that the function **ellipse()** draws an ellipse such + that: + + - The ellipse is displayed in the image **img** + - The ellipse center is located in the point (w/2, w/2) and is enclosed in a box + of size (w/4, w/16) + - The ellipse is rotated **angle** degrees + - The ellipse extends an arc between **0** and **360** degrees + - The color of the figure will be ( 255, 0, 0 ) which means blue in BGR value. + - The ellipse's **thickness** is 2. + +
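+
+A standalone sketch of such a call (illustration only; the size and angle values are arbitrary):
+@code{.py}
+import cv2
+import numpy as np
+
+W = 400
+img = np.zeros((W, W, 3), dtype=np.uint8)
+# center, axes, rotation angle, start/end angle of the arc, color (BGR), thickness
+cv2.ellipse(img, (W // 2, W // 2), (W // 4, W // 16), 45, 0, 360, (255, 0, 0), 2)
+@endcode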

+#### MyFilledCircle

+@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp my_filled_circle +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java my_filled_circle +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py my_filled_circle +@end_toggle + +- Similar to the ellipse function, we can observe that *circle* receives as arguments: + + - The image where the circle will be displayed (**img**) + - The center of the circle denoted as the point **center** + - The radius of the circle: **w/32** + - The color of the circle: ( 0, 0, 255 ) which means *Red* in BGR + - Since **thickness** = -1, the circle will be drawn filled. + +
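+
+The equivalent call in a standalone Python sketch (illustration only; the values are arbitrary):
+@code{.py}
+import cv2
+import numpy as np
+
+W = 400
+img = np.zeros((W, W, 3), dtype=np.uint8)
+# center, radius, color (BGR); thickness = -1 draws the circle filled
+cv2.circle(img, (W // 2, W // 2), W // 32, (0, 0, 255), -1)
+@endcode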

+#### MyPolygon

+@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp my_polygon +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java my_polygon +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py my_polygon +@end_toggle + +- To draw a filled polygon we use the function **fillPoly()** . We note that: + + - The polygon will be drawn on **img** + - The vertices of the polygon are the set of points in **ppt** + - The color of the polygon is defined by ( 255, 255, 255 ), which is the BGR + value for *white* + +
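+
+A minimal standalone sketch of **fillPoly()** with a simple triangle (illustration only; the
+vertex coordinates are arbitrary):
+@code{.py}
+import cv2
+import numpy as np
+
+img = np.zeros((400, 400, 3), dtype=np.uint8)
+pts = np.array([[100, 300], [200, 100], [300, 300]], np.int32)   # triangle vertices
+pts = pts.reshape((-1, 1, 2))
+cv2.fillPoly(img, [pts], (255, 255, 255))   # a list of polygons and the fill color
+@endcode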

+#### rectangle

+@add_toggle_cpp +@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp rectangle +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java rectangle +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py rectangle +@end_toggle + +- Finally we have the @ref cv::rectangle function (we did not create a special function for + this guy). We note that: + + - The rectangle will be drawn on **rook_image** + - Two opposite vertices of the rectangle are defined by ( 0, 7*w/8 ) + and ( w, w ) + - The color of the rectangle is given by ( 0, 255, 255 ) which is the BGR value + for *yellow* + - Since the thickness value is given by **FILLED (-1)**, the rectangle will be filled. Result ------ diff --git a/doc/tutorials/core/table_of_content_core.markdown b/doc/tutorials/core/table_of_content_core.markdown index c0453dd611..af040b9145 100644 --- a/doc/tutorials/core/table_of_content_core.markdown +++ b/doc/tutorials/core/table_of_content_core.markdown @@ -58,6 +58,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_basic_geometric_drawing + *Languages:* C++, Java, Python + *Compatibility:* \> OpenCV 2.0 *Author:* Ana Huamán diff --git a/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp b/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp index 7e184ddc1d..3f23a0b6c5 100644 --- a/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp +++ b/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp @@ -1,8 +1,8 @@ /** * @file Drawing_1.cpp - * @brief Simple sample code + * @brief Simple geometric drawing + * @author OpenCV team */ - #include #include #include @@ -83,11 +83,11 @@ int main( void ){ /// Function Declaration -//![myellipse] /** * @function MyEllipse * @brief Draw a fixed-size ellipse with different angles */ +//![my_ellipse] void MyEllipse( Mat img, double angle ) { int thickness = 2; @@ -103,13 +103,13 @@ void MyEllipse( Mat img, double angle ) thickness, lineType ); } -//![myellipse] +//![my_ellipse] -//![myfilledcircle] /** * @function MyFilledCircle * @brief Draw a fixed-size filled circle */ +//![my_filled_circle] void MyFilledCircle( Mat img, Point center ) { circle( img, @@ -119,13 +119,13 @@ void MyFilledCircle( Mat img, Point center ) FILLED, LINE_8 ); } -//![myfilledcircle] +//![my_filled_circle] -//![mypolygon] /** * @function MyPolygon * @brief Draw a simple concave polygon (rook) */ +//![my_polygon] void MyPolygon( Mat img ) { int lineType = LINE_8; @@ -163,17 +163,18 @@ void MyPolygon( Mat img ) Scalar( 255, 255, 255 ), lineType ); } -//![mypolygon] +//![my_polygon] -//![myline] /** * @function MyLine * @brief Draw a simple line */ +//![my_line] void MyLine( Mat img, Point start, Point end ) { int thickness = 2; int lineType = LINE_8; + line( img, start, end, @@ -181,4 +182,4 @@ void MyLine( Mat img, Point start, Point end ) thickness, lineType ); } -//![myline] +//![my_line] diff --git a/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java b/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java new file mode 100644 index 0000000000..d59d99030e --- /dev/null +++ b/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java @@ -0,0 +1,186 @@ +import org.opencv.core.*; +import org.opencv.core.Point; +import org.opencv.highgui.HighGui; +import org.opencv.imgproc.Imgproc; + +import java.util.*; +import java.util.List; + +class GeometricDrawingRun{ + + private 
static final int W = 400; + + public void run(){ + //! [create_images] + /// Windows names + String atom_window = "Drawing 1: Atom"; + String rook_window = "Drawing 2: Rook"; + + /// Create black empty images + Mat atom_image = Mat.zeros( W, W, CvType.CV_8UC3 ); + Mat rook_image = Mat.zeros( W, W, CvType.CV_8UC3 ); + //! [create_images] + + //! [draw_atom] + /// 1. Draw a simple atom: + /// ----------------------- + MyEllipse( atom_image, 90.0 ); + MyEllipse( atom_image, 0.0 ); + MyEllipse( atom_image, 45.0 ); + MyEllipse( atom_image, -45.0 ); + + /// 1.b. Creating circles + MyFilledCircle( atom_image, new Point( W/2, W/2) ); + //! [draw_atom] + + //! [draw_rook] + /// 2. Draw a rook + /// ------------------ + /// 2.a. Create a convex polygon + MyPolygon( rook_image ); + + //! [rectangle] + /// 2.b. Creating rectangles + Imgproc.rectangle( rook_image, + new Point( 0, 7*W/8 ), + new Point( W, W), + new Scalar( 0, 255, 255 ), + -1, + 8, + 0 ); + //! [rectangle] + + /// 2.c. Create a few lines + MyLine( rook_image, new Point( 0, 15*W/16 ), new Point( W, 15*W/16 ) ); + MyLine( rook_image, new Point( W/4, 7*W/8 ), new Point( W/4, W ) ); + MyLine( rook_image, new Point( W/2, 7*W/8 ), new Point( W/2, W ) ); + MyLine( rook_image, new Point( 3*W/4, 7*W/8 ), new Point( 3*W/4, W ) ); + //! [draw_rook] + + /// 3. Display your stuff! + HighGui.imshow( atom_window, atom_image ); + HighGui.moveWindow( atom_window, 0, 200 ); + HighGui.imshow( rook_window, rook_image ); + HighGui.moveWindow( rook_window, W, 200 ); + + HighGui.waitKey( 0 ); + System.exit(0); + } + + /// Function Declaration + + /** + * @function MyEllipse + * @brief Draw a fixed-size ellipse with different angles + */ + //! [my_ellipse] + private void MyEllipse( Mat img, double angle ) { + int thickness = 2; + int lineType = 8; + int shift = 0; + + Imgproc.ellipse( img, + new Point( W/2, W/2 ), + new Size( W/4, W/16 ), + angle, + 0.0, + 360.0, + new Scalar( 255, 0, 0 ), + thickness, + lineType, + shift ); + } + //! [my_ellipse] + /** + * @function MyFilledCircle + * @brief Draw a fixed-size filled circle + */ + //! [my_filled_circle] + private void MyFilledCircle( Mat img, Point center ) { + int thickness = -1; + int lineType = 8; + int shift = 0; + + Imgproc.circle( img, + center, + W/32, + new Scalar( 0, 0, 255 ), + thickness, + lineType, + shift ); + } + //! [my_filled_circle] + /** + * @function MyPolygon + * @function Draw a simple concave polygon (rook) + */ + //! 
[my_polygon] + private void MyPolygon( Mat img ) { + int lineType = 8; + int shift = 0; + + /** Create some points */ + Point[] rook_points = new Point[20]; + rook_points[0] = new Point( W/4, 7*W/8 ); + rook_points[1] = new Point( 3*W/4, 7*W/8 ); + rook_points[2] = new Point( 3*W/4, 13*W/16 ); + rook_points[3] = new Point( 11*W/16, 13*W/16 ); + rook_points[4] = new Point( 19*W/32, 3*W/8 ); + rook_points[5] = new Point( 3*W/4, 3*W/8 ); + rook_points[6] = new Point( 3*W/4, W/8 ); + rook_points[7] = new Point( 26*W/40, W/8 ); + rook_points[8] = new Point( 26*W/40, W/4 ); + rook_points[9] = new Point( 22*W/40, W/4 ); + rook_points[10] = new Point( 22*W/40, W/8 ); + rook_points[11] = new Point( 18*W/40, W/8 ); + rook_points[12] = new Point( 18*W/40, W/4 ); + rook_points[13] = new Point( 14*W/40, W/4 ); + rook_points[14] = new Point( 14*W/40, W/8 ); + rook_points[15] = new Point( W/4, W/8 ); + rook_points[16] = new Point( W/4, 3*W/8 ); + rook_points[17] = new Point( 13*W/32, 3*W/8 ); + rook_points[18] = new Point( 5*W/16, 13*W/16 ); + rook_points[19] = new Point( W/4, 13*W/16 ); + + MatOfPoint matPt = new MatOfPoint(); + matPt.fromArray(rook_points); + + List ppt = new ArrayList(); + ppt.add(matPt); + + Imgproc.fillPoly(img, + ppt, + new Scalar( 255, 255, 255 ), + lineType, + shift, + new Point(0,0) ); + } + //! [my_polygon] + /** + * @function MyLine + * @brief Draw a simple line + */ + //! [my_line] + private void MyLine( Mat img, Point start, Point end ) { + int thickness = 2; + int lineType = 8; + int shift = 0; + + Imgproc.line( img, + start, + end, + new Scalar( 0, 0, 0 ), + thickness, + lineType, + shift ); + } + //! [my_line] +} + +public class BasicGeometricDrawing { + public static void main(String[] args) { + // Load the native library. + System.loadLibrary(Core.NATIVE_LIBRARY_NAME); + new GeometricDrawingRun().run(); + } +} diff --git a/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py b/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py new file mode 100644 index 0000000000..a6f4098785 --- /dev/null +++ b/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py @@ -0,0 +1,115 @@ +import cv2 +import numpy as np + +W = 400 +## [my_ellipse] +def my_ellipse(img, angle): + thickness = 2 + line_type = 8 + + cv2.ellipse(img, + (W / 2, W / 2), + (W / 4, W / 16), + angle, + 0, + 360, + (255, 0, 0), + thickness, + line_type) +## [my_ellipse] +## [my_filled_circle] +def my_filled_circle(img, center): + thickness = -1 + line_type = 8 + + cv2.circle(img, + center, + W / 32, + (0, 0, 255), + thickness, + line_type) +## [my_filled_circle] +## [my_polygon] +def my_polygon(img): + line_type = 8 + + # Create some points + ppt = np.array([[W / 4, 7 * W / 8], [3 * W / 4, 7 * W / 8], + [3 * W / 4, 13 * W / 16], [11 * W / 16, 13 * W / 16], + [19 * W / 32, 3 * W / 8], [3 * W / 4, 3 * W / 8], + [3 * W / 4, W / 8], [26 * W / 40, W / 8], + [26 * W / 40, W / 4], [22 * W / 40, W / 4], + [22 * W / 40, W / 8], [18 * W / 40, W / 8], + [18 * W / 40, W / 4], [14 * W / 40, W / 4], + [14 * W / 40, W / 8], [W / 4, W / 8], + [W / 4, 3 * W / 8], [13 * W / 32, 3 * W / 8], + [5 * W / 16, 13 * W / 16], [W / 4, 13 * W / 16]], np.int32) + ppt = ppt.reshape((-1, 1, 2)) + cv2.fillPoly(img, [ppt], (255, 255, 255), line_type) + # Only drawind the lines would be: + # cv2.polylines(img, [ppt], True, (255, 0, 255), line_type) +## [my_polygon] +## [my_line] +def my_line(img, start, end): + thickness = 2 + line_type = 8 + + cv2.line(img, + 
start, + end, + (0, 0, 0), + thickness, + line_type) +## [my_line] +## [create_images] +# Windows names +atom_window = "Drawing 1: Atom" +rook_window = "Drawing 2: Rook" + +# Create black empty images +size = W, W, 3 +atom_image = np.zeros(size, dtype=np.uint8) +rook_image = np.zeros(size, dtype=np.uint8) +## [create_images] +## [draw_atom] +# 1. Draw a simple atom: +# ----------------------- + +# 1.a. Creating ellipses +my_ellipse(atom_image, 90) +my_ellipse(atom_image, 0) +my_ellipse(atom_image, 45) +my_ellipse(atom_image, -45) + +# 1.b. Creating circles +my_filled_circle(atom_image, (W / 2, W / 2)) +## [draw_atom] +## [draw_rook] + +# 2. Draw a rook +# ------------------ +# 2.a. Create a convex polygon +my_polygon(rook_image) +## [rectangle] +# 2.b. Creating rectangles +cv2.rectangle(rook_image, + (0, 7 * W / 8), + (W, W), + (0, 255, 255), + -1, + 8) +## [rectangle] + +# 2.c. Create a few lines +my_line(rook_image, (0, 15 * W / 16), (W, 15 * W / 16)) +my_line(rook_image, (W / 4, 7 * W / 8), (W / 4, W)) +my_line(rook_image, (W / 2, 7 * W / 8), (W / 2, W)) +my_line(rook_image, (3 * W / 4, 7 * W / 8), (3 * W / 4, W)) +## [draw_rook] +cv2.imshow(atom_window, atom_image) +cv2.moveWindow(atom_window, 0, 200) +cv2.imshow(rook_window, rook_image) +cv2.moveWindow(rook_window, W, 200) + +cv2.waitKey(0) +cv2.destroyAllWindows() From 954e2f9b9c1b1e2a9067b316b80bcbb040d60dcc Mon Sep 17 00:00:00 2001 From: tribta Date: Sun, 20 Aug 2017 13:45:46 +0100 Subject: [PATCH 3/4] Tutorial Discrete Fourier Transform --- .../discrete_fourier_transform.markdown | 244 ++++++++++++------ .../core/table_of_content_core.markdown | 2 + .../discrete_fourier_transform.cpp | 24 +- .../DiscreteFourierTransform.java | 109 ++++++++ .../discrete_fourier_transform.py | 80 ++++++ 5 files changed, 374 insertions(+), 85 deletions(-) create mode 100644 samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java create mode 100644 samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py diff --git a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.markdown b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.markdown index 22bbc877d8..32536a5632 100644 --- a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.markdown +++ b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.markdown @@ -1,6 +1,9 @@ Discrete Fourier Transform {#tutorial_discrete_fourier_transform} ========================== +@prev_tutorial{tutorial_random_generator_and_text} +@next_tutorial{tutorial_file_input_output_with_xml_yml} + Goal ---- @@ -8,21 +11,49 @@ We'll seek answers for the following questions: - What is a Fourier transform and why use it? - How to do it in OpenCV? -- Usage of functions such as: @ref cv::copyMakeBorder() , @ref cv::merge() , @ref cv::dft() , @ref - cv::getOptimalDFTSize() , @ref cv::log() and @ref cv::normalize() . +- Usage of functions such as: **copyMakeBorder()** , **merge()** , **dft()** , + **getOptimalDFTSize()** , **log()** and **normalize()** . 
Source code ----------- +@add_toggle_cpp You can [download this from here -](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or +](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or find it in the `samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the OpenCV source code library. +@end_toggle -Here's a sample usage of @ref cv::dft() : +@add_toggle_java +You can [download this from here +](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java) or +find it in the +`samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java` of the +OpenCV source code library. +@end_toggle -@includelineno cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp +@add_toggle_python +You can [download this from here +](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py) or +find it in the +`samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py` of the +OpenCV source code library. +@end_toggle + +Here's a sample usage of **dft()** : + +@add_toggle_cpp +@include cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp +@end_toggle + +@add_toggle_java +@include java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java +@end_toggle + +@add_toggle_python +@include python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py +@end_toggle Explanation ----------- @@ -49,89 +80,140 @@ Fourier Transform too needs to be of a discrete type resulting in a Discrete Fou (*DFT*). You'll want to use this whenever you need to determine the structure of an image from a geometrical point of view. Here are the steps to follow (in case of a gray scale input image *I*): --# **Expand the image to an optimal size**. The performance of a DFT is dependent of the image - size. It tends to be the fastest for image sizes that are multiple of the numbers two, three and - five. Therefore, to achieve maximal performance it is generally a good idea to pad border values - to the image to get a size with such traits. The @ref cv::getOptimalDFTSize() returns this - optimal size and we can use the @ref cv::copyMakeBorder() function to expand the borders of an - image: - @code{.cpp} - Mat padded; //expand input image to optimal size - int m = getOptimalDFTSize( I.rows ); - int n = getOptimalDFTSize( I.cols ); // on the border add zero pixels - copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0)); - @endcode - The appended pixels are initialized with zero. +#### Expand the image to an optimal size --# **Make place for both the complex and the real values**. The result of a Fourier Transform is - complex. This implies that for each image value the result is two image values (one per - component). Moreover, the frequency domains range is much larger than its spatial counterpart. - Therefore, we store these usually at least in a *float* format. 
Therefore we'll convert our - input image to this type and expand it with another channel to hold the complex values: - @code{.cpp} - Mat planes[] = {Mat_(padded), Mat::zeros(padded.size(), CV_32F)}; - Mat complexI; - merge(planes, 2, complexI); // Add to the expanded another plane with zeros - @endcode --# **Make the Discrete Fourier Transform**. It's possible an in-place calculation (same input as - output): - @code{.cpp} - dft(complexI, complexI); // this way the result may fit in the source matrix - @endcode --# **Transform the real and complex values to magnitude**. A complex number has a real (*Re*) and a - complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a - DFT is: +The performance of a DFT is dependent of the image +size. It tends to be the fastest for image sizes that are multiple of the numbers two, three and +five. Therefore, to achieve maximal performance it is generally a good idea to pad border values +to the image to get a size with such traits. The **getOptimalDFTSize()** returns this +optimal size and we can use the **copyMakeBorder()** function to expand the borders of an +image (the appended pixels are initialized with zero): - \f[M = \sqrt[2]{ {Re(DFT(I))}^2 + {Im(DFT(I))}^2}\f] +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp expand +@end_toggle - Translated to OpenCV code: - @code{.cpp} - split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) - magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude - Mat magI = planes[0]; - @endcode --# **Switch to a logarithmic scale**. It turns out that the dynamic range of the Fourier - coefficients is too large to be displayed on the screen. We have some small and some high - changing values that we can't observe like this. Therefore the high values will all turn out as - white points, while the small ones as black. To use the gray scale values to for visualization - we can transform our linear scale to a logarithmic one: +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java expand +@end_toggle - \f[M_1 = \log{(1 + M)}\f] +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py expand +@end_toggle - Translated to OpenCV code: - @code{.cpp} - magI += Scalar::all(1); // switch to logarithmic scale - log(magI, magI); - @endcode --# **Crop and rearrange**. Remember, that at the first step, we expanded the image? Well, it's time - to throw away the newly introduced values. For visualization purposes we may also rearrange the - quadrants of the result, so that the origin (zero, zero) corresponds with the image center. - @code{.cpp} - magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2)); - int cx = magI.cols/2; - int cy = magI.rows/2; +#### Make place for both the complex and the real values - Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant - Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right - Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left - Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right +The result of a Fourier Transform is +complex. This implies that for each image value the result is two image values (one per +component). Moreover, the frequency domains range is much larger than its spatial counterpart. +Therefore, we store these usually at least in a *float* format. 
Therefore we'll convert our +input image to this type and expand it with another channel to hold the complex values: - Mat tmp; // swap quadrants (Top-Left with Bottom-Right) - q0.copyTo(tmp); - q3.copyTo(q0); - tmp.copyTo(q3); +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp complex_and_real +@end_toggle - q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left) - q2.copyTo(q1); - tmp.copyTo(q2); - @endcode --# **Normalize**. This is done again for visualization purposes. We now have the magnitudes, - however this are still out of our image display range of zero to one. We normalize our values to - this range using the @ref cv::normalize() function. -@code{.cpp} -normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a - // viewable image form (float between values 0 and 1). -@endcode +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java complex_and_real +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py complex_and_real +@end_toggle + +#### Make the Discrete Fourier Transform +It's possible an in-place calculation (same input as +output): + +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp dft +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java dft +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py dft +@end_toggle + +#### Transform the real and complex values to magnitude +A complex number has a real (*Re*) and a +complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a +DFT is: + +\f[M = \sqrt[2]{ {Re(DFT(I))}^2 + {Im(DFT(I))}^2}\f] + +Translated to OpenCV code: + +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp magnitude +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java magnitude +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py magnitude +@end_toggle + +#### Switch to a logarithmic scale +It turns out that the dynamic range of the Fourier +coefficients is too large to be displayed on the screen. We have some small and some high +changing values that we can't observe like this. Therefore the high values will all turn out as +white points, while the small ones as black. To use the gray scale values to for visualization +we can transform our linear scale to a logarithmic one: + +\f[M_1 = \log{(1 + M)}\f] + +Translated to OpenCV code: + +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp log +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java log +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py log +@end_toggle + +#### Crop and rearrange +Remember, that at the first step, we expanded the image? Well, it's time +to throw away the newly introduced values. For visualization purposes we may also rearrange the +quadrants of the result, so that the origin (zero, zero) corresponds with the image center. 
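+
+As a side note, once the spectrum is cropped to even dimensions the same quadrant swap can be
+written in NumPy as a one-liner (a sketch for reference only, reusing the magnitude image `magI`
+computed above; the snippets below show the explicit version used by the samples):
+@code{.py}
+import numpy as np
+
+magI = magI[0:(magI.shape[0] & -2), 0:(magI.shape[1] & -2)]   # crop to an even size
+magI = np.fft.fftshift(magI)   # swap quadrants so the origin moves to the image center
+@endcode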
+ +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp crop_rearrange +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java crop_rearrange +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py crop_rearrange +@end_toggle + +#### Normalize +This is done again for visualization purposes. We now have the magnitudes, +however this are still out of our image display range of zero to one. We normalize our values to +this range using the @ref cv::normalize() function. + +@add_toggle_cpp +@snippet cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp normalize +@end_toggle + +@add_toggle_java +@snippet java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java normalize +@end_toggle + +@add_toggle_python +@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py normalize +@end_toggle Result ------ @@ -140,7 +222,7 @@ An application idea would be to determine the geometrical orientation present in example, let us find out if a text is horizontal or not? Looking at some text you'll notice that the text lines sort of form also horizontal lines and the letters form sort of vertical lines. These two main components of a text snippet may be also seen in case of the Fourier transform. Let us use -[this horizontal ](https://github.com/opencv/opencv/tree/master/samples/data/imageTextN.png) and [this rotated](https://github.com/opencv/opencv/tree/master/samples/data/imageTextR.png) +[this horizontal ](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextN.png) and [this rotated](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextR.png) image about a text. In case of the horizontal text: diff --git a/doc/tutorials/core/table_of_content_core.markdown b/doc/tutorials/core/table_of_content_core.markdown index af040b9145..0a1fb5614c 100644 --- a/doc/tutorials/core/table_of_content_core.markdown +++ b/doc/tutorials/core/table_of_content_core.markdown @@ -76,6 +76,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_discrete_fourier_transform + *Languages:* C++, Java, Python + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor diff --git a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp index e23ab1c326..7121b16b35 100644 --- a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp +++ b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp @@ -8,45 +8,58 @@ using namespace cv; using namespace std; -static void help(char* progName) +static void help(void) { cout << endl << "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl << "The dft of an image is taken and it's power spectrum is displayed." << endl << "Usage:" << endl - << progName << " [image_name -- default ../data/lena.jpg] " << endl << endl; + << "./discrete_fourier_transform [image_name -- default ../data/lena.jpg]" << endl; } int main(int argc, char ** argv) { - help(argv[0]); + help(); const char* filename = argc >=2 ? 
argv[1] : "../data/lena.jpg"; Mat I = imread(filename, IMREAD_GRAYSCALE); - if( I.empty()) + if( I.empty()){ + cout << "Error opening image" << endl; return -1; + } +//! [expand] Mat padded; //expand input image to optimal size int m = getOptimalDFTSize( I.rows ); int n = getOptimalDFTSize( I.cols ); // on the border add zero values copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0)); +//! [expand] +//! [complex_and_real] Mat planes[] = {Mat_(padded), Mat::zeros(padded.size(), CV_32F)}; Mat complexI; merge(planes, 2, complexI); // Add to the expanded another plane with zeros +//! [complex_and_real] +//! [dft] dft(complexI, complexI); // this way the result may fit in the source matrix +//! [dft] // compute the magnitude and switch to logarithmic scale // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)) +//! [magnitude] split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude Mat magI = planes[0]; +//! [magnitude] +//! [log] magI += Scalar::all(1); // switch to logarithmic scale log(magI, magI); +//! [log] +//! [crop_rearrange] // crop the spectrum, if it has an odd number of rows or columns magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2)); @@ -67,9 +80,12 @@ int main(int argc, char ** argv) q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left) q2.copyTo(q1); tmp.copyTo(q2); +//! [crop_rearrange] +//! [normalize] normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a // viewable image form (float between values 0 and 1). +//! [normalize] imshow("Input Image" , I ); // Show the result imshow("spectrum magnitude", magI); diff --git a/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java b/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java new file mode 100644 index 0000000000..ca2560be35 --- /dev/null +++ b/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java @@ -0,0 +1,109 @@ +import org.opencv.core.*; +import org.opencv.highgui.HighGui; +import org.opencv.imgcodecs.Imgcodecs; + +import java.util.List; +import java.util.*; + +class DiscreteFourierTransformRun{ + private void help() { + System.out.println("" + + "This program demonstrated the use of the discrete Fourier transform (DFT). \n" + + "The dft of an image is taken and it's power spectrum is displayed.\n" + + "Usage:\n" + + "./DiscreteFourierTransform [image_name -- default ../data/lena.jpg]"); + } + + public void run(String[] args){ + + help(); + + String filename = ((args.length > 0) ? args[0] : "../data/lena.jpg"); + + Mat I = Imgcodecs.imread(filename, Imgcodecs.IMREAD_GRAYSCALE); + if( I.empty() ) { + System.out.println("Error opening image"); + System.exit(-1); + } + + //! [expand] + Mat padded = new Mat(); //expand input image to optimal size + int m = Core.getOptimalDFTSize( I.rows() ); + int n = Core.getOptimalDFTSize( I.cols() ); // on the border add zero values + Core.copyMakeBorder(I, padded, 0, m - I.rows(), 0, n - I.cols(), Core.BORDER_CONSTANT, Scalar.all(0)); + //! [expand] + + //! [complex_and_real] + List planes = new ArrayList(); + padded.convertTo(padded, CvType.CV_32F); + planes.add(padded); + planes.add(Mat.zeros(padded.size(), CvType.CV_32F)); + Mat complexI = new Mat(); + Core.merge(planes, complexI); // Add to the expanded another plane with zeros + //! [complex_and_real] + + //! 
[dft] + Core.dft(complexI, complexI); // this way the result may fit in the source matrix + //! [dft] + + // compute the magnitude and switch to logarithmic scale + // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)) + //! [magnitude] + Core.split(complexI, planes); // planes.get(0) = Re(DFT(I) + // planes.get(1) = Im(DFT(I)) + Core.magnitude(planes.get(0), planes.get(1), planes.get(0));// planes.get(0) = magnitude + Mat magI = planes.get(0); + //! [magnitude] + + //! [log] + Mat matOfOnes = Mat.ones(magI.size(), magI.type()); + Core.add(matOfOnes, magI, magI); // switch to logarithmic scale + Core.log(magI, magI); + //! [log] + + //! [crop_rearrange] + // crop the spectrum, if it has an odd number of rows or columns + magI = magI.submat(new Rect(0, 0, magI.cols() & -2, magI.rows() & -2)); + + // rearrange the quadrants of Fourier image so that the origin is at the image center + int cx = magI.cols()/2; + int cy = magI.rows()/2; + + Mat q0 = new Mat(magI, new Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant + Mat q1 = new Mat(magI, new Rect(cx, 0, cx, cy)); // Top-Right + Mat q2 = new Mat(magI, new Rect(0, cy, cx, cy)); // Bottom-Left + Mat q3 = new Mat(magI, new Rect(cx, cy, cx, cy)); // Bottom-Right + + Mat tmp = new Mat(); // swap quadrants (Top-Left with Bottom-Right) + q0.copyTo(tmp); + q3.copyTo(q0); + tmp.copyTo(q3); + + q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left) + q2.copyTo(q1); + tmp.copyTo(q2); + //! [crop_rearrange] + + magI.convertTo(magI, CvType.CV_8UC1); + //! [normalize] + Core.normalize(magI, magI, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1); // Transform the matrix with float values + // into a viewable image form (float between + // values 0 and 255). + //! [normalize] + + HighGui.imshow("Input Image" , I ); // Show the result + HighGui.imshow("Spectrum Magnitude", magI); + HighGui.waitKey(); + + System.exit(0); + } +} + + +public class DiscreteFourierTransform { + public static void main(String[] args) { + // Load the native library. + System.loadLibrary(Core.NATIVE_LIBRARY_NAME); + new DiscreteFourierTransformRun().run(args); + } +} diff --git a/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py new file mode 100644 index 0000000000..a16c9bead8 --- /dev/null +++ b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py @@ -0,0 +1,80 @@ +from __future__ import print_function +import sys + +import cv2 +import numpy as np + + +def print_help(): + print(''' + This program demonstrated the use of the discrete Fourier transform (DFT). + The dft of an image is taken and it's power spectrum is displayed. 
+ Usage: + discrete_fourier_transform.py [image_name -- default ../../../../data/lena.jpg]''') + + +def main(argv): + + print_help() + + filename = argv[0] if len(argv) > 0 else "../../../../data/lena.jpg" + + I = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) + if I is None: + print('Error opening image') + return -1 + ## [expand] + rows, cols = I.shape + m = cv2.getOptimalDFTSize( rows ) + n = cv2.getOptimalDFTSize( cols ) + padded = cv2.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv2.BORDER_CONSTANT, value=[0, 0, 0]) + ## [expand] + ## [complex_and_real] + planes = [np.float32(padded), np.zeros(padded.shape, np.float32)] + complexI = cv2.merge(planes) # Add to the expanded another plane with zeros + ## [complex_and_real] + ## [dft] + cv2.dft(complexI, complexI) # this way the result may fit in the source matrix + ## [dft] + # compute the magnitude and switch to logarithmic scale + # = > log(1 + sqrt(Re(DFT(I)) ^ 2 + Im(DFT(I)) ^ 2)) + ## [magnitude] + cv2.split(complexI, planes) # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) + cv2.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude + magI = planes[0] + ## [magnitude] + ## [log] + matOfOnes = np.ones(magI.shape, dtype=magI.dtype) + cv2.add(matOfOnes, magI, magI) # switch to logarithmic scale + cv2.log(magI, magI) + ## [log] + ## [crop_rearrange] + magI_rows, magI_cols = magI.shape + # crop the spectrum, if it has an odd number of rows or columns + magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)] + cx = int(magI_rows/2) + cy = int(magI_cols/2) + + q0 = magI[0:cx, 0:cy] # Top-Left - Create a ROI per quadrant + q1 = magI[cx:cx+cx, 0:cy] # Top-Right + q2 = magI[0:cx, cy:cy+cy] # Bottom-Left + q3 = magI[cx:cx+cx, cy:cy+cy] # Bottom-Right + + tmp = np.copy(q0) # swap quadrants (Top-Left with Bottom-Right) + magI[0:cx, 0:cy] = q3 + magI[cx:cx + cx, cy:cy + cy] = tmp + + tmp = np.copy(q1) # swap quadrant (Top-Right with Bottom-Left) + magI[cx:cx + cx, 0:cy] = q2 + magI[0:cx, cy:cy + cy] = tmp + ## [crop_rearrange] + ## [normalize] + cv2.normalize(magI, magI, 0, 1, cv2.NORM_MINMAX) # Transform the matrix with float values into a + ## viewable image form(float between values 0 and 1). + ## [normalize] + cv2.imshow("Input Image" , I ) # Show the result + cv2.imshow("spectrum magnitude", magI) + cv2.waitKey() + +if __name__ == "__main__": + main(sys.argv[1:]) From 45afd29b726e0396fedddd8cbfd59e092da0410e Mon Sep 17 00:00:00 2001 From: tribta Date: Mon, 21 Aug 2017 15:08:13 +0100 Subject: [PATCH 4/4] Tutorial Mask Operations on Matrices --- .../mat_mask_operations.markdown | 75 ++++++++++--------- .../MatMaskOperations.java | 51 ++++--------- .../mat_mask_operations.py | 26 +++---- 3 files changed, 64 insertions(+), 88 deletions(-) diff --git a/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown b/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown index ca95243e7d..32545de350 100644 --- a/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown +++ b/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown @@ -28,24 +28,39 @@ the zero-zero index) on the pixel you want to calculate and sum up the pixel val the overlapped matrix values. It's the same thing, however in case of large matrices the latter notation is a lot easier to look over. +Code +---- + @add_toggle_cpp -Now let us see how we can make this happen by using the basic pixel access method or by using the -@ref cv::filter2D function. 
+You can download this source code from [here +](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the +OpenCV source code libraries sample directory at +`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`. +@include samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp @end_toggle @add_toggle_java -Now let us see how we can make this happen by using the basic pixel access method or by using the -**Imgproc.filter2D()** function. +You can download this source code from [here +](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java) or look in the +OpenCV source code libraries sample directory at +`samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java`. +@include samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java @end_toggle @add_toggle_python -Now let us see how we can make this happen by using the basic pixel access method or by using the -**cv2.filter2D()** function. +You can download this source code from [here +](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py) or look in the +OpenCV source code libraries sample directory at +`samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py`. +@include samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py @end_toggle The Basic Method ---------------- +Now let us see how we can make this happen by using the basic pixel access method or by using the +**filter2D()** function. + Here's a function that will do this: @add_toggle_cpp @snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp basic_method @@ -132,37 +147,38 @@ The filter2D function Applying such filters are so common in image processing that in OpenCV there exist a function that will take care of applying the mask (also called a kernel in some places). For this you first need to define an object that holds the mask: + @add_toggle_cpp @snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp kern - -Then call the @ref cv::filter2D function specifying the input, the output image and the kernel to -use: -@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp filter2D - -The function even has a fifth optional argument to specify the center of the kernel, a sixth -for adding an optional value to the filtered pixels before storing them in K and a seventh one -for determining what to do in the regions where the operation is undefined (borders). @end_toggle @add_toggle_java @snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java kern - -Then call the **Imgproc.filter2D()** function specifying the input, the output image and the kernel to -use: -@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java filter2D -The function even has a fifth optional argument to specify the center of the kernel, a sixth -for adding an optional value to the filtered pixels before storing them in K and a seventh one -for determining what to do in the regions where the operation is undefined (borders). 
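+
+This is the same sharpening mask used by the hand-written method above. As a rough
+sketch of what the referenced snippet sets up (the authoritative version lives in
+`MatMaskOperations.java`), the kernel can be built in the Java bindings like this:
+
+@code{.java}
+// 3x3 signed 8-bit kernel: 5 at the center, -1 at the 4-neighbours, 0 at the corners
+Mat kern = new Mat(3, 3, CvType.CV_8S);
+kern.put(0, 0,  0, -1,  0,
+               -1,  5, -1,
+                0, -1,  0);
+@endcode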
@end_toggle @add_toggle_python @snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py kern +@end_toggle -Then call the **cv2.filter2D()** function specifying the input, the output image and the kernell to +Then call the **filter2D()** function specifying the input, the output image and the kernel to use: + +@add_toggle_cpp +@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp filter2D +@end_toggle + +@add_toggle_java +@snippet samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java filter2D +@end_toggle + +@add_toggle_python @snippet samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py filter2D @end_toggle +The function even has a fifth optional argument to specify the center of the kernel, a sixth +for adding an optional value to the filtered pixels before storing them in K and a seventh one +for determining what to do in the regions where the operation is undefined (borders). + This function is shorter, less verbose and, because there are some optimizations, it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference. @@ -172,22 +188,7 @@ For example: ![](images/resultMatMaskFilter2D.png) @add_toggle_cpp -You can download this source code from [here -](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the -OpenCV source code libraries sample directory at -`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`. - Check out an instance of running the program on our [YouTube channel](http://www.youtube.com/watch?v=7PF1tAU9se4) . @youtube{7PF1tAU9se4} @end_toggle - -@add_toggle_java -You can look in the OpenCV source code libraries sample directory at -`samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java`. -@end_toggle - -@add_toggle_python -You can look in the OpenCV source code libraries sample directory at -`samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py`. 
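+
+As a rough illustration of the optional **filter2D()** arguments mentioned earlier (the
+anchor inside the kernel, a delta added to every filtered pixel, and the border
+extrapolation mode), the fully spelled-out Java call would look roughly like the sketch
+below; the values shown are just the defaults, not something the sample changes:
+
+@code{.java}
+// anchor (-1, -1) selects the kernel center, delta 0 adds nothing to the result,
+// and BORDER_DEFAULT chooses how pixels outside the image are extrapolated.
+Imgproc.filter2D(src, dst1, src.depth(), kern, new Point(-1, -1), 0, Core.BORDER_DEFAULT);
+@endcode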
-@end_toggle diff --git a/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java b/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java index f597a5bc0d..12869c7301 100644 --- a/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java +++ b/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java @@ -2,14 +2,10 @@ import org.opencv.core.Core; import org.opencv.core.CvType; import org.opencv.core.Mat; import org.opencv.core.Scalar; +import org.opencv.highgui.HighGui; import org.opencv.imgcodecs.Imgcodecs; import org.opencv.imgproc.Imgproc; -import javax.swing.*; -import java.awt.*; -import java.awt.image.BufferedImage; -import java.awt.image.DataBufferByte; - class MatMaskOperationsRun { public void run(String[] args) { @@ -31,8 +27,10 @@ class MatMaskOperationsRun { System.exit(-1); } - Image img = toBufferedImage(src); - displayImage("Input", img, 0, 200); + HighGui.namedWindow("Input", HighGui.WINDOW_AUTOSIZE); + HighGui.namedWindow("Output", HighGui.WINDOW_AUTOSIZE); + + HighGui.imshow( "Input", src ); double t = System.currentTimeMillis(); Mat dst0 = sharpen(src, new Mat()); @@ -40,8 +38,9 @@ class MatMaskOperationsRun { t = ((double) System.currentTimeMillis() - t) / 1000; System.out.println("Hand written function time passed in seconds: " + t); - Image img2 = toBufferedImage(dst0); - displayImage("Output", img2, 400, 400); + HighGui.imshow( "Output", dst0 ); + HighGui.moveWindow("Output", 400, 400); + HighGui.waitKey(); //![kern] Mat kern = new Mat(3, 3, CvType.CV_8S); @@ -58,8 +57,10 @@ class MatMaskOperationsRun { t = ((double) System.currentTimeMillis() - t) / 1000; System.out.println("Built-in filter2D time passed in seconds: " + t); - Image img3 = toBufferedImage(dst1); - displayImage("Output", img3, 800, 400); + HighGui.imshow( "Output", dst1 ); + + HighGui.waitKey(); + System.exit(0); } //! [basic_method] @@ -108,38 +109,12 @@ class MatMaskOperationsRun { return Result; } //! [basic_method] - - public Image toBufferedImage(Mat m) { - int type = BufferedImage.TYPE_BYTE_GRAY; - if (m.channels() > 1) { - type = BufferedImage.TYPE_3BYTE_BGR; - } - int bufferSize = m.channels() * m.cols() * m.rows(); - byte[] b = new byte[bufferSize]; - m.get(0, 0, b); // get all the pixels - BufferedImage image = new BufferedImage(m.cols(), m.rows(), type); - final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData(); - System.arraycopy(b, 0, targetPixels, 0, b.length); - return image; - } - - public void displayImage(String title, Image img, int x, int y) { - ImageIcon icon = new ImageIcon(img); - JFrame frame = new JFrame(title); - JLabel lbl = new JLabel(icon); - frame.add(lbl); - frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); - frame.pack(); - frame.setLocation(x, y); - frame.setVisible(true); - } } public class MatMaskOperations { public static void main(String[] args) { // Load the native library. 
System.loadLibrary(Core.NATIVE_LIBRARY_NAME); - - new MatMaskOperationsRun().run(args); // run code + new MatMaskOperationsRun().run(args); } } diff --git a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py index 4be987d1e9..21cdb2b103 100644 --- a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py +++ b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py @@ -1,9 +1,10 @@ +from __future__ import print_function import sys import time + import numpy as np import cv2 - ## [basic_method] def is_grayscale(my_image): return len(my_image.shape) < 3 @@ -26,7 +27,6 @@ def sharpen(my_image): height, width, n_channels = my_image.shape result = np.zeros(my_image.shape, my_image.dtype) - ## [basic_method_loop] for j in range(1, height - 1): for i in range(1, width - 1): @@ -36,17 +36,16 @@ def sharpen(my_image): result[j, i] = saturated(sum_value) else: for k in range(0, n_channels): - sum_value = 5 * my_image[j, i, k] - my_image[j + 1, i, k] - my_image[j - 1, i, k] \ - - my_image[j, i + 1, k] - my_image[j, i - 1, k] + sum_value = 5 * my_image[j, i, k] - my_image[j + 1, i, k] \ + - my_image[j - 1, i, k] - my_image[j, i + 1, k]\ + - my_image[j, i - 1, k] result[j, i, k] = saturated(sum_value) ## [basic_method_loop] - return result ## [basic_method] - def main(argv): - filename = "../data/lena.jpg" + filename = "../../../../data/lena.jpg" img_codec = cv2.IMREAD_COLOR if argv: @@ -57,8 +56,9 @@ def main(argv): src = cv2.imread(filename, img_codec) if src is None: - print "Can't open image [" + filename + "]" - print "Usage:\nmat_mask_operations.py [image_path -- default ../data/lena.jpg] [G -- grayscale]" + print("Can't open image [" + filename + "]") + print("Usage:") + print("mat_mask_operations.py [image_path -- default ../../../../data/lena.jpg] [G -- grayscale]") return -1 cv2.namedWindow("Input", cv2.WINDOW_AUTOSIZE) @@ -70,7 +70,7 @@ def main(argv): dst0 = sharpen(src) t = (time.time() - t) / 1000 - print "Hand written function time passed in seconds: %s" % t + print("Hand written function time passed in seconds: %s" % t) cv2.imshow("Output", dst0) cv2.waitKey() @@ -81,13 +81,13 @@ def main(argv): [-1, 5, -1], [0, -1, 0]], np.float32) # kernel should be floating point type ## [kern] - ## [filter2D] - dst1 = cv2.filter2D(src, -1, kernel) # ddepth = -1, means destination image has depth same as input image + dst1 = cv2.filter2D(src, -1, kernel) + # ddepth = -1, means destination image has depth same as input image ## [filter2D] t = (time.time() - t) / 1000 - print "Built-in filter2D time passed in seconds: %s" % t + print("Built-in filter2D time passed in seconds: %s" % t) cv2.imshow("Output", dst1)