mirror of
https://github.com/opencv/opencv.git
synced 2025-01-18 14:13:15 +08:00
Update documentation ( tutorials )
This commit is contained in:
parent
55d0945149
commit
bb6f65c199
@ -25,51 +25,13 @@ By varying \f$\alpha\f$ from \f$0 \rightarrow 1\f$ this operator can be used to
|
||||
*cross-dissolve* between two images or videos, as seen in slide shows and film productions (cool,
|
||||
eh?)
|
||||
|
||||
Code
|
||||
----
|
||||
Source Code
|
||||
-----------
|
||||
|
||||
As usual, after the not-so-lengthy explanation, let's go to the code:
|
||||
@code{.cpp}
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <iostream>
|
||||
Download the source code from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp).
|
||||
@include cpp/tutorial_code/core/AddingImages/AddingImages.cpp
|
||||
|
||||
using namespace cv;
|
||||
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
double alpha = 0.5; double beta; double input;
|
||||
|
||||
Mat src1, src2, dst;
|
||||
|
||||
/// Ask the user to enter alpha
|
||||
std::cout<<" Simple Linear Blender "<<std::endl;
|
||||
std::cout<<"-----------------------"<<std::endl;
|
||||
std::cout<<"* Enter alpha [0-1]: ";
|
||||
std::cin>>input;
|
||||
|
||||
/// We use the alpha provided by the user if it is between 0 and 1
|
||||
if( input >= 0.0 && input <= 1.0 )
|
||||
{ alpha = input; }
|
||||
|
||||
/// Read image ( same size, same type )
|
||||
src1 = imread("../../images/LinuxLogo.jpg");
|
||||
src2 = imread("../../images/WindowsLogo.jpg");
|
||||
|
||||
if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
|
||||
if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
|
||||
|
||||
/// Create Windows
|
||||
namedWindow("Linear Blend", 1);
|
||||
|
||||
beta = ( 1.0 - alpha );
|
||||
addWeighted( src1, alpha, src2, beta, 0.0, dst);
|
||||
|
||||
imshow( "Linear Blend", dst );
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
@endcode
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
@ -78,25 +40,21 @@ Explanation
|
||||
\f[g(x) = (1 - \alpha)f_{0}(x) + \alpha f_{1}(x)\f]
|
||||
|
||||
We need two source images (\f$f_{0}(x)\f$ and \f$f_{1}(x)\f$). So, we load them in the usual way:
|
||||
@code{.cpp}
|
||||
src1 = imread("../../images/LinuxLogo.jpg");
|
||||
src2 = imread("../../images/WindowsLogo.jpg");
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp load
|
||||
|
||||
**warning**
|
||||
|
||||
Since we are *adding* *src1* and *src2*, they both have to be of the same size (width and
|
||||
height) and type.
|
||||
|
||||
-# Now we need to generate the `g(x)` image. For this, the function add_weighted:addWeighted comes quite handy:
|
||||
@code{.cpp}
|
||||
beta = ( 1.0 - alpha );
|
||||
addWeighted( src1, alpha, src2, beta, 0.0, dst);
|
||||
@endcode
|
||||
-# Now we need to generate the `g(x)` image. For this, the function @ref cv::addWeighted comes quite handy:
|
||||
@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp blend_images
|
||||
since @ref cv::addWeighted produces:
|
||||
\f[dst = \alpha \cdot src1 + \beta \cdot src2 + \gamma\f]
|
||||
In this case, `gamma` is the argument \f$0.0\f$ in the code above.
|
||||
|
||||
-# Create windows, show the images and wait for the user to end the program.
|
||||
@snippet cpp/tutorial_code/core/AddingImages/AddingImages.cpp display
|
||||
|
||||
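To see exactly what the blend does, the @ref cv::addWeighted call above is roughly equivalent to the
following per-pixel loop for an 8-bit, 3-channel image (an illustrative sketch only, reusing the same
*alpha*, *beta*, *src1* and *src2* as above; the real function is optimized and supports many types):
@code{.cpp}
// Illustrative only: dst = alpha*src1 + beta*src2 + gamma, written out per pixel
// for CV_8UC3 images (gamma is 0.0 in this tutorial)
Mat dst( src1.size(), src1.type() );
for( int y = 0; y < src1.rows; y++ )
    for( int x = 0; x < src1.cols; x++ )
        for( int c = 0; c < 3; c++ )
            dst.at<Vec3b>( y, x )[c] = saturate_cast<uchar>(
                alpha * src1.at<Vec3b>( y, x )[c] +
                beta  * src2.at<Vec3b>( y, x )[c] + 0.0 );
@endcode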
Result
|
||||
------
|
||||
|
@ -48,69 +48,26 @@ Code
|
||||
|
||||
- This code is in your OpenCV sample folder. Otherwise you can grab it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp)
|
||||
@include samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp
|
||||
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
-# Since we plan to draw two examples (an atom and a rook), we have to create two images and two
|
||||
windows to display them.
|
||||
@code{.cpp}
|
||||
/// Windows names
|
||||
char atom_window[] = "Drawing 1: Atom";
|
||||
char rook_window[] = "Drawing 2: Rook";
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp create_images
|
||||
|
||||
/// Create black empty images
|
||||
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
@endcode
|
||||
-# We created functions to draw different geometric shapes. For instance, to draw the atom we used
|
||||
*MyEllipse* and *MyFilledCircle*:
|
||||
@code{.cpp}
|
||||
/// 1. Draw a simple atom:
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_atom
|
||||
|
||||
/// 1.a. Creating ellipses
|
||||
MyEllipse( atom_image, 90 );
|
||||
MyEllipse( atom_image, 0 );
|
||||
MyEllipse( atom_image, 45 );
|
||||
MyEllipse( atom_image, -45 );
|
||||
|
||||
/// 1.b. Creating circles
|
||||
MyFilledCircle( atom_image, Point( w/2.0, w/2.0) );
|
||||
@endcode
|
||||
-# And to draw the rook we employed *MyLine*, *rectangle* and *MyPolygon*:
|
||||
@code{.cpp}
|
||||
/// 2. Draw a rook
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp draw_rook
|
||||
|
||||
/// 2.a. Create a convex polygon
|
||||
MyPolygon( rook_image );
|
||||
|
||||
/// 2.b. Creating rectangles
|
||||
rectangle( rook_image,
|
||||
Point( 0, 7*w/8.0 ),
|
||||
Point( w, w),
|
||||
Scalar( 0, 255, 255 ),
|
||||
-1,
|
||||
8 );
|
||||
|
||||
/// 2.c. Create a few lines
|
||||
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
|
||||
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
|
||||
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
|
||||
MyLine( rook_image, Point( 3*w/4, 7*w/8 ), Point( 3*w/4, w ) );
|
||||
@endcode
|
||||
-# Let's check what is inside each of these functions:
|
||||
- *MyLine*
|
||||
@code{.cpp}
|
||||
void MyLine( Mat img, Point start, Point end )
|
||||
{
|
||||
int thickness = 2;
|
||||
int lineType = 8;
|
||||
line( img, start, end,
|
||||
Scalar( 0, 0, 0 ),
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myline
|
||||
|
||||
As we can see, *MyLine* just calls the function @ref cv::line , which does the following:
|
||||
|
||||
- Draw a line from Point **start** to Point **end**
|
||||
@ -120,95 +77,31 @@ Explanation
|
||||
- The line thickness is set to **thickness** (in this case 2)
|
||||
- The line is an 8-connected one (**lineType** = 8)
|
||||
- *MyEllipse*
|
||||
@code{.cpp}
|
||||
void MyEllipse( Mat img, double angle )
|
||||
{
|
||||
int thickness = 2;
|
||||
int lineType = 8;
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myellipse
|
||||
|
||||
ellipse( img,
|
||||
Point( w/2.0, w/2.0 ),
|
||||
Size( w/4.0, w/16.0 ),
|
||||
angle,
|
||||
0,
|
||||
360,
|
||||
Scalar( 255, 0, 0 ),
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
@endcode
|
||||
From the code above, we can observe that the function @ref cv::ellipse draws an ellipse such
|
||||
that:
|
||||
|
||||
- The ellipse is displayed in the image **img**
|
||||
- The ellipse center is located in the point **(w/2, w/2)** and is enclosed in a box
of size **(w/4, w/16)**
|
||||
- The ellipse is rotated **angle** degrees
|
||||
- The ellipse extends an arc between **0** and **360** degrees
|
||||
- The color of the figure will be **Scalar( 255, 0, 0 )**, which means blue in BGR order.
|
||||
- The ellipse's **thickness** is 2.
|
||||
- *MyFilledCircle*
|
||||
@code{.cpp}
|
||||
void MyFilledCircle( Mat img, Point center )
|
||||
{
|
||||
int thickness = -1;
|
||||
int lineType = 8;
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp myfilledcircle
|
||||
|
||||
circle( img,
|
||||
center,
|
||||
w/32.0,
|
||||
Scalar( 0, 0, 255 ),
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
@endcode
|
||||
Similar to the ellipse function, we can observe that *circle* receives as arguments:
|
||||
|
||||
- The image where the circle will be displayed (**img**)
|
||||
- The center of the circle denoted as the Point **center**
|
||||
- The radius of the circle: **w/32**
|
||||
- The color of the circle: **Scalar(0, 0, 255)** which means *Red* in BGR
|
||||
- Since **thickness** = -1, the circle will be drawn filled.
|
||||
- *MyPolygon*
|
||||
@code{.cpp}
|
||||
void MyPolygon( Mat img )
|
||||
{
|
||||
int lineType = 8;
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp mypolygon
|
||||
|
||||
/* Create some points */
|
||||
Point rook_points[1][20];
|
||||
rook_points[0][0] = Point( w/4.0, 7*w/8.0 );
|
||||
rook_points[0][1] = Point( 3*w/4.0, 7*w/8.0 );
|
||||
rook_points[0][2] = Point( 3*w/4.0, 13*w/16.0 );
|
||||
rook_points[0][3] = Point( 11*w/16.0, 13*w/16.0 );
|
||||
rook_points[0][4] = Point( 19*w/32.0, 3*w/8.0 );
|
||||
rook_points[0][5] = Point( 3*w/4.0, 3*w/8.0 );
|
||||
rook_points[0][6] = Point( 3*w/4.0, w/8.0 );
|
||||
rook_points[0][7] = Point( 26*w/40.0, w/8.0 );
|
||||
rook_points[0][8] = Point( 26*w/40.0, w/4.0 );
|
||||
rook_points[0][9] = Point( 22*w/40.0, w/4.0 );
|
||||
rook_points[0][10] = Point( 22*w/40.0, w/8.0 );
|
||||
rook_points[0][11] = Point( 18*w/40.0, w/8.0 );
|
||||
rook_points[0][12] = Point( 18*w/40.0, w/4.0 );
|
||||
rook_points[0][13] = Point( 14*w/40.0, w/4.0 );
|
||||
rook_points[0][14] = Point( 14*w/40.0, w/8.0 );
|
||||
rook_points[0][15] = Point( w/4.0, w/8.0 );
|
||||
rook_points[0][16] = Point( w/4.0, 3*w/8.0 );
|
||||
rook_points[0][17] = Point( 13*w/32.0, 3*w/8.0 );
|
||||
rook_points[0][18] = Point( 5*w/16.0, 13*w/16.0 );
|
||||
rook_points[0][19] = Point( w/4.0, 13*w/16.0) ;
|
||||
|
||||
const Point* ppt[1] = { rook_points[0] };
|
||||
int npt[] = { 20 };
|
||||
|
||||
fillPoly( img,
|
||||
ppt,
|
||||
npt,
|
||||
1,
|
||||
Scalar( 255, 255, 255 ),
|
||||
lineType );
|
||||
}
|
||||
@endcode
|
||||
To draw a filled polygon we use the function @ref cv::fillPoly . We note that:
|
||||
|
||||
- The polygon will be drawn on **img**
|
||||
@ -218,22 +111,17 @@ Explanation
|
||||
- The color of the polygon is defined by **Scalar( 255, 255, 255)**, which is the BGR
|
||||
value for *white*
|
||||
- *rectangle*
|
||||
@code{.cpp}
|
||||
rectangle( rook_image,
|
||||
Point( 0, 7*w/8.0 ),
|
||||
Point( w, w),
|
||||
Scalar( 0, 255, 255 ),
|
||||
-1, 8 );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/core/Matrix/Drawing_1.cpp rectangle
|
||||
|
||||
Finally we have the @ref cv::rectangle function (we did not create a special function for
|
||||
this guy). We note that:
|
||||
|
||||
- The rectangle will be drawn on **rook_image**
|
||||
- Two opposite vertices of the rectangle are defined by **Point( 0, 7*w/8 )** and
**Point( w, w )**
- The color of the rectangle is given by **Scalar(0, 255, 255)** which is the BGR value
for *yellow*
- Since the thickness value is given by **FILLED (-1)**, the rectangle will be filled.
|
||||
|
||||
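As a quick recap of the primitives described above, here is a minimal self-contained sketch; it assumes
the same canvas size w = 400 as the sample and draws one instance of each primitive onto a fresh image:
@code{.cpp}
const int w = 400;
Mat img = Mat::zeros( w, w, CV_8UC3 );                 // black canvas
ellipse( img, Point( w/2, w/2 ), Size( w/4, w/16 ), 45, 0, 360, Scalar( 255, 0, 0 ), 2, LINE_8 );
circle( img, Point( w/2, w/2 ), w/32, Scalar( 0, 0, 255 ), FILLED, LINE_8 );
line( img, Point( 0, 15*w/16 ), Point( w, 15*w/16 ), Scalar( 0, 0, 0 ), 2, LINE_8 );
rectangle( img, Point( 0, 7*w/8 ), Point( w, w ), Scalar( 0, 255, 255 ), FILLED, LINE_8 );
imshow( "Drawing sketch", img );
waitKey( 0 );
@endcode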
Result
|
||||
------
|
||||
|
@ -17,105 +17,8 @@ Code
|
||||
|
||||
This tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
/// Global variables
|
||||
Mat src, src_gray;
|
||||
|
||||
int maxCorners = 10;
|
||||
int maxTrackbar = 25;
|
||||
|
||||
RNG rng(12345);
|
||||
char* source_window = "Image";
|
||||
|
||||
/// Function header
|
||||
void goodFeaturesToTrack_Demo( int, void* );
|
||||
|
||||
/* @function main */
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Load source image and convert it to gray
|
||||
src = imread( argv[1], 1 );
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
|
||||
/// Create Window
|
||||
namedWindow( source_window, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Create Trackbar to set the number of corners
|
||||
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
|
||||
|
||||
imshow( source_window, src );
|
||||
|
||||
goodFeaturesToTrack_Demo( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* @function goodFeaturesToTrack_Demo.cpp
|
||||
* @brief Apply Shi-Tomasi corner detector
|
||||
*/
|
||||
void goodFeaturesToTrack_Demo( int, void* )
|
||||
{
|
||||
if( maxCorners < 1 ) { maxCorners = 1; }
|
||||
|
||||
/// Parameters for Shi-Tomasi algorithm
|
||||
vector<Point2f> corners;
|
||||
double qualityLevel = 0.01;
|
||||
double minDistance = 10;
|
||||
int blockSize = 3;
|
||||
bool useHarrisDetector = false;
|
||||
double k = 0.04;
|
||||
|
||||
/// Copy the source image
|
||||
Mat copy;
|
||||
copy = src.clone();
|
||||
|
||||
/// Apply corner detection
|
||||
goodFeaturesToTrack( src_gray,
|
||||
corners,
|
||||
maxCorners,
|
||||
qualityLevel,
|
||||
minDistance,
|
||||
Mat(),
|
||||
blockSize,
|
||||
useHarrisDetector,
|
||||
k );
|
||||
|
||||
|
||||
/// Draw corners detected
|
||||
cout<<"** Number of corners detected: "<<corners.size()<<endl;
|
||||
int r = 4;
|
||||
for( int i = 0; i < corners.size(); i++ )
|
||||
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),
|
||||
rng.uniform(0,255)), -1, 8, 0 ); }
|
||||
|
||||
/// Show what you got
|
||||
namedWindow( source_window, WINDOW_AUTOSIZE );
|
||||
imshow( source_window, copy );
|
||||
|
||||
/// Set the needed parameters to find the refined corners
|
||||
Size winSize = Size( 5, 5 );
|
||||
Size zeroZone = Size( -1, -1 );
|
||||
TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.001 );
|
||||
|
||||
/// Calculate the refined corner locations
|
||||
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
|
||||
|
||||
/// Write them down
|
||||
for( int i = 0; i < corners.size(); i++ )
|
||||
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
|
||||
}
|
||||
@endcode
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
|
@ -16,95 +16,8 @@ Code
|
||||
|
||||
This tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@include samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
/// Global variables
|
||||
Mat src, src_gray;
|
||||
|
||||
int maxCorners = 23;
|
||||
int maxTrackbar = 100;
|
||||
|
||||
RNG rng(12345);
|
||||
char* source_window = "Image";
|
||||
|
||||
/// Function header
|
||||
void goodFeaturesToTrack_Demo( int, void* );
|
||||
|
||||
/*
|
||||
* @function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Load source image and convert it to gray
|
||||
src = imread( argv[1], 1 );
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
|
||||
/// Create Window
|
||||
namedWindow( source_window, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Create Trackbar to set the number of corners
|
||||
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
|
||||
|
||||
imshow( source_window, src );
|
||||
|
||||
goodFeaturesToTrack_Demo( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* @function goodFeaturesToTrack_Demo.cpp
|
||||
* @brief Apply Shi-Tomasi corner detector
|
||||
*/
|
||||
void goodFeaturesToTrack_Demo( int, void* )
|
||||
{
|
||||
if( maxCorners < 1 ) { maxCorners = 1; }
|
||||
|
||||
/// Parameters for Shi-Tomasi algorithm
|
||||
vector<Point2f> corners;
|
||||
double qualityLevel = 0.01;
|
||||
double minDistance = 10;
|
||||
int blockSize = 3;
|
||||
bool useHarrisDetector = false;
|
||||
double k = 0.04;
|
||||
|
||||
/// Copy the source image
|
||||
Mat copy;
|
||||
copy = src.clone();
|
||||
|
||||
/// Apply corner detection
|
||||
goodFeaturesToTrack( src_gray,
|
||||
corners,
|
||||
maxCorners,
|
||||
qualityLevel,
|
||||
minDistance,
|
||||
Mat(),
|
||||
blockSize,
|
||||
useHarrisDetector,
|
||||
k );
|
||||
|
||||
|
||||
/// Draw corners detected
|
||||
cout<<"** Number of corners detected: "<<corners.size()<<endl;
|
||||
int r = 4;
|
||||
for( size_t i = 0; i < corners.size(); i++ )
|
||||
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),
|
||||
rng.uniform(0,255)), -1, 8, 0 ); }
|
||||
|
||||
/// Show what you got
|
||||
namedWindow( source_window, WINDOW_AUTOSIZE );
|
||||
imshow( source_window, copy );
|
||||
}
|
||||
@endcode
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
|
@ -120,79 +120,8 @@ Code
|
||||
|
||||
This tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@include samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
/// Global variables
|
||||
Mat src, src_gray;
|
||||
int thresh = 200;
|
||||
int max_thresh = 255;
|
||||
|
||||
char* source_window = "Source image";
|
||||
char* corners_window = "Corners detected";
|
||||
|
||||
/// Function header
|
||||
void cornerHarris_demo( int, void* );
|
||||
|
||||
/* @function main */
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Load source image and convert it to gray
|
||||
src = imread( argv[1], 1 );
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
|
||||
/// Create a window and a trackbar
|
||||
namedWindow( source_window, WINDOW_AUTOSIZE );
|
||||
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
|
||||
imshow( source_window, src );
|
||||
|
||||
cornerHarris_demo( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return(0);
|
||||
}
|
||||
|
||||
/* @function cornerHarris_demo */
|
||||
void cornerHarris_demo( int, void* )
|
||||
{
|
||||
|
||||
Mat dst, dst_norm, dst_norm_scaled;
|
||||
dst = Mat::zeros( src.size(), CV_32FC1 );
|
||||
|
||||
/// Detector parameters
|
||||
int blockSize = 2;
|
||||
int apertureSize = 3;
|
||||
double k = 0.04;
|
||||
|
||||
/// Detecting corners
|
||||
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
|
||||
|
||||
/// Normalizing
|
||||
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
|
||||
convertScaleAbs( dst_norm, dst_norm_scaled );
|
||||
|
||||
/// Drawing a circle around corners
|
||||
for( int j = 0; j < dst_norm.rows ; j++ )
|
||||
{ for( int i = 0; i < dst_norm.cols; i++ )
|
||||
{
|
||||
if( (int) dst_norm.at<float>(j,i) > thresh )
|
||||
{
|
||||
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Showing the result
|
||||
namedWindow( corners_window, WINDOW_AUTOSIZE );
|
||||
imshow( corners_window, dst_norm_scaled );
|
||||
}
|
||||
@endcode
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
|
@ -4,7 +4,7 @@ Adding a Trackbar to our applications! {#tutorial_trackbar}
|
||||
- In the previous tutorials (about *linear blending* and the *brightness and contrast
|
||||
adjustments*) you might have noted that we needed to give some **input** to our programs, such
|
||||
as \f$\alpha\f$ and \f$\beta\f$. We accomplished that by entering this data using the Terminal.
|
||||
- Well, it is time to use some fancy GUI tools. OpenCV provides some GUI utilities (*highgui.hpp*)
|
||||
for you. An example of this is a **Trackbar**
|
||||
|
||||
![](images/Adding_Trackbars_Tutorial_Trackbar.png)
|
||||
@ -24,104 +24,36 @@ Code
|
||||
|
||||
Let's modify the program made in the tutorial @ref tutorial_adding_images. We will let the user enter the
|
||||
\f$\alpha\f$ value by using the Trackbar.
|
||||
@code{.cpp}
|
||||
#include <opencv2/opencv.hpp>
|
||||
using namespace cv;
|
||||
|
||||
/// Global Variables
|
||||
const int alpha_slider_max = 100;
|
||||
int alpha_slider;
|
||||
double alpha;
|
||||
double beta;
|
||||
|
||||
/// Matrices to store images
|
||||
Mat src1;
|
||||
Mat src2;
|
||||
Mat dst;
|
||||
|
||||
/*
|
||||
* @function on_trackbar
|
||||
* @brief Callback for trackbar
|
||||
*/
|
||||
void on_trackbar( int, void* )
|
||||
{
|
||||
alpha = (double) alpha_slider/alpha_slider_max ;
|
||||
beta = ( 1.0 - alpha );
|
||||
|
||||
addWeighted( src1, alpha, src2, beta, 0.0, dst);
|
||||
|
||||
imshow( "Linear Blend", dst );
|
||||
}
|
||||
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Read image ( same size, same type )
|
||||
src1 = imread("../../images/LinuxLogo.jpg");
|
||||
src2 = imread("../../images/WindowsLogo.jpg");
|
||||
|
||||
if( !src1.data ) { printf("Error loading src1 \n"); return -1; }
|
||||
if( !src2.data ) { printf("Error loading src2 \n"); return -1; }
|
||||
|
||||
/// Initialize values
|
||||
alpha_slider = 0;
|
||||
|
||||
/// Create Windows
|
||||
namedWindow("Linear Blend", 1);
|
||||
|
||||
/// Create Trackbars
|
||||
char TrackbarName[50];
|
||||
sprintf( TrackbarName, "Alpha x %d", alpha_slider_max );
|
||||
|
||||
createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
|
||||
|
||||
/// Show some stuff
|
||||
on_trackbar( alpha_slider, 0 );
|
||||
|
||||
/// Wait until user press some key
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
@endcode
|
||||
This tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
|
||||
@include cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp
|
||||
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
We only analyze the code that is related to Trackbar:
|
||||
|
||||
-# First, we load two images, which are going to be blended.
|
||||
@code{.cpp}
|
||||
src1 = imread("../../images/LinuxLogo.jpg");
|
||||
src2 = imread("../../images/WindowsLogo.jpg");
|
||||
@endcode
|
||||
-# First, we load two images, which are going to be blended.
|
||||
@snippet cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp load
|
||||
|
||||
-# To create a trackbar, first we have to create the window in which it is going to be located. So:
|
||||
@code{.cpp}
|
||||
namedWindow("Linear Blend", 1);
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp window
|
||||
|
||||
-# Now we can create the Trackbar:
|
||||
@code{.cpp}
|
||||
createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp create_trackbar
|
||||
|
||||
Note the following:
|
||||
|
||||
- Our Trackbar has a label **TrackbarName**
|
||||
- The Trackbar is located in the window named **Linear Blend**
|
||||
- The Trackbar values will be in the range from \f$0\f$ to **alpha_slider_max** (the minimum
|
||||
limit is always **zero**).
|
||||
- The numerical value of Trackbar is stored in **alpha_slider**
|
||||
- Whenever the user moves the Trackbar, the callback function **on_trackbar** is called
|
||||
|
||||
-# Finally, we have to define the callback function **on_trackbar**
|
||||
@code{.cpp}
|
||||
void on_trackbar( int, void* )
|
||||
{
|
||||
alpha = (double) alpha_slider/alpha_slider_max ;
|
||||
beta = ( 1.0 - alpha );
|
||||
@snippet cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp on_trackbar
|
||||
|
||||
addWeighted( src1, alpha, src2, beta, 0.0, dst);
|
||||
|
||||
imshow( "Linear Blend", dst );
|
||||
}
|
||||
@endcode
|
||||
Note that:
|
||||
- We use the value of **alpha_slider** (integer) to get a double value for **alpha**.
|
||||
- **alpha_slider** is updated each time the trackbar is displaced by the user.
|
||||
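A condensed sketch of how these pieces fit together (window, trackbar, callback); it assumes *src1* and
*src2* have already been loaded as in the earlier steps:
@code{.cpp}
const int alpha_slider_max = 100;
int alpha_slider = 0;
Mat src1, src2, dst;

static void on_trackbar( int, void* )
{
    double alpha = (double) alpha_slider / alpha_slider_max;
    addWeighted( src1, alpha, src2, 1.0 - alpha, 0.0, dst );
    imshow( "Linear Blend", dst );
}

// in main(), after loading src1 and src2:
// namedWindow( "Linear Blend", WINDOW_AUTOSIZE );
// createTrackbar( "Alpha x 100", "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
// on_trackbar( alpha_slider, 0 );   // draw something before the user touches the slider
@endcode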
@ -135,7 +67,7 @@ Result
|
||||
|
||||
![](images/Adding_Trackbars_Tutorial_Result_0.jpg)
|
||||
|
||||
- As an exercise, you can also add two trackbars for the program made in
|
||||
@ref tutorial_basic_linear_transform. One trackbar to set \f$\alpha\f$ and another for \f$\beta\f$. The output might
|
||||
look like:
|
||||
|
||||
|
@ -35,17 +35,13 @@ How to Read Raster Data using GDAL
|
||||
|
||||
This demonstration uses the default OpenCV imread function. The primary difference is that in order
|
||||
to force GDAL to load the image, you must use the appropriate flag.
|
||||
@code{.cpp}
|
||||
cv::Mat image = cv::imread( argv[1], cv::IMREAD_LOAD_GDAL );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/imgcodecs/GDAL_IO/gdal-image.cpp load1
|
||||
When loading digital elevation models, the actual numeric value of each pixel is essential and
|
||||
cannot be scaled or truncated. For example, with image data a pixel represented as a double with a
|
||||
value of 1 has an equal appearance to a pixel which is represented as an unsigned character with a
|
||||
value of 255. With terrain data, the pixel value represents the elevation in meters. In order to
|
||||
ensure that OpenCV preserves the native value, use the GDAL flag in imread with the ANYDEPTH flag.
|
||||
@code{.cpp}
|
||||
cv::Mat dem = cv::imread( argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/imgcodecs/GDAL_IO/gdal-image.cpp load2
|
||||
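Since the numeric values are what matter here, it can be worth asserting the loaded depth right after the
call; a minimal guard, assuming a 16-bit signed DEM such as SRTM, might look like:
@code{.cpp}
cv::Mat dem = cv::imread( argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );
CV_Assert( !dem.empty() );
// Assumption: the elevation model ships 16-bit signed heights (e.g. SRTM);
// adjust the expected depth to the specification of the data you actually load
CV_Assert( dem.depth() == CV_16S );
@endcode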
If you know beforehand the type of DEM model you are loading, then it may be a safe bet to test the
|
||||
Mat::type() or Mat::depth() using an assert or other mechanism. NASA or DOD specification documents
|
||||
can provide the input types for various elevation models. The major types, SRTM and DTED, are both
|
||||
|
@ -71,7 +71,7 @@ Explanation
|
||||
|
||||
- Load an image (can be BGR or grayscale)
|
||||
- Create two windows (one for dilation output, the other for erosion)
|
||||
- Create a set of two Trackbars for each operation:
|
||||
- The first trackbar "Element" returns either **erosion_elem** or **dilation_elem**
|
||||
- The second trackbar "Kernel size" return **erosion_size** or **dilation_size** for the
|
||||
corresponding operation.
|
||||
@ -81,23 +81,8 @@ Explanation
|
||||
Let's analyze these two functions:
|
||||
|
||||
-# **erosion:**
|
||||
@code{.cpp}
|
||||
/* @function Erosion */
|
||||
void Erosion( int, void* )
|
||||
{
|
||||
int erosion_type;
|
||||
if( erosion_elem == 0 ){ erosion_type = MORPH_RECT; }
|
||||
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
|
||||
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
|
||||
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
|
||||
|
||||
Mat element = getStructuringElement( erosion_type,
|
||||
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
|
||||
Point( erosion_size, erosion_size ) );
|
||||
/// Apply the erosion operation
|
||||
erode( src, erosion_dst, element );
|
||||
imshow( "Erosion Demo", erosion_dst );
|
||||
}
|
||||
@endcode
|
||||
- The function that performs the *erosion* operation is @ref cv::erode . As we can see, it
|
||||
receives three arguments:
|
||||
- *src*: The source image
|
||||
@ -105,11 +90,8 @@ Explanation
|
||||
- *element*: This is the kernel we will use to perform the operation. If we do not
|
||||
specify, the default is a simple `3x3` matrix. Otherwise, we can specify its
|
||||
shape. For this, we need to use the function cv::getStructuringElement :
|
||||
@code{.cpp}
|
||||
Mat element = getStructuringElement( erosion_type,
|
||||
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
|
||||
Point( erosion_size, erosion_size ) );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp kernel
|
||||
|
||||
We can choose any of three shapes for our kernel:
|
||||
|
||||
- Rectangular box: MORPH_RECT
|
||||
@ -129,23 +111,7 @@ Reference for more details.
|
||||
The code is below. As you can see, it is quite similar to the snippet of code for **erosion**.
|
||||
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
||||
to be used.
|
||||
@code{.cpp}
|
||||
/* @function Dilation */
|
||||
void Dilation( int, void* )
|
||||
{
|
||||
int dilation_type;
|
||||
if( dilation_elem == 0 ){ dilation_type = MORPH_RECT; }
|
||||
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
|
||||
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
|
||||
|
||||
Mat element = getStructuringElement( dilation_type,
|
||||
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
|
||||
Point( dilation_size, dilation_size ) );
|
||||
/// Apply the dilation operation
|
||||
dilate( src, dilation_dst, element );
|
||||
imshow( "Dilation Demo", dilation_dst );
|
||||
}
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp dilation
|
||||
|
||||
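As a compact recap of both operations, a minimal sketch with a fixed 5x5 rectangular kernel (no trackbars)
might be:
@code{.cpp}
// Fixed kernel instead of the trackbar-driven one used in the sample
Mat element = getStructuringElement( MORPH_RECT, Size( 5, 5 ), Point( 2, 2 ) );
Mat eroded, dilated;
erode( src, eroded, element );
dilate( src, dilated, element );
imshow( "Erosion Demo", eroded );
imshow( "Dilation Demo", dilated );
@endcode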
Results
|
||||
-------
|
||||
|
@ -16,8 +16,7 @@ Theory
|
||||
------
|
||||
|
||||
@note The explanation below belongs to the book [Computer Vision: Algorithms and
|
||||
Applications](http://szeliski.org/Book/) by Richard Szeliski and to *LearningOpenCV*
|
||||
|
||||
- *Smoothing*, also called *blurring*, is a simple and frequently used image processing
|
||||
operation.
|
||||
@ -96,96 +95,7 @@ Code
|
||||
- **Downloadable code**: Click
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp)
|
||||
- **Code at a glance:**
|
||||
@code{.cpp}
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
/// Global Variables
|
||||
int DELAY_CAPTION = 1500;
|
||||
int DELAY_BLUR = 100;
|
||||
int MAX_KERNEL_LENGTH = 31;
|
||||
|
||||
Mat src; Mat dst;
|
||||
char window_name[] = "Filter Demo 1";
|
||||
|
||||
/// Function headers
|
||||
int display_caption( char* caption );
|
||||
int display_dst( int delay );
|
||||
|
||||
/*
|
||||
* function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Load the source image
|
||||
src = imread( "../images/lena.jpg", 1 );
|
||||
|
||||
if( display_caption( "Original Image" ) != 0 ) { return 0; }
|
||||
|
||||
dst = src.clone();
|
||||
if( display_dst( DELAY_CAPTION ) != 0 ) { return 0; }
|
||||
|
||||
/// Applying Homogeneous blur
|
||||
if( display_caption( "Homogeneous Blur" ) != 0 ) { return 0; }
|
||||
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ blur( src, dst, Size( i, i ), Point(-1,-1) );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
/// Applying Gaussian blur
|
||||
if( display_caption( "Gaussian Blur" ) != 0 ) { return 0; }
|
||||
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ GaussianBlur( src, dst, Size( i, i ), 0, 0 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
/// Applying Median blur
|
||||
if( display_caption( "Median Blur" ) != 0 ) { return 0; }
|
||||
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ medianBlur ( src, dst, i );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
/// Applying Bilateral Filter
|
||||
if( display_caption( "Bilateral Blur" ) != 0 ) { return 0; }
|
||||
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ bilateralFilter ( src, dst, i, i*2, i/2 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
/// Wait until user press a key
|
||||
display_caption( "End: Press a key!" );
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int display_caption( char* caption )
|
||||
{
|
||||
dst = Mat::zeros( src.size(), src.type() );
|
||||
putText( dst, caption,
|
||||
Point( src.cols/4, src.rows/2),
|
||||
FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
|
||||
|
||||
imshow( window_name, dst );
|
||||
int c = waitKey( DELAY_CAPTION );
|
||||
if( c >= 0 ) { return -1; }
|
||||
return 0;
|
||||
}
|
||||
|
||||
int display_dst( int delay )
|
||||
{
|
||||
imshow( window_name, dst );
|
||||
int c = waitKey ( delay );
|
||||
if( c >= 0 ) { return -1; }
|
||||
return 0;
|
||||
}
|
||||
@endcode
|
||||
@include samples/cpp/tutorial_code/ImgProc/Smoothing.cpp
|
||||
|
||||
Explanation
|
||||
-----------
|
||||
@ -195,11 +105,8 @@ Explanation
|
||||
-# **Normalized Block Filter:**
|
||||
|
||||
OpenCV offers the function @ref cv::blur to perform smoothing with this filter.
|
||||
@code{.cpp}
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ blur( src, dst, Size( i, i ), Point(-1,-1) );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Smoothing.cpp blur
|
||||
|
||||
We specify 4 arguments (more details, check the Reference):
|
||||
|
||||
- *src*: Source image
|
||||
@ -213,11 +120,8 @@ Explanation
|
||||
-# **Gaussian Filter:**
|
||||
|
||||
It is performed by the function @ref cv::GaussianBlur :
|
||||
@code{.cpp}
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ GaussianBlur( src, dst, Size( i, i ), 0, 0 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Smoothing.cpp gaussianblur
|
||||
|
||||
Here we use 4 arguments (more details, check the OpenCV reference):
|
||||
|
||||
- *src*: Source image
|
||||
@ -233,11 +137,8 @@ Explanation
|
||||
-# **Median Filter:**
|
||||
|
||||
This filter is provided by the @ref cv::medianBlur function:
|
||||
@code{.cpp}
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ medianBlur ( src, dst, i );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Smoothing.cpp medianblur
|
||||
|
||||
We use three arguments:
|
||||
|
||||
- *src*: Source image
|
||||
@ -247,11 +148,8 @@ Explanation
|
||||
-# **Bilateral Filter**
|
||||
|
||||
Provided by OpenCV function @ref cv::bilateralFilter
|
||||
@code{.cpp}
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ bilateralFilter ( src, dst, i, i*2, i/2 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Smoothing.cpp bilateralfilter
|
||||
|
||||
We use 5 arguments:
|
||||
|
||||
- *src*: Source image
|
||||
|
@ -81,17 +81,8 @@ Explanation
|
||||
-----------
|
||||
|
||||
-# Create some needed variables:
|
||||
@code{.cpp}
|
||||
Mat src, src_gray;
|
||||
Mat dst, detected_edges;
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp variables
|
||||
|
||||
int edgeThresh = 1;
|
||||
int lowThreshold;
|
||||
int const max_lowThreshold = 100;
|
||||
int ratio = 3;
|
||||
int kernel_size = 3;
|
||||
char* window_name = "Edge Map";
|
||||
@endcode
|
||||
Note the following:
|
||||
|
||||
-# We establish a ratio of lower:upper threshold of 3:1 (with the variable *ratio*)
|
||||
@ -100,29 +91,16 @@ Explanation
|
||||
-# We set a maximum value for the lower Threshold of \f$100\f$.
|
||||
|
||||
-# Loads the source image:
|
||||
@code{.cpp}
|
||||
/// Load an image
|
||||
src = imread( argv[1] );
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp load
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
@endcode
|
||||
-# Create a matrix of the same type and size of *src* (to be *dst*)
|
||||
@code{.cpp}
|
||||
dst.create( src.size(), src.type() );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp create_mat
|
||||
-# Convert the image to grayscale (using the function @ref cv::cvtColor ):
|
||||
@code{.cpp}
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp convert_to_gray
|
||||
-# Create a window to display the results
|
||||
@code{.cpp}
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp create_window
|
||||
-# Create a Trackbar for the user to enter the lower threshold for our Canny detector:
|
||||
@code{.cpp}
|
||||
createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp create_trackbar
|
||||
Observe the following:
|
||||
|
||||
-# The variable to be controlled by the Trackbar is *lowThreshold* with a limit of
|
||||
@ -132,13 +110,9 @@ Explanation
|
||||
|
||||
-# Let's check the *CannyThreshold* function, step by step:
|
||||
-# First, we blur the image with a filter of kernel size 3:
|
||||
@code{.cpp}
|
||||
blur( src_gray, detected_edges, Size(3,3) );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp reduce_noise
|
||||
-# Second, we apply the OpenCV function @ref cv::Canny :
|
||||
@code{.cpp}
|
||||
Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp canny
|
||||
where the arguments are:
|
||||
|
||||
- *detected_edges*: Source image, grayscale
|
||||
@ -150,23 +124,16 @@ Explanation
|
||||
internally)
|
||||
|
||||
-# We fill a *dst* image with zeros (meaning the image is completely black).
|
||||
@code{.cpp}
|
||||
dst = Scalar::all(0);
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp fill
|
||||
-# Finally, we will use the function @ref cv::Mat::copyTo to map only the areas of the image that are
|
||||
identified as edges (on a black background).
|
||||
@code{.cpp}
|
||||
src.copyTo( dst, detected_edges);
|
||||
@endcode
|
||||
@ref cv::Mat::copyTo copies the *src* image onto *dst*. However, it will only copy the pixels in the
|
||||
locations where they have non-zero values. Since the output of the Canny detector is the edge
|
||||
contours on a black background, the resulting *dst* will be black in all the area but the
|
||||
detected edges.
|
||||
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp copyto
|
||||
-# We display our result:
|
||||
@code{.cpp}
|
||||
imshow( window_name, dst );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp display
|
||||
|
||||
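Putting the steps of *CannyThreshold* together, a condensed sketch with a fixed lower threshold of 50
(instead of the trackbar value) could look like:
@code{.cpp}
Mat detected_edges, dst;
blur( src_gray, detected_edges, Size(3,3) );            // reduce noise with a 3x3 kernel
Canny( detected_edges, detected_edges, 50, 50*3, 3 );   // lower:upper ratio of 1:3, kernel size 3
dst.create( src.size(), src.type() );
dst = Scalar::all(0);                                   // start from a black image
src.copyTo( dst, detected_edges );                      // keep only the pixels marked as edges
imshow( "Edge Map", dst );
@endcode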
Result
|
||||
------
|
||||
|
@ -53,61 +53,29 @@ Explanation
|
||||
-----------
|
||||
|
||||
-# First we declare the variables we are going to use:
|
||||
@code{.cpp}
|
||||
Mat src, dst;
|
||||
int top, bottom, left, right;
|
||||
int borderType;
|
||||
Scalar value;
|
||||
char* window_name = "copyMakeBorder Demo";
|
||||
RNG rng(12345);
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp variables
|
||||
|
||||
The variable *rng* deserves special attention; it is a random number generator. We use it to
|
||||
generate the random border color, as we will see soon.
|
||||
|
||||
-# As usual we load our source image *src*:
|
||||
@code{.cpp}
|
||||
src = imread( argv[1] );
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp load
|
||||
|
||||
if( !src.data )
|
||||
{ return -1;
|
||||
printf(" No data entered, please enter the path to an image file \n");
|
||||
}
|
||||
@endcode
|
||||
-# After giving a short intro of how to use the program, we create a window:
|
||||
@code{.cpp}
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp create_window
|
||||
-# Now we initialize the arguments that define the size of the borders (*top*, *bottom*, *left* and
*right*). We give them a value of 5% of the size of *src*.
|
||||
@code{.cpp}
|
||||
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
|
||||
left = (int) (0.05*src.cols); right = (int) (0.05*src.cols);
|
||||
@endcode
|
||||
-# The program begins a *while* loop. If the user presses 'c' or 'r', the *borderType* variable
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp init_arguments
|
||||
-# The program runs in a **for** loop. If the user presses 'c' or 'r', the *borderType* variable
|
||||
takes the value of *BORDER_CONSTANT* or *BORDER_REPLICATE* respectively:
|
||||
@code{.cpp}
|
||||
while( true )
|
||||
{
|
||||
c = waitKey(500);
|
||||
|
||||
if( (char)c == 27 )
|
||||
{ break; }
|
||||
else if( (char)c == 'c' )
|
||||
{ borderType = BORDER_CONSTANT; }
|
||||
else if( (char)c == 'r' )
|
||||
{ borderType = BORDER_REPLICATE; }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp check_keypress
|
||||
-# In each iteration (after 0.5 seconds), the variable *value* is updated...
|
||||
@code{.cpp}
|
||||
value = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp update_value
|
||||
with a random value generated by the **RNG** variable *rng*. This value is a number picked
|
||||
randomly in the range \f$[0,255]\f$
|
||||
|
||||
-# Finally, we call the function @ref cv::copyMakeBorder to apply the respective padding:
|
||||
@code{.cpp}
|
||||
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp copymakeborder
|
||||
The arguments are:
|
||||
|
||||
-# *src*: Source image
|
||||
@ -120,9 +88,7 @@ Explanation
|
||||
pixels.
|
||||
|
||||
-# We display our output image in the image created previously
|
||||
@code{.cpp}
|
||||
imshow( window_name, dst );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp display
|
||||
|
||||
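A minimal sketch of the padding step alone, using a fixed constant color instead of the random one (the
5% border size is the same value used above):
@code{.cpp}
Mat src = imread( argv[1] );
int top  = (int)(0.05*src.rows), bottom = top;
int left = (int)(0.05*src.cols), right  = left;
Scalar value( 0, 0, 255 );                              // any BGR color; the sample picks one at random
Mat dst;
copyMakeBorder( src, dst, top, bottom, left, right, BORDER_CONSTANT, value );
imshow( "copyMakeBorder Demo", dst );
@endcode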
Results
|
||||
-------
|
||||
|
@ -63,100 +63,25 @@ Code
|
||||
|
||||
-# The tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
@include cpp/tutorial_code/ImgTrans/filter2D_demo.cpp
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/* @function main */
|
||||
int main ( int argc, char** argv )
|
||||
{
|
||||
/// Declare variables
|
||||
Mat src, dst;
|
||||
|
||||
Mat kernel;
|
||||
Point anchor;
|
||||
double delta;
|
||||
int ddepth;
|
||||
int kernel_size;
|
||||
char* window_name = "filter2D Demo";
|
||||
|
||||
int c;
|
||||
|
||||
/// Load an image
|
||||
src = imread( argv[1] );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Initialize arguments for the filter
|
||||
anchor = Point( -1, -1 );
|
||||
delta = 0;
|
||||
ddepth = -1;
|
||||
|
||||
/// Loop - Will filter the image with different kernel sizes each 0.5 seconds
|
||||
int ind = 0;
|
||||
while( true )
|
||||
{
|
||||
c = waitKey(500);
|
||||
/// Press 'ESC' to exit the program
|
||||
if( (char)c == 27 )
|
||||
{ break; }
|
||||
|
||||
/// Update kernel size for a normalized box filter
|
||||
kernel_size = 3 + 2*( ind%5 );
|
||||
kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
|
||||
|
||||
/// Apply filter
|
||||
filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT );
|
||||
imshow( window_name, dst );
|
||||
ind++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@endcode
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
-# Load an image
|
||||
@code{.cpp}
|
||||
src = imread( argv[1] );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
@endcode
|
||||
-# Create a window to display the result
|
||||
@code{.cpp}
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp load
|
||||
-# Initialize the arguments for the linear filter
|
||||
@code{.cpp}
|
||||
anchor = Point( -1, -1 );
|
||||
delta = 0;
|
||||
ddepth = -1;
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp init_arguments
|
||||
-# Perform an infinite loop updating the kernel size and applying our linear filter to the input
|
||||
image. Let's analyze that more in detail:
|
||||
-# First we define the kernel our filter is going to use. Here it is:
|
||||
@code{.cpp}
|
||||
kernel_size = 3 + 2*( ind%5 );
|
||||
kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp update_kernel
|
||||
The first line is to update the *kernel_size* to odd values in the range: \f$[3,11]\f$. The second
|
||||
line actually builds the kernel by setting its value to a matrix filled with \f$1's\f$ and
|
||||
normalizing it by dividing it between the number of elements.
|
||||
|
||||
-# After setting the kernel, we can generate the filter by using the function @ref cv::filter2D :
|
||||
@code{.cpp}
|
||||
filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp apply_filter
|
||||
The arguments denote:
|
||||
|
||||
-# *src*: Source image
|
||||
|
@ -48,63 +48,33 @@ Explanation
|
||||
-----------
|
||||
|
||||
-# Load an image
|
||||
@code{.cpp}
|
||||
src = imread( argv[1], 1 );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
@endcode
|
||||
@snippet samples/cpp/houghcircles.cpp load
|
||||
-# Convert it to grayscale:
|
||||
@code{.cpp}
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
@endcode
|
||||
-# Apply a Gaussian blur to reduce noise and avoid false circle detection:
|
||||
@code{.cpp}
|
||||
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
|
||||
@endcode
|
||||
@snippet samples/cpp/houghcircles.cpp convert_to_gray
|
||||
-# Apply a Median blur to reduce noise and avoid false circle detection:
|
||||
@snippet samples/cpp/houghcircles.cpp reduce_noise
|
||||
-# Proceed to apply Hough Circle Transform:
|
||||
@code{.cpp}
|
||||
vector<Vec3f> circles;
|
||||
|
||||
HoughCircles( src_gray, circles, HOUGH_GRADIENT, 1, src_gray.rows/8, 200, 100, 0, 0 );
|
||||
@endcode
|
||||
@snippet samples/cpp/houghcircles.cpp houghcircles
|
||||
with the arguments:
|
||||
|
||||
- *gray*: Input image (grayscale).
|
||||
- *circles*: A vector that stores sets of 3 values: \f$x_{c}, y_{c}, r\f$ for each detected
|
||||
circle.
|
||||
- *HOUGH_GRADIENT*: Define the detection method. Currently this is the only one available in
|
||||
OpenCV.
|
||||
- *dp = 1*: The inverse ratio of resolution.
|
||||
- *min_dist = gray.rows/16*: Minimum distance between detected centers.
|
||||
- *param_1 = 200*: Upper threshold for the internal Canny edge detector.
|
||||
- *param_2 = 100*: Threshold for center detection.
|
||||
- *min_radius = 0*: Minimum radius to be detected. If unknown, put zero as default.
|
||||
- *max_radius = 0*: Maximum radius to be detected. If unknown, put zero as default.
|
||||
|
||||
-# Draw the detected circles:
|
||||
@code{.cpp}
|
||||
for( size_t i = 0; i < circles.size(); i++ )
|
||||
{
|
||||
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
|
||||
int radius = cvRound(circles[i][2]);
|
||||
// circle center
|
||||
circle( src, center, 3, Scalar(0,255,0), -1, 8, 0 );
|
||||
// circle outline
|
||||
circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
|
||||
}
|
||||
@endcode
|
||||
@snippet samples/cpp/houghcircles.cpp draw
|
||||
You can see that we will draw the circle(s) in red and the center(s) with a small green dot.
|
||||
|
||||
-# Display the detected circle(s):
|
||||
@code{.cpp}
|
||||
namedWindow( "Hough Circle Transform Demo", WINDOW_AUTOSIZE );
|
||||
imshow( "Hough Circle Transform Demo", src );
|
||||
@endcode
|
||||
-# Wait for the user to exit the program
|
||||
@code{.cpp}
|
||||
waitKey(0);
|
||||
@endcode
|
||||
-# Display the detected circle(s) and wait for the user to exit the program:
|
||||
@snippet samples/cpp/houghcircles.cpp display
|
||||
|
||||
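A condensed sketch of the detection and drawing steps, using the parameter values listed above:
@code{.cpp}
Mat gray;
cvtColor( src, gray, COLOR_BGR2GRAY );
medianBlur( gray, gray, 5 );                            // reduce noise to avoid false circles
std::vector<Vec3f> circles;
HoughCircles( gray, circles, HOUGH_GRADIENT, 1, gray.rows/16, 200, 100, 0, 0 );
for( size_t i = 0; i < circles.size(); i++ )
{
    Point center( cvRound(circles[i][0]), cvRound(circles[i][1]) );
    int radius = cvRound( circles[i][2] );
    circle( src, center, 3, Scalar(0,255,0), -1 );      // center: small green dot
    circle( src, center, radius, Scalar(0,0,255), 3 );  // outline: red
}
imshow( "Hough Circle Transform Demo", src );
@endcode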
Result
|
||||
------
|
||||
|
@ -58,33 +58,15 @@ Explanation
|
||||
-----------
|
||||
|
||||
-# Create some needed variables:
|
||||
@code{.cpp}
|
||||
Mat src, src_gray, dst;
|
||||
int kernel_size = 3;
|
||||
int scale = 1;
|
||||
int delta = 0;
|
||||
int ddepth = CV_16S;
|
||||
char* window_name = "Laplace Demo";
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp variables
|
||||
-# Loads the source image:
|
||||
@code{.cpp}
|
||||
src = imread( argv[1] );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp load
|
||||
-# Apply a Gaussian blur to reduce noise:
|
||||
@code{.cpp}
|
||||
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp reduce_noise
|
||||
-# Convert the image to grayscale using @ref cv::cvtColor
|
||||
@code{.cpp}
|
||||
cvtColor( src, src_gray, COLOR_RGB2GRAY );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert_to_gray
|
||||
-# Apply the Laplacian operator to the grayscale image:
|
||||
@code{.cpp}
|
||||
Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp laplacian
|
||||
where the arguments are:
|
||||
|
||||
- *src_gray*: The input image.
|
||||
@ -96,13 +78,9 @@ Explanation
|
||||
- *scale*, *delta* and *BORDER_DEFAULT*: We leave them as default values.
|
||||
|
||||
-# Convert the output from the Laplacian operator to a *CV_8U* image:
|
||||
@code{.cpp}
|
||||
convertScaleAbs( dst, abs_dst );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert
|
||||
-# Display the result in a window:
|
||||
@code{.cpp}
|
||||
imshow( window_name, abs_dst );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp display
|
||||
|
||||
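The whole pipeline fits in a few lines; a condensed sketch (fixed kernel size 3, CV_16S intermediate
depth, and COLOR_BGR2GRAY since @ref cv::imread returns BGR data):
@code{.cpp}
Mat src_gray, dst, abs_dst;
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );  // remove noise first
cvtColor( src, src_gray, COLOR_BGR2GRAY );
Laplacian( src_gray, dst, CV_16S, 3 );                      // ddepth = CV_16S, kernel_size = 3
convertScaleAbs( dst, abs_dst );                            // back to CV_8U for display
imshow( "Laplace Demo", abs_dst );
@endcode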
Results
|
||||
-------
|
||||
|
@ -115,40 +115,16 @@ Explanation
|
||||
-----------
|
||||
|
||||
-# First we declare the variables we are going to use:
|
||||
@code{.cpp}
|
||||
Mat src, src_gray;
|
||||
Mat grad;
|
||||
char* window_name = "Sobel Demo - Simple Edge Detector";
|
||||
int scale = 1;
|
||||
int delta = 0;
|
||||
int ddepth = CV_16S;
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp variables
|
||||
-# As usual we load our source image *src*:
|
||||
@code{.cpp}
|
||||
src = imread( argv[1] );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp load
|
||||
-# First, we apply a @ref cv::GaussianBlur to our image to reduce the noise ( kernel size = 3 )
|
||||
@code{.cpp}
|
||||
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp reduce_noise
|
||||
-# Now we convert our filtered image to grayscale:
|
||||
@code{.cpp}
|
||||
cvtColor( src, src_gray, COLOR_RGB2GRAY );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert_to_gray
|
||||
-# Second, we calculate the "*derivatives*" in *x* and *y* directions. For this, we use the
|
||||
function @ref cv::Sobel as shown below:
|
||||
@code{.cpp}
|
||||
Mat grad_x, grad_y;
|
||||
Mat abs_grad_x, abs_grad_y;
|
||||
|
||||
/// Gradient X
|
||||
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
|
||||
/// Gradient Y
|
||||
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp sobel
|
||||
The function takes the following arguments:
|
||||
|
||||
- *src_gray*: In our example, the input image. Here it is *CV_8U*
|
||||
@ -162,19 +138,12 @@ Explanation
|
||||
\f$y_{order} = 0\f$. We do analogously for the *y* direction.
|
||||
|
||||
-# We convert our partial results back to *CV_8U*:
|
||||
@code{.cpp}
|
||||
convertScaleAbs( grad_x, abs_grad_x );
|
||||
convertScaleAbs( grad_y, abs_grad_y );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert
|
||||
-# Finally, we try to approximate the *gradient* by adding both directional gradients (note that
|
||||
this is not an exact calculation, but it is good enough for our purposes).
|
||||
@code{.cpp}
|
||||
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp blend
|
||||
-# Finally, we show our result:
|
||||
@code{.cpp}
|
||||
imshow( window_name, grad );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp display
|
||||
|
||||
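Condensed, the steps above amount to the following sketch (fixed kernel size 3 and CV_16S intermediates;
COLOR_BGR2GRAY is used because @ref cv::imread returns BGR data):
@code{.cpp}
Mat src_gray, grad_x, grad_y, abs_grad_x, abs_grad_y, grad;
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
Sobel( src_gray, grad_x, CV_16S, 1, 0, 3 );                 // derivative in x
Sobel( src_gray, grad_y, CV_16S, 0, 1, 3 );                 // derivative in y
convertScaleAbs( grad_x, abs_grad_x );
convertScaleAbs( grad_y, abs_grad_y );
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad );   // approximate gradient magnitude
imshow( "Sobel Demo - Simple Edge Detector", grad );
@endcode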
Results
|
||||
-------
|
||||
|
@ -81,76 +81,7 @@ Code
|
||||
|
||||
This tutorial code is shown in the lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Global variables
|
||||
Mat src, dst;
|
||||
|
||||
int morph_elem = 0;
|
||||
int morph_size = 0;
|
||||
int morph_operator = 0;
|
||||
int const max_operator = 4;
|
||||
int const max_elem = 2;
|
||||
int const max_kernel_size = 21;
|
||||
|
||||
char* window_name = "Morphology Transformations Demo";
|
||||
|
||||
/* Function Headers */
|
||||
void Morphology_Operations( int, void* );
|
||||
|
||||
/* @function main */
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Load an image
|
||||
src = imread( argv[1] );
|
||||
|
||||
if( !src.data )
|
||||
{ return -1; }
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Create Trackbar to select Morphology operation
|
||||
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
|
||||
|
||||
/// Create Trackbar to select kernel type
|
||||
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
|
||||
&morph_elem, max_elem,
|
||||
Morphology_Operations );
|
||||
|
||||
/// Create Trackbar to choose kernel size
|
||||
createTrackbar( "Kernel size:\n 2n +1", window_name,
|
||||
&morph_size, max_kernel_size,
|
||||
Morphology_Operations );
|
||||
|
||||
/// Default start
|
||||
Morphology_Operations( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* @function Morphology_Operations
|
||||
*/
|
||||
void Morphology_Operations( int, void* )
|
||||
{
|
||||
// Since MORPH_X : 2,3,4,5 and 6
|
||||
int operation = morph_operator + 2;
|
||||
|
||||
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
|
||||
|
||||
/// Apply the specified morphology operation
|
||||
morphologyEx( src, dst, operation, element );
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
@endcode
|
||||
@include cpp/tutorial_code/ImgProc/Morphology_2.cpp
|
||||
|
||||
Explanation
-----------
@ -158,47 +89,23 @@ Explanation
-# Let's check the general structure of the program:
- Load an image
- Create a window to display results of the Morphological operations
- Create 03 Trackbars for the user to enter parameters:
- The first trackbar **"Operator"** returns the kind of morphology operation to use
- Create three Trackbars for the user to enter parameters:
- The first trackbar **Operator** returns the kind of morphology operation to use
(**morph_operator**).
@code{.cpp}
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat",
               window_name, &morph_operator, max_operator,
               Morphology_Operations );
@endcode
- The second trackbar **"Element"** returns **morph_elem**, which indicates what kind of
@snippet cpp/tutorial_code/ImgProc/Morphology_2.cpp create_trackbar1

- The second trackbar **Element** returns **morph_elem**, which indicates what kind of
structure our kernel is:
@code{.cpp}
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
                &morph_elem, max_elem,
                Morphology_Operations );
@endcode
- The final trackbar **"Kernel Size"** returns the size of the kernel to be used
@snippet cpp/tutorial_code/ImgProc/Morphology_2.cpp create_trackbar2

- The final trackbar **Kernel Size** returns the size of the kernel to be used
(**morph_size**)
@code{.cpp}
createTrackbar( "Kernel size:\n 2n +1", window_name,
                &morph_size, max_kernel_size,
                Morphology_Operations );
@endcode
@snippet cpp/tutorial_code/ImgProc/Morphology_2.cpp create_trackbar3

- Every time we move any slider, the user's function **Morphology_Operations** will be called
to apply a new morphology operation and to update the output image based on the
current trackbar values.
@code{.cpp}
/*
 * @function Morphology_Operations
 */
void Morphology_Operations( int, void* )
{
  // Since MORPH_X : 2,3,4,5 and 6
  int operation = morph_operator + 2;

  Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );

  /// Apply the specified morphology operation
  morphologyEx( src, dst, operation, element );
  imshow( window_name, dst );
}
@endcode
@snippet cpp/tutorial_code/ImgProc/Morphology_2.cpp morphology_operations

We can observe that the key function to perform the morphology transformations is @ref
cv::morphologyEx . In this example we use four arguments (leaving the rest as defaults):
@ -216,9 +123,7 @@ Explanation

As you can see, these values range from \<2-6\>; that is why we add (+2) to the value
entered by the Trackbar:
@code{.cpp}
int operation = morph_operator + 2;
@endcode
@snippet cpp/tutorial_code/ImgProc/Morphology_2.cpp operation
- **element**: The kernel to be used. We use the function @ref cv::getStructuringElement
to define our own structure.
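As a quick illustration (not taken from the sample, and assuming the *src* image it loads),
a single direct call to @ref cv::morphologyEx could look like the sketch below; the constants
line up as *MORPH_OPEN = 2*, *MORPH_CLOSE = 3*, *MORPH_GRADIENT = 4*, *MORPH_TOPHAT = 5* and
*MORPH_BLACKHAT = 6*, which is exactly why (+2) is added above:
@code{.cpp}
// Hypothetical stand-alone usage: close small holes with a 5x5 elliptical kernel
Mat element = getStructuringElement( MORPH_ELLIPSE, Size( 5, 5 ) );
Mat closed;
morphologyEx( src, closed, MORPH_CLOSE, element );   // corresponds to morph_operator == 1
@endcode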
|
||||
|
||||
|
@ -77,13 +77,7 @@ Let's check the general structure of the program:

- Load an image (in this case it is defined in the program, the user does not have to enter it
as an argument)
@code{.cpp}
/// Test image - Make sure it s divisible by 2^{n}
src = imread( "../images/chicky_512.jpg" );
if( !src.data )
  { printf(" No data! -- Exiting the program \n");
    return -1; }
@endcode
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp load

- Create a Mat object to store the result of the operations (*dst*) and one to save temporary
results (*tmp*).
@ -95,40 +89,15 @@ Let's check the general structure of the program:
@endcode

- Create a window to display the result
@code{.cpp}
namedWindow( window_name, WINDOW_AUTOSIZE );
imshow( window_name, dst );
@endcode
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp create_window

- Perform an infinite loop waiting for user input.
@code{.cpp}
while( true )
{
  int c;
  c = waitKey(10);

  if( (char)c == 27 )
    { break; }
  if( (char)c == 'u' )
    { pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
      printf( "** Zoom In: Image x 2 \n" );
    }
  else if( (char)c == 'd' )
    { pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
      printf( "** Zoom Out: Image / 2 \n" );
    }

  imshow( window_name, dst );
  tmp = dst;
}
@endcode
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp infinite_loop
Our program exits if the user presses *ESC*. In addition, it offers two options:
|
||||
|
||||
- **Perform upsampling (after pressing 'u')**
@code{.cpp}
pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 )
@endcode
We use the function @ref cv::pyrUp with 03 arguments:
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp pyrup
We use the function @ref cv::pyrUp with three arguments:

- *tmp*: The current image, it is initialized with the *src* original image.
- *dst*: The destination image (to be shown on screen, supposedly the double of the
@ -136,11 +105,8 @@ Let's check the general structure of the program:
- *Size( tmp.cols*2, tmp.rows\*2 )\* : The destination size. Since we are upsampling,
@ref cv::pyrUp expects a size double that of the input image (in this case *tmp*).
- **Perform downsampling (after pressing 'd')**
@code{.cpp}
pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 )
@endcode
Similarly as with @ref cv::pyrUp , we use the function @ref cv::pyrDown with 03
arguments:
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp pyrdown
Similarly as with @ref cv::pyrUp , we use the function @ref cv::pyrDown with three arguments:

- *tmp*: The current image, it is initialized with the *src* original image.
- *dst*: The destination image (to be shown on screen, supposedly half the input
@ -151,15 +117,13 @@ Let's check the general structure of the program:
both dimensions). Otherwise, an error will be shown.
- Finally, we update the input image **tmp** with the current image displayed, so the
subsequent operations are performed on it.
@code{.cpp}
tmp = dst;
@endcode
@snippet cpp/tutorial_code/ImgProc/Pyramids.cpp update_tmp
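Taken together, repeated calls to @ref cv::pyrDown are what build a Gaussian pyramid. A small
illustrative sketch (not part of the sample, and assuming an input whose size can be halved a
few times without remainder) could be:
@code{.cpp}
// Build a 4-level Gaussian pyramid: each level has half the width and height of the previous one
std::vector<Mat> pyramid;
pyramid.push_back( src );              // level 0: the original image
for( int i = 1; i < 4; i++ )
{
    Mat down;
    pyrDown( pyramid[i-1], down );     // default output size: ((cols+1)/2, (rows+1)/2)
    pyramid.push_back( down );
}
@endcode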
|
||||

Results
-------

- After compiling the code above we can test it. The program calls an image **chicky_512.jpg**
that comes in the *tutorial_code/image* folder. Notice that this image is \f$512 \times 512\f$,
that comes in the *samples/data* folder. Notice that this image is \f$512 \times 512\f$,
hence a downsample won't generate any error (\f$512 = 2^{9}\f$). The original image is shown below:

![](images/Pyramids_Tutorial_Original_Image.jpg)
@ -106,51 +106,23 @@ Explanation
|
||||
-# Let's check the general structure of the program:
|
||||
- Load an image. If it is BGR we convert it to Grayscale. For this, remember that we can use
|
||||
the function @ref cv::cvtColor :
|
||||
@code{.cpp}
|
||||
src = imread( argv[1], 1 );
|
||||
@snippet cpp/tutorial_code/ImgProc/Threshold.cpp load
|
||||
|
||||
/// Convert the image to Gray
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
@endcode
|
||||
- Create a window to display the result
|
||||
@code{.cpp}
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
@endcode
|
||||
@snippet cpp/tutorial_code/ImgProc/Threshold.cpp window
|
||||
|
||||
- Create \f$2\f$ trackbars for the user to enter user input:
|
||||
|
||||
- **Type of thresholding**: Binary, To Zero, etc...
|
||||
- **Threshold value**
|
||||
@code{.cpp}
|
||||
createTrackbar( trackbar_type,
|
||||
window_name, &threshold_type,
|
||||
max_type, Threshold_Demo );
|
||||
@snippet cpp/tutorial_code/ImgProc/Threshold.cpp trackbar
|
||||
|
||||
createTrackbar( trackbar_value,
|
||||
window_name, &threshold_value,
|
||||
max_value, Threshold_Demo );
|
||||
@endcode
|
||||
- Wait until the user enters the threshold value, the type of thresholding (or until the
|
||||
program exits)
|
||||
- Whenever the user changes the value of any of the Trackbars, the function *Threshold_Demo*
|
||||
is called:
|
||||
@code{.cpp}
|
||||
/*
|
||||
* @function Threshold_Demo
|
||||
*/
|
||||
void Threshold_Demo( int, void* )
|
||||
{
|
||||
/* 0: Binary
|
||||
1: Binary Inverted
|
||||
2: Threshold Truncated
|
||||
3: Threshold to Zero
|
||||
4: Threshold to Zero Inverted
|
||||
*/
|
||||
@snippet cpp/tutorial_code/ImgProc/Threshold.cpp Threshold_Demo
|
||||
|
||||
threshold( src_gray, dst, threshold_value, max_BINARY_value,threshold_type );
|
||||
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
@endcode
|
||||
As you can see, the function @ref cv::threshold is invoked. We give \f$5\f$ parameters:
|
||||
|
||||
- *src_gray*: Our input image
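To make the call concrete, a direct invocation with fixed values instead of the trackbar
variables (purely illustrative, not part of the sample) would be:
@code{.cpp}
// Pixels above 100 become 255, the rest become 0 (THRESH_BINARY)
Mat binary;
threshold( src_gray, binary, 100, 255, THRESH_BINARY );
imshow( window_name, binary );
@endcode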
|
||||
|
@ -21,92 +21,8 @@ This tutorial code's is shown lines below. You can also download it from
|
||||
[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp)
|
||||
. The second version (using LBP for face detection) can be [found
|
||||
here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp)
|
||||
@code{.cpp}
|
||||
#include "opencv2/objdetect.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
@include samples/cpp/tutorial_code/objectDetection/objectDetection.cpp
|
||||
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
/* Function Headers */
|
||||
void detectAndDisplay( Mat frame );
|
||||
|
||||
/* Global variables */
|
||||
String face_cascade_name = "haarcascade_frontalface_alt.xml";
|
||||
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
|
||||
CascadeClassifier face_cascade;
|
||||
CascadeClassifier eyes_cascade;
|
||||
String window_name = "Capture - Face detection";
|
||||
|
||||
/* @function main */
|
||||
int main( void )
|
||||
{
|
||||
VideoCapture capture;
|
||||
Mat frame;
|
||||
|
||||
//-- 1. Load the cascades
|
||||
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
|
||||
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
|
||||
|
||||
//-- 2. Read the video stream
|
||||
capture.open( -1 );
|
||||
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
|
||||
|
||||
while ( capture.read(frame) )
|
||||
{
|
||||
if( frame.empty() )
|
||||
{
|
||||
printf(" --(!) No captured frame -- Break!");
|
||||
break;
|
||||
}
|
||||
|
||||
//-- 3. Apply the classifier to the frame
|
||||
detectAndDisplay( frame );
|
||||
|
||||
int c = waitKey(10);
|
||||
if( (char)c == 27 ) { break; } // escape
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @function detectAndDisplay */
|
||||
void detectAndDisplay( Mat frame )
|
||||
{
|
||||
std::vector<Rect> faces;
|
||||
Mat frame_gray;
|
||||
|
||||
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
|
||||
equalizeHist( frame_gray, frame_gray );
|
||||
|
||||
//-- Detect faces
|
||||
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
|
||||
|
||||
for( size_t i = 0; i < faces.size(); i++ )
|
||||
{
|
||||
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
|
||||
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
|
||||
|
||||
Mat faceROI = frame_gray( faces[i] );
|
||||
std::vector<Rect> eyes;
|
||||
|
||||
//-- In each face, detect eyes
|
||||
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
|
||||
|
||||
for( size_t j = 0; j < eyes.size(); j++ )
|
||||
{
|
||||
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
|
||||
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
|
||||
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
|
||||
}
|
||||
}
|
||||
//-- Show what you got
|
||||
imshow( window_name, frame );
|
||||
}
|
||||
@endcode
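For a still image instead of a webcam stream, the same cascade machinery can be used in a much
shorter form. The sketch below is hypothetical (the cascade file and the image path are
placeholders, not taken from the sample):
@code{.cpp}
CascadeClassifier face_cascade;
if( !face_cascade.load( "haarcascade_frontalface_alt.xml" ) ) { printf("--(!)Error loading face cascade\n"); return -1; }

Mat img = imread( "some_photo.jpg" ), gray;   // placeholder image path
cvtColor( img, gray, COLOR_BGR2GRAY );
equalizeHist( gray, gray );

std::vector<Rect> faces;
face_cascade.detectMultiScale( gray, faces, 1.1, 3, 0, Size(30, 30) ); // scaleFactor, minNeighbors, flags, minSize
for( size_t i = 0; i < faces.size(); i++ )
    rectangle( img, faces[i], Scalar( 0, 255, 0 ), 2 );
imshow( "Faces", img );
waitKey(0);
@endcode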
|
||||
Explanation
|
||||
-----------
|
||||
|
||||
|
@ -24,39 +24,48 @@ int main(int argc, char** argv)
|
||||
help();
|
||||
return 0;
|
||||
}
|
||||
//![load]
|
||||
string filename = parser.get<string>("@image");
|
||||
if (filename.empty())
|
||||
{
|
||||
help();
|
||||
cout << "no image_name provided" << endl;
|
||||
return -1;
|
||||
}
|
||||
Mat img = imread(filename, 0);
|
||||
Mat img = imread(filename, IMREAD_COLOR);
|
||||
if(img.empty())
|
||||
{
|
||||
help();
|
||||
cout << "can not open " << filename << endl;
|
||||
return -1;
|
||||
}
|
||||
//![load]
|
||||
|
||||
Mat cimg;
|
||||
medianBlur(img, img, 5);
|
||||
cvtColor(img, cimg, COLOR_GRAY2BGR);
|
||||
//![convert_to_gray]
|
||||
Mat gray;
|
||||
cvtColor(img, gray, COLOR_BGR2GRAY);
|
||||
//![convert_to_gray]
|
||||
|
||||
//![reduce_noise]
|
||||
medianBlur(gray, gray, 5);
|
||||
//![reduce_noise]
|
||||
|
||||
//![houghcircles]
|
||||
vector<Vec3f> circles;
|
||||
HoughCircles(img, circles, HOUGH_GRADIENT, 1, 10,
|
||||
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
|
||||
gray.rows/16, // change this value to detect circles with different distances to each other
|
||||
100, 30, 1, 30 // change the last two parameters
|
||||
// (min_radius & max_radius) to detect larger circles
|
||||
);
|
||||
//![houghcircles]
|
||||
|
||||
//![draw]
|
||||
for( size_t i = 0; i < circles.size(); i++ )
|
||||
{
|
||||
Vec3i c = circles[i];
|
||||
circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
|
||||
circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
|
||||
circle( img, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
|
||||
circle( img, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
|
||||
}
|
||||
//![draw]
|
||||
|
||||
imshow("detected circles", cimg);
|
||||
//![display]
|
||||
imshow("detected circles", img);
|
||||
waitKey();
|
||||
//![display]
|
||||
|
||||
return 0;
|
||||
}
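For readers new to @ref cv::HoughCircles , the call above can also be written with its parameters
spelled out. This is only an illustrative restatement with the same values, not extra code from
the commit:
@code{.cpp}
double dp = 1;                  // inverse ratio of accumulator resolution to image resolution
double minDist = gray.rows/16;  // minimum distance between the centers of detected circles
double cannyHigh = 100;         // param1: higher threshold of the internal Canny edge detector
double accThresh = 30;          // param2: accumulator threshold; smaller values detect more (possibly false) circles
int minRadius = 1, maxRadius = 30;
HoughCircles( gray, circles, HOUGH_GRADIENT, dp, minDist, cannyHigh, accThresh, minRadius, maxRadius );
@endcode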
|
||||
|
@ -21,6 +21,7 @@ Mat src1;
|
||||
Mat src2;
|
||||
Mat dst;
|
||||
|
||||
//![on_trackbar]
|
||||
/**
|
||||
* @function on_trackbar
|
||||
* @brief Callback for trackbar
|
||||
@ -35,7 +36,7 @@ static void on_trackbar( int, void* )
|
||||
|
||||
imshow( "Linear Blend", dst );
|
||||
}
|
||||
|
||||
//![on_trackbar]
|
||||
|
||||
/**
|
||||
* @function main
|
||||
@ -43,9 +44,11 @@ static void on_trackbar( int, void* )
|
||||
*/
|
||||
int main( void )
|
||||
{
|
||||
/// Read image ( same size, same type )
|
||||
//![load]
|
||||
/// Read images ( both have to be of the same size and type )
|
||||
src1 = imread("../data/LinuxLogo.jpg");
|
||||
src2 = imread("../data/WindowsLogo.jpg");
|
||||
//![load]
|
||||
|
||||
if( src1.empty() ) { printf("Error loading src1 \n"); return -1; }
|
||||
if( src2.empty() ) { printf("Error loading src2 \n"); return -1; }
|
||||
@ -53,13 +56,15 @@ int main( void )
|
||||
/// Initialize values
|
||||
alpha_slider = 0;
|
||||
|
||||
/// Create Windows
|
||||
namedWindow("Linear Blend", 1);
|
||||
//![window]
|
||||
namedWindow("Linear Blend", WINDOW_AUTOSIZE); // Create Window
|
||||
//![window]
|
||||
|
||||
/// Create Trackbars
|
||||
//![create_trackbar]
|
||||
char TrackbarName[50];
|
||||
sprintf( TrackbarName, "Alpha x %d", alpha_slider_max );
|
||||
createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
|
||||
//![create_trackbar]
|
||||
|
||||
/// Show some stuff
|
||||
on_trackbar( alpha_slider, 0 );
|
||||
|
@ -66,6 +66,7 @@ int main( int, char** argv )
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![erosion]
|
||||
/**
|
||||
* @function Erosion
|
||||
*/
|
||||
@ -76,14 +77,19 @@ void Erosion( int, void* )
|
||||
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
|
||||
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
|
||||
|
||||
//![kernel]
|
||||
Mat element = getStructuringElement( erosion_type,
|
||||
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
|
||||
Point( erosion_size, erosion_size ) );
|
||||
//![kernel]
|
||||
|
||||
/// Apply the erosion operation
|
||||
erode( src, erosion_dst, element );
|
||||
imshow( "Erosion Demo", erosion_dst );
|
||||
}
|
||||
//![erosion]
|
||||
|
||||
//![dilation]
|
||||
/**
|
||||
* @function Dilation
|
||||
*/
|
||||
@ -97,7 +103,9 @@ void Dilation( int, void* )
|
||||
Mat element = getStructuringElement( dilation_type,
|
||||
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
|
||||
Point( dilation_size, dilation_size ) );
|
||||
|
||||
/// Apply the dilation operation
|
||||
dilate( src, dilation_dst, element );
|
||||
imshow( "Dilation Demo", dilation_dst );
|
||||
}
|
||||
//![dilation]
|
||||
|
@ -31,27 +31,35 @@ void Morphology_Operations( int, void* );
|
||||
*/
|
||||
int main( int, char** argv )
|
||||
{
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
//![load]
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE ); // Create window
|
||||
//![window]
|
||||
|
||||
//![create_trackbar1]
|
||||
/// Create Trackbar to select Morphology operation
|
||||
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
|
||||
//![create_trackbar1]
|
||||
|
||||
//![create_trackbar2]
|
||||
/// Create Trackbar to select kernel type
|
||||
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
|
||||
&morph_elem, max_elem,
|
||||
Morphology_Operations );
|
||||
//![create_trackbar2]
|
||||
|
||||
//![create_trackbar3]
|
||||
/// Create Trackbar to choose kernel size
|
||||
createTrackbar( "Kernel size:\n 2n +1", window_name,
|
||||
&morph_size, max_kernel_size,
|
||||
Morphology_Operations );
|
||||
//![create_trackbar3]
|
||||
|
||||
/// Default start
|
||||
Morphology_Operations( 0, 0 );
|
||||
@ -60,13 +68,16 @@ int main( int, char** argv )
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![morphology_operations]
|
||||
/**
|
||||
* @function Morphology_Operations
|
||||
*/
|
||||
void Morphology_Operations( int, void* )
|
||||
{
|
||||
// Since MORPH_X : 2,3,4,5 and 6
|
||||
//![operation]
|
||||
int operation = morph_operator + 2;
|
||||
//![operation]
|
||||
|
||||
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
|
||||
|
||||
@ -74,3 +85,4 @@ void Morphology_Operations( int, void* )
|
||||
morphologyEx( src, dst, operation, element );
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
//![morphology_operations]
|
||||
|
@ -28,39 +28,50 @@ int main( void )
|
||||
printf( " * [d] -> Zoom out \n" );
|
||||
printf( " * [ESC] -> Close program \n \n" );
|
||||
|
||||
/// Test image - Make sure it s divisible by 2^{n}
|
||||
src = imread( "../data/chicky_512.png" );
|
||||
//![load]
|
||||
src = imread( "../data/chicky_512.png" ); // Loads the test image
|
||||
if( src.empty() )
|
||||
{ printf(" No data! -- Exiting the program \n");
|
||||
return -1; }
|
||||
//![load]
|
||||
|
||||
tmp = src;
|
||||
dst = tmp;
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![create_window]
|
||||
imshow( window_name, dst );
|
||||
//![create_window]
|
||||
|
||||
/// Loop
|
||||
//![infinite_loop]
|
||||
for(;;)
|
||||
{
|
||||
int c;
|
||||
c = waitKey(10);
|
||||
c = waitKey(0);
|
||||
|
||||
if( (char)c == 27 )
|
||||
{ break; }
|
||||
if( (char)c == 'u' )
|
||||
{ pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
|
||||
{
|
||||
//![pyrup]
|
||||
pyrUp( tmp, dst, Size( tmp.cols*2, tmp.rows*2 ) );
|
||||
//![pyrup]
|
||||
printf( "** Zoom In: Image x 2 \n" );
|
||||
}
|
||||
else if( (char)c == 'd' )
|
||||
{ pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
|
||||
{
|
||||
//![pyrdown]
|
||||
pyrDown( tmp, dst, Size( tmp.cols/2, tmp.rows/2 ) );
|
||||
//![pyrdown]
|
||||
printf( "** Zoom Out: Image / 2 \n" );
|
||||
}
|
||||
|
||||
imshow( window_name, dst );
|
||||
|
||||
//![update_tmp]
|
||||
tmp = dst;
|
||||
//![update_tmp]
|
||||
}
|
||||
//![infinite_loop]
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -43,33 +43,38 @@ int main( void )
|
||||
/// Applying Homogeneous blur
|
||||
if( display_caption( "Homogeneous Blur" ) != 0 ) { return 0; }
|
||||
|
||||
//![blur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ blur( src, dst, Size( i, i ), Point(-1,-1) );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
//![blur]
|
||||
|
||||
/// Applying Gaussian blur
|
||||
if( display_caption( "Gaussian Blur" ) != 0 ) { return 0; }
|
||||
|
||||
//![gaussianblur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ GaussianBlur( src, dst, Size( i, i ), 0, 0 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
//![gaussianblur]
|
||||
|
||||
/// Applying Median blur
|
||||
if( display_caption( "Median Blur" ) != 0 ) { return 0; }
|
||||
|
||||
//![medianblur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ medianBlur ( src, dst, i );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
|
||||
//![medianblur]
|
||||
|
||||
/// Applying Bilateral Filter
|
||||
if( display_caption( "Bilateral Blur" ) != 0 ) { return 0; }
|
||||
|
||||
//![bilateralfilter]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{ bilateralFilter ( src, dst, i, i*2, i/2 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 ) { return 0; } }
|
||||
//![bilateralfilter]
|
||||
|
||||
/// Wait until user press a key
|
||||
display_caption( "End: Press a key!" );
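As a compact reference for the four filters exercised above, single calls with one fixed kernel
size could look like the following sketch (illustrative only; any odd kernel size works, 31 is
just an example):
@code{.cpp}
int k = 31;                                    // kernel width/height, must be odd
blur( src, dst, Size( k, k ) );                // normalized box filter
GaussianBlur( src, dst, Size( k, k ), 0, 0 );  // sigmaX = sigmaY = 0 -> derived from kernel size
medianBlur( src, dst, k );                     // aperture size k
bilateralFilter( src, dst, k, k*2, k/2 );      // d, sigmaColor, sigmaSpace
@endcode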
|
||||
|
@ -32,29 +32,30 @@ void Threshold_Demo( int, void* );
|
||||
*/
|
||||
int main( int, char** argv )
|
||||
{
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//! [load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
|
||||
/// Convert the image to Gray
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to Gray
|
||||
//! [load]
|
||||
|
||||
/// Create a window to display results
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//! [window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE ); // Create a window to display results
|
||||
//! [window]
|
||||
|
||||
/// Create Trackbar to choose type of Threshold
|
||||
//! [trackbar]
|
||||
createTrackbar( trackbar_type,
|
||||
window_name, &threshold_type,
|
||||
max_type, Threshold_Demo );
|
||||
max_type, Threshold_Demo ); // Create Trackbar to choose type of Threshold
|
||||
|
||||
createTrackbar( trackbar_value,
|
||||
window_name, &threshold_value,
|
||||
max_value, Threshold_Demo );
|
||||
max_value, Threshold_Demo ); // Create Trackbar to choose Threshold value
|
||||
//! [trackbar]
|
||||
|
||||
/// Call the function to initialize
|
||||
Threshold_Demo( 0, 0 );
|
||||
Threshold_Demo( 0, 0 ); // Call the function to initialize
|
||||
|
||||
/// Wait until user finishes program
|
||||
for(;;)
|
||||
@ -67,7 +68,7 @@ int main( int, char** argv )
|
||||
|
||||
}
|
||||
|
||||
|
||||
//![Threshold_Demo]
|
||||
/**
|
||||
* @function Threshold_Demo
|
||||
*/
|
||||
@ -84,3 +85,4 @@ void Threshold_Demo( int, void* )
|
||||
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
//![Threshold_Demo]
|
||||
|
@ -10,8 +10,7 @@
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Global variables
|
||||
|
||||
//![variables]
|
||||
Mat src, src_gray;
|
||||
Mat dst, detected_edges;
|
||||
|
||||
@ -21,6 +20,7 @@ int const max_lowThreshold = 100;
|
||||
int ratio = 3;
|
||||
int kernel_size = 3;
|
||||
const char* window_name = "Edge Map";
|
||||
//![variables]
|
||||
|
||||
/**
|
||||
* @function CannyThreshold
|
||||
@ -28,17 +28,28 @@ const char* window_name = "Edge Map";
|
||||
*/
|
||||
static void CannyThreshold(int, void*)
|
||||
{
|
||||
//![reduce_noise]
|
||||
/// Reduce noise with a kernel 3x3
|
||||
blur( src_gray, detected_edges, Size(3,3) );
|
||||
//![reduce_noise]
|
||||
|
||||
//![canny]
|
||||
/// Canny detector
|
||||
Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size );
|
||||
//![canny]
|
||||
|
||||
/// Using Canny's output as a mask, we display our result
|
||||
//![fill]
|
||||
dst = Scalar::all(0);
|
||||
//![fill]
|
||||
|
||||
//![copyto]
|
||||
src.copyTo( dst, detected_edges);
|
||||
//![copyto]
|
||||
|
||||
//![display]
|
||||
imshow( window_name, dst );
|
||||
//![display]
|
||||
}
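A commonly recommended rule of thumb is to keep the upper Canny threshold at two to three times
the lower one, which is what *lowThreshold\*ratio* does with *ratio = 3*. A fixed-value sketch of
the same call, purely for illustration, would be:
@code{.cpp}
double low = 50, high = 3 * low;   // upper:lower ratio of about 3:1
Canny( detected_edges, detected_edges, low, high, 3 );
@endcode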
|
||||
|
||||
|
||||
@ -47,23 +58,30 @@ static void CannyThreshold(int, void*)
|
||||
*/
|
||||
int main( int, char** argv )
|
||||
{
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
//![load]
|
||||
|
||||
//![create_mat]
|
||||
/// Create a matrix of the same type and size as src (for dst)
|
||||
dst.create( src.size(), src.type() );
|
||||
//![create_mat]
|
||||
|
||||
/// Convert the image to grayscale
|
||||
//![convert_to_gray]
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
//![convert_to_gray]
|
||||
|
||||
/// Create a window
|
||||
//![create_window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![create_window]
|
||||
|
||||
//![create_trackbar]
|
||||
/// Create a Trackbar for user to enter threshold
|
||||
createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
|
||||
//![create_trackbar]
|
||||
|
||||
/// Show the image
|
||||
CannyThreshold(0, 0);
|
||||
|
@ -15,39 +15,45 @@ using namespace cv;
|
||||
*/
|
||||
int main( int, char** argv )
|
||||
{
|
||||
|
||||
//![variables]
|
||||
Mat src, src_gray, dst;
|
||||
int kernel_size = 3;
|
||||
int scale = 1;
|
||||
int delta = 0;
|
||||
int ddepth = CV_16S;
|
||||
const char* window_name = "Laplace Demo";
|
||||
//![variables]
|
||||
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
//![load]
|
||||
|
||||
/// Remove noise by blurring with a Gaussian filter
|
||||
//![reduce_noise]
|
||||
/// Reduce noise by blurring with a Gaussian filter
|
||||
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
|
||||
//![reduce_noise]
|
||||
|
||||
/// Convert the image to grayscale
|
||||
cvtColor( src, src_gray, COLOR_RGB2GRAY );
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![convert_to_gray]
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to grayscale
|
||||
//![convert_to_gray]
|
||||
|
||||
/// Apply Laplace function
|
||||
Mat abs_dst;
|
||||
|
||||
//![laplacian]
|
||||
Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT );
|
||||
//![laplacian]
|
||||
|
||||
//![convert]
|
||||
convertScaleAbs( dst, abs_dst );
|
||||
//![convert]
|
||||
|
||||
/// Show what you got
|
||||
//![display]
|
||||
imshow( window_name, abs_dst );
|
||||
|
||||
waitKey(0);
|
||||
//![display]
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/**
|
||||
* @file Sobel_Demo.cpp
|
||||
* @brief Sample code using Sobel and/orScharr OpenCV functions to make a simple Edge Detector
|
||||
* @brief Sample code using Sobel and/or Scharr OpenCV functions to make a simple Edge Detector
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
@ -15,28 +15,31 @@ using namespace cv;
|
||||
*/
|
||||
int main( int, char** argv )
|
||||
{
|
||||
|
||||
//![variables]
|
||||
Mat src, src_gray;
|
||||
Mat grad;
|
||||
const char* window_name = "Sobel Demo - Simple Edge Detector";
|
||||
int scale = 1;
|
||||
int delta = 0;
|
||||
int ddepth = CV_16S;
|
||||
//![variables]
|
||||
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
//![load]
|
||||
|
||||
//![reduce_noise]
|
||||
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
|
||||
//![reduce_noise]
|
||||
|
||||
/// Convert it to gray
|
||||
cvtColor( src, src_gray, COLOR_RGB2GRAY );
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![convert_to_gray]
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY );
|
||||
//![convert_to_gray]
|
||||
|
||||
//![sobel]
|
||||
/// Generate grad_x and grad_y
|
||||
Mat grad_x, grad_y;
|
||||
Mat abs_grad_x, abs_grad_y;
|
||||
@ -44,19 +47,26 @@ int main( int, char** argv )
|
||||
/// Gradient X
|
||||
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
|
||||
Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT );
|
||||
convertScaleAbs( grad_x, abs_grad_x );
|
||||
|
||||
/// Gradient Y
|
||||
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
|
||||
Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT );
|
||||
convertScaleAbs( grad_y, abs_grad_y );
|
||||
//![sobel]
|
||||
|
||||
//![convert]
|
||||
convertScaleAbs( grad_x, abs_grad_x );
|
||||
convertScaleAbs( grad_y, abs_grad_y );
|
||||
//![convert]
|
||||
|
||||
//![blend]
|
||||
/// Total Gradient (approximate)
|
||||
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad );
|
||||
//![blend]
|
||||
|
||||
//![display]
|
||||
imshow( window_name, grad );
|
||||
|
||||
waitKey(0);
|
||||
//![display]
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -10,12 +10,13 @@
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Global Variables
|
||||
//![variables]
|
||||
Mat src, dst;
|
||||
int top, bottom, left, right;
|
||||
int borderType;
|
||||
const char* window_name = "copyMakeBorder Demo";
|
||||
RNG rng(12345);
|
||||
//![variables]
|
||||
|
||||
/**
|
||||
* @function main
|
||||
@ -25,14 +26,15 @@ int main( int, char** argv )
|
||||
|
||||
int c;
|
||||
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{
|
||||
printf(" No data entered, please enter the path to an image file \n");
|
||||
return -1;
|
||||
}
|
||||
//![load]
|
||||
|
||||
/// Brief how-to for this program
|
||||
printf( "\n \t copyMakeBorder Demo: \n" );
|
||||
@ -41,18 +43,22 @@ int main( int, char** argv )
|
||||
printf( " ** Press 'r' to set the border to be replicated \n");
|
||||
printf( " ** Press 'ESC' to exit the program \n");
|
||||
|
||||
/// Create window
|
||||
//![create_window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
//![create_window]
|
||||
|
||||
//![init_arguments]
|
||||
/// Initialize arguments for the filter
|
||||
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
|
||||
left = (int) (0.05*src.cols); right = (int) (0.05*src.cols);
|
||||
dst = src;
|
||||
//![init_arguments]
|
||||
|
||||
dst = src;
|
||||
imshow( window_name, dst );
|
||||
|
||||
for(;;)
|
||||
{
|
||||
//![check_keypress]
|
||||
c = waitKey(500);
|
||||
|
||||
if( (char)c == 27 )
|
||||
@ -61,11 +67,19 @@ int main( int, char** argv )
|
||||
{ borderType = BORDER_CONSTANT; }
|
||||
else if( (char)c == 'r' )
|
||||
{ borderType = BORDER_REPLICATE; }
|
||||
//![check_keypress]
|
||||
|
||||
//![update_value]
|
||||
Scalar value( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
|
||||
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
|
||||
//![update_value]
|
||||
|
||||
//![copymakeborder]
|
||||
copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
|
||||
//![copymakeborder]
|
||||
|
||||
//![display]
|
||||
imshow( window_name, dst );
|
||||
//![display]
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -27,19 +27,19 @@ int main ( int, char** argv )
|
||||
|
||||
int c;
|
||||
|
||||
/// Load an image
|
||||
src = imread( argv[1], IMREAD_COLOR );
|
||||
//![load]
|
||||
src = imread( argv[1], IMREAD_COLOR ); // Load an image
|
||||
|
||||
if( src.empty() )
|
||||
{ return -1; }
|
||||
//![load]
|
||||
|
||||
/// Create window
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
|
||||
//![init_arguments]
|
||||
/// Initialize arguments for the filter
|
||||
anchor = Point( -1, -1 );
|
||||
delta = 0;
|
||||
ddepth = -1;
|
||||
//![init_arguments]
|
||||
|
||||
/// Loop - Will filter the image with different kernel sizes each 0.5 seconds
|
||||
int ind = 0;
|
||||
@ -50,12 +50,15 @@ int main ( int, char** argv )
|
||||
if( (char)c == 27 )
|
||||
{ break; }
|
||||
|
||||
//![update_kernel]
|
||||
/// Update kernel size for a normalized box filter
|
||||
kernel_size = 3 + 2*( ind%5 );
|
||||
kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
|
||||
//![update_kernel]
|
||||
|
||||
/// Apply filter
|
||||
//![apply_filter]
|
||||
filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT );
|
||||
//![apply_filter]
|
||||
imshow( window_name, dst );
|
||||
ind++;
|
||||
}
|
||||
|
@ -16,7 +16,6 @@ using namespace cv;
|
||||
*/
|
||||
int main( void )
|
||||
{
|
||||
|
||||
double alpha = 0.5; double beta; double input;
|
||||
|
||||
Mat src1, src2, dst;
|
||||
@ -27,26 +26,28 @@ int main( void )
|
||||
std::cout<<"* Enter alpha [0-1]: ";
|
||||
std::cin>>input;
|
||||
|
||||
// We use the alpha provided by the user iff it is between 0 and 1
|
||||
// We use the alpha provided by the user if it is between 0 and 1
|
||||
if( alpha >= 0 && alpha <= 1 )
|
||||
{ alpha = input; }
|
||||
|
||||
/// Read image ( same size, same type )
|
||||
//![load]
|
||||
/// Read images ( both have to be of the same size and type )
|
||||
src1 = imread("../data/LinuxLogo.jpg");
|
||||
src2 = imread("../data/WindowsLogo.jpg");
|
||||
//![load]
|
||||
|
||||
if( src1.empty() ) { std::cout<< "Error loading src1"<<std::endl; return -1; }
|
||||
if( src2.empty() ) { std::cout<< "Error loading src2"<<std::endl; return -1; }
|
||||
|
||||
/// Create Windows
|
||||
namedWindow("Linear Blend", 1);
|
||||
|
||||
//![blend_images]
|
||||
beta = ( 1.0 - alpha );
|
||||
addWeighted( src1, alpha, src2, beta, 0.0, dst);
|
||||
//![blend_images]
|
||||
|
||||
//![display]
|
||||
imshow( "Linear Blend", dst );
|
||||
|
||||
|
||||
waitKey(0);
|
||||
//![display]
|
||||
|
||||
return 0;
|
||||
}
|
@ -23,6 +23,7 @@ void MyLine( Mat img, Point start, Point end );
|
||||
*/
|
||||
int main( void ){
|
||||
|
||||
//![create_images]
|
||||
/// Windows names
|
||||
char atom_window[] = "Drawing 1: Atom";
|
||||
char rook_window[] = "Drawing 2: Rook";
|
||||
@ -30,10 +31,12 @@ int main( void ){
|
||||
/// Create black empty images
|
||||
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
//![create_images]
|
||||
|
||||
/// 1. Draw a simple atom:
|
||||
/// -----------------------
|
||||
|
||||
//![draw_atom]
|
||||
/// 1.a. Creating ellipses
|
||||
MyEllipse( atom_image, 90 );
|
||||
MyEllipse( atom_image, 0 );
|
||||
@ -42,26 +45,31 @@ int main( void ){
|
||||
|
||||
/// 1.b. Creating circles
|
||||
MyFilledCircle( atom_image, Point( w/2, w/2) );
|
||||
//![draw_atom]
|
||||
|
||||
/// 2. Draw a rook
|
||||
/// ------------------
|
||||
|
||||
//![draw_rook]
|
||||
/// 2.a. Create a convex polygon
|
||||
MyPolygon( rook_image );
|
||||
|
||||
//![rectangle]
|
||||
/// 2.b. Creating rectangles
|
||||
rectangle( rook_image,
|
||||
Point( 0, 7*w/8 ),
|
||||
Point( w, w),
|
||||
Scalar( 0, 255, 255 ),
|
||||
-1,
|
||||
8 );
|
||||
FILLED,
|
||||
LINE_8 );
|
||||
//![rectangle]
|
||||
|
||||
/// 2.c. Create a few lines
|
||||
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
|
||||
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
|
||||
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
|
||||
MyLine( rook_image, Point( 3*w/4, 7*w/8 ), Point( 3*w/4, w ) );
|
||||
//![draw_rook]
|
||||
|
||||
/// 3. Display your stuff!
|
||||
imshow( atom_window, atom_image );
|
||||
@ -75,6 +83,7 @@ int main( void ){
|
||||
|
||||
/// Function Declaration
|
||||
|
||||
//![myellipse]
|
||||
/**
|
||||
* @function MyEllipse
|
||||
* @brief Draw a fixed-size ellipse with different angles
|
||||
@ -94,31 +103,32 @@ void MyEllipse( Mat img, double angle )
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
//![myellipse]
|
||||
|
||||
//![myfilledcircle]
|
||||
/**
|
||||
* @function MyFilledCircle
|
||||
* @brief Draw a fixed-size filled circle
|
||||
*/
|
||||
void MyFilledCircle( Mat img, Point center )
|
||||
{
|
||||
int thickness = -1;
|
||||
int lineType = 8;
|
||||
|
||||
circle( img,
|
||||
center,
|
||||
w/32,
|
||||
Scalar( 0, 0, 255 ),
|
||||
thickness,
|
||||
lineType );
|
||||
FILLED,
|
||||
LINE_8 );
|
||||
}
|
||||
//![myfilledcircle]
|
||||
|
||||
//![mypolygon]
|
||||
/**
|
||||
* @function MyPolygon
|
||||
* @function Draw a simple concave polygon (rook)
|
||||
* @brief Draw a simple concave polygon (rook)
|
||||
*/
|
||||
void MyPolygon( Mat img )
|
||||
{
|
||||
int lineType = 8;
|
||||
int lineType = LINE_8;
|
||||
|
||||
/** Create some points */
|
||||
Point rook_points[1][20];
|
||||
@ -149,11 +159,13 @@ void MyPolygon( Mat img )
|
||||
fillPoly( img,
|
||||
ppt,
|
||||
npt,
|
||||
1,
|
||||
1,
|
||||
Scalar( 255, 255, 255 ),
|
||||
lineType );
|
||||
}
|
||||
//![mypolygon]
|
||||
|
||||
//![myline]
|
||||
/**
|
||||
* @function MyLine
|
||||
* @brief Draw a simple line
|
||||
@ -161,7 +173,7 @@ void MyPolygon( Mat img )
|
||||
void MyLine( Mat img, Point start, Point end )
|
||||
{
|
||||
int thickness = 2;
|
||||
int lineType = 8;
|
||||
int lineType = LINE_8;
|
||||
line( img,
|
||||
start,
|
||||
end,
|
||||
@ -169,3 +181,4 @@ void MyLine( Mat img, Point start, Point end )
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
//![myline]
|
||||
|
@ -166,10 +166,14 @@ int main( int argc, char* argv[] ){
|
||||
// load the image (note that we don't have the projection information. You will
|
||||
// need to load that yourself or use the full GDAL driver. The values are pre-defined
|
||||
// at the top of this file
|
||||
//![load1]
|
||||
cv::Mat image = cv::imread(argv[1], cv::IMREAD_LOAD_GDAL | cv::IMREAD_COLOR );
|
||||
//![load1]
|
||||
|
||||
//![load2]
|
||||
// load the dem model
|
||||
cv::Mat dem = cv::imread(argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );
|
||||
//![load2]
|
||||
|
||||
// create our output products
|
||||
cv::Mat output_dem( image.size(), CV_8UC3 );
|
||||
|