doc: tutorial refactor
96
doc/tools/scan_tutorials.py
Normal file
@ -0,0 +1,96 @@
#!/usr/bin/env python

from pathlib import Path
import re
import sys

# Tasks
# 1. Find all tutorials
# 2. Generate tree (@subpage)
# 3. Check prev/next nodes


class Tutorial(object):
    def __init__(self, path):
        self.path = path
        self.title = None  # doxygen title
        self.children = []  # ordered titles
        self.prev = None
        self.next = None
        with open(path, "rt") as f:
            self.parse(f)

    def parse(self, f):
        rx_title = re.compile(r"\{#(\w+)\}")
        rx_subpage = re.compile(r"@subpage\s+(\w+)")
        rx_prev = re.compile(r"@prev_tutorial\{(\w+)\}")
        rx_next = re.compile(r"@next_tutorial\{(\w+)\}")
        for line in f:
            if self.title is None:
                m = rx_title.search(line)
                if m:
                    self.title = m.group(1)
                    continue
            if self.prev is None:
                m = rx_prev.search(line)
                if m:
                    self.prev = m.group(1)
                    continue
            if self.next is None:
                m = rx_next.search(line)
                if m:
                    self.next = m.group(1)
                    continue
            m = rx_subpage.search(line)
            if m:
                self.children.append(m.group(1))
                continue

    def verify_prev_next(self, storage):
        res = True

        if self.title is None:
            print("[W] No title")
            res = False

        # Each child's @prev_tutorial must name the child listed just before it
        # in this page's @subpage order.
        prev = None
        for one in self.children:
            c = storage[one]
            if c.prev is not None and c.prev != prev:
                print("[W] Wrong prev_tutorial: expected {} / actual {}".format(prev, c.prev))
                res = False
            prev = c.title

        # Same check for @next_tutorial, walking the children in reverse order.
        next = None
        for one in reversed(self.children):
            c = storage[one]
            if c.next is not None and c.next != next:
                print("[W] Wrong next_tutorial: expected {} / actual {}".format(next, c.next))
                res = False
            next = c.title

        if len(self.children) == 0 and self.prev is None and self.next is None:
            print("[W] No prev and next tutorials")
            res = False

        return res


if __name__ == "__main__":

    p = Path('tutorials')
    print("Looking for tutorials in: '{}'".format(p))

    all_tutorials = dict()
    for f in p.glob('**/*'):
        if f.suffix.lower() in ('.markdown', '.md'):
            t = Tutorial(f)
            all_tutorials[t.title] = t

    res = 0
    print("Found: {}".format(len(all_tutorials)))
    print("------")
    for title, t in all_tutorials.items():
        if not t.verify_prev_next(all_tutorials):
            print("[E] Verification failed: {}".format(t.path))
            print("------")
            res = 1

    sys.exit(res)
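A minimal usage sketch for the checker above (run from the `doc` directory so that `tutorials` resolves, with `doc/tools` importable; both are assumptions about the local setup, not part of this patch). It builds the same index as the script's `__main__` and verifies a single table-of-contents page introduced by this PR:

```python
# Hypothetical driver for scan_tutorials.py; working directory and PYTHONPATH are assumptions.
from pathlib import Path
from scan_tutorials import Tutorial

index = {}
for f in Path('tutorials').glob('**/*'):
    if f.suffix.lower() in ('.markdown', '.md'):
        t = Tutorial(f)
        index[t.title] = t

toc = index.get('tutorial_table_of_content_app')  # page id introduced by this PR
if toc is not None and toc.verify_prev_next(index):
    print('prev/next chain of the app TOC is consistent')
```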
4
doc/tutorials/app/_old/table_of_content_highgui.markdown
Normal file
@ -0,0 +1,4 @@
High Level GUI and Media (highgui module) {#tutorial_table_of_content_highgui}
=========================================

Content has been moved to this page: @ref tutorial_table_of_content_app
4
doc/tutorials/app/_old/table_of_content_imgcodecs.markdown
Normal file
@ -0,0 +1,4 @@
Image Input and Output (imgcodecs module) {#tutorial_table_of_content_imgcodecs}
=========================================

Content has been moved to this page: @ref tutorial_table_of_content_app
4
doc/tutorials/app/_old/table_of_content_videoio.markdown
Normal file
@ -0,0 +1,4 @@
Video Input and Output (videoio module) {#tutorial_table_of_content_videoio}
=========================================

Content has been moved to this page: @ref tutorial_table_of_content_app
(12 tutorial images moved; file sizes unchanged)
@ -1,6 +1,14 @@
Reading Geospatial Raster files with GDAL {#tutorial_raster_io_gdal}
=========================================

@prev_tutorial{tutorial_trackbar}
@next_tutorial{tutorial_video_input_psnr_ssim}

| | |
| -: | :- |
| Original author | Marvin Smith |
| Compatibility | OpenCV >= 3.0 |

Geospatial raster data is a heavily used product in Geographic Information Systems and
Photogrammetry. Raster data can typically represent imagery and Digital Elevation Models (DEM). The
standard library for loading GIS imagery is the Geospatial Data Abstraction Library [(GDAL)](http://www.gdal.org). In this
10
doc/tutorials/app/table_of_content_app.markdown
Normal file
@ -0,0 +1,10 @@
Application utils (highgui, imgcodecs, videoio modules) {#tutorial_table_of_content_app}
=======================================================

- @subpage tutorial_trackbar
- @subpage tutorial_raster_io_gdal
- @subpage tutorial_video_input_psnr_ssim
- @subpage tutorial_video_write
- @subpage tutorial_kinect_openni
- @subpage tutorial_orbbec_astra
- @subpage tutorial_intelperc
@ -1,6 +1,14 @@
Adding a Trackbar to our applications! {#tutorial_trackbar}
======================================

@next_tutorial{tutorial_raster_io_gdal}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |


- In the previous tutorials (about @ref tutorial_adding_images and @ref tutorial_basic_linear_transform)
  you might have noted that we needed to give some **input** to our programs, such
  as \f$\alpha\f$ and \f$\beta\f$. We accomplished that by entering this data using the Terminal.
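The bullet above motivates replacing terminal input for \f$\alpha\f$ and \f$\beta\f$ with a trackbar. A minimal sketch of that idea in Python, assuming two same-sized input images; the file names and slider range are placeholders, not taken from this patch:

```python
import cv2 as cv

# Blend two images with a trackbar-controlled alpha instead of typing it in a
# terminal. 'left.png' and 'right.png' are placeholder names; both images are
# assumed to exist and to have the same size.
src1 = cv.imread('left.png')
src2 = cv.imread('right.png')

def on_trackbar(val):
    alpha = val / 100.0
    dst = cv.addWeighted(src1, alpha, src2, 1.0 - alpha, 0.0)
    cv.imshow('Linear Blend', dst)

cv.namedWindow('Linear Blend')
cv.createTrackbar('Alpha x100', 'Linear Blend', 0, 100, on_trackbar)
on_trackbar(0)
cv.waitKey()
```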
@ -1,8 +1,14 @@
Video Input with OpenCV and similarity measurement {#tutorial_video_input_psnr_ssim}
==================================================

@prev_tutorial{tutorial_raster_io_gdal}
@next_tutorial{tutorial_video_write}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----
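The page above measures frame similarity with PSNR (and SSIM). As a reminder of what the PSNR number means, here is a small sketch of the formula PSNR = 10 * log10(MAX^2 / MSE) for 8-bit frames; the synthetic frames are placeholders, not tutorial data:

```python
import numpy as np

def psnr(frame1, frame2):
    """PSNR between two 8-bit frames: 10 * log10(255^2 / MSE); higher means more similar."""
    diff = frame1.astype(np.float64) - frame2.astype(np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float('inf')          # identical frames
    return 10.0 * np.log10((255.0 ** 2) / mse)

# Two synthetic frames standing in for decoded video frames:
a = np.full((480, 640, 3), 100, np.uint8)
b = a.copy()
b[0, 0, 0] = 110
print(round(psnr(a, b), 2))
```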
@ -4,6 +4,11 @@ Creating a video with OpenCV {#tutorial_video_write}
@prev_tutorial{tutorial_video_input_psnr_ssim}
@next_tutorial{tutorial_kinect_openni}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Camera calibration With OpenCV {#tutorial_camera_calibration}
@prev_tutorial{tutorial_camera_calibration_square_chess}
@next_tutorial{tutorial_real_time_pose}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 4.0 |


Cameras have been around for a long, long time. However, with the introduction of cheap *pinhole*
cameras in the late 20th century, they became a common occurrence in our everyday life.

@ -3,6 +3,11 @@ Create calibration pattern {#tutorial_camera_calibration_pattern}

@next_tutorial{tutorial_camera_calibration_square_chess}

| | |
| -: | :- |
| Original author | Laurent Berger |
| Compatibility | OpenCV >= 3.0 |


The goal of this tutorial is to learn how to create a calibration pattern.

@ -4,6 +4,11 @@ Camera calibration with square chessboard {#tutorial_camera_calibration_square_c
@prev_tutorial{tutorial_camera_calibration_pattern}
@next_tutorial{tutorial_camera_calibration}

| | |
| -: | :- |
| Original author | Victor Eruhimov |
| Compatibility | OpenCV >= 4.0 |


The goal of this tutorial is to learn how to calibrate a camera given a set of chessboard images.
(3 tutorial images removed)
@ -3,6 +3,11 @@ Interactive camera calibration application {#tutorial_interactive_calibration}

@prev_tutorial{tutorial_real_time_pose}

| | |
| -: | :- |
| Original author | Vladislav Sovrasov |
| Compatibility | OpenCV >= 3.1 |


According to the classical calibration technique, the user must collect all data first and then run the @ref cv::calibrateCamera function
to obtain the camera parameters. If the average re-projection error is huge or the estimated parameters seem to be wrong, the process of
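The paragraph above refers to the classical batch workflow around @ref cv::calibrateCamera. A small sketch of that workflow in Python; the 9x6 chessboard size and the `calib/*.jpg` image set are assumptions, not values from this patch:

```python
import glob
import numpy as np
import cv2 as cv

# Classical calibration: collect all chessboard views first, then calibrate once.
# Pattern size and image paths are placeholder assumptions.
pattern = (9, 6)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_points, img_points, size = [], [], None
for name in glob.glob('calib/*.jpg'):
    gray = cv.imread(name, cv.IMREAD_GRAYSCALE)
    found, corners = cv.findChessboardCorners(gray, pattern)
    if found:
        obj_points.append(objp)
        img_points.append(corners)
        size = gray.shape[::-1]

if img_points:
    rms, K, dist, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, size, None, None)
    print('average re-projection error:', rms)
```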
@ -4,6 +4,11 @@ Real Time pose estimation of a textured object {#tutorial_real_time_pose}
@prev_tutorial{tutorial_camera_calibration}
@next_tutorial{tutorial_interactive_calibration}

| | |
| -: | :- |
| Original author | Edgar Riba |
| Compatibility | OpenCV >= 3.0 |


Nowadays, augmented reality is one of the top research topics in the computer vision and robotics fields.
The most elemental problem in augmented reality is the estimation of the camera pose with respect to an
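The pose-estimation problem stated above boils down to solving PnP from 2D-3D correspondences (the calib3d table of contents below mentions the PnP + RANSAC approach used by this tutorial). A hedged sketch with synthetic correspondences; all numeric values are placeholders:

```python
import numpy as np
import cv2 as cv

# Recover a camera pose from 2D-3D correspondences with PnP + RANSAC.
# Synthetic data: project known 3D points with a known pose, then solve for it.
object_points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                          [0, 0, 1], [1, 1, 1]], dtype=np.float32)
K = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]], dtype=np.float64)
rvec_true = np.array([0.1, -0.2, 0.05])
tvec_true = np.array([0.3, -0.1, 5.0])
image_points, _ = cv.projectPoints(object_points, rvec_true, tvec_true, K, None)

ok, rvec, tvec, inliers = cv.solvePnPRansac(object_points, image_points, K, None)
if ok:
    print('recovered translation:', tvec.ravel())   # close to [0.3, -0.1, 5.0]
```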
@ -1,58 +1,8 @@
Camera calibration and 3D reconstruction (calib3d module) {#tutorial_table_of_content_calib3d}
==========================================================

Although we get most of our images in a 2D format, they do come from a 3D world. Here you will learn how to find out 3D world information from 2D images.

- @subpage tutorial_camera_calibration_pattern

    *Languages:* Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Laurent Berger

    You will learn how to create a calibration pattern.

- @subpage tutorial_camera_calibration_square_chess

    *Languages:* C++

    *Compatibility:* \> OpenCV 2.0

    *Author:* Victor Eruhimov

    You will use some chessboard images to calibrate your camera.

- @subpage tutorial_camera_calibration

    *Languages:* C++

    *Compatibility:* \> OpenCV 4.0

    *Author:* Bernát Gábor

    Camera calibration by using either the chessboard, circle or the asymmetrical circle
    pattern. Get the images either from a camera attached, a video file or from an image
    collection.

- @subpage tutorial_real_time_pose

    *Languages:* C++

    *Compatibility:* \> OpenCV 2.0

    *Author:* Edgar Riba

    Real time pose estimation of a textured object using ORB features, FlannBased matcher, PnP
    approach plus Ransac and a Linear Kalman Filter to reject possible bad poses.

- @subpage tutorial_interactive_calibration

    *Compatibility:* \> OpenCV 3.1

    *Author:* Vladislav Sovrasov

    Camera calibration by using either the chessboard, chAruco, asymmetrical circle or dual asymmetrical circle
    pattern. The calibration process is continuous, so you can see results after each new pattern shot.
    As an output you get the average reprojection error, intrinsic camera parameters, distortion coefficients and
    confidence intervals for all of the evaluated variables.
@ -4,6 +4,12 @@ Adding (blending) two images using OpenCV {#tutorial_adding_images}
@prev_tutorial{tutorial_mat_operations}
@next_tutorial{tutorial_basic_linear_transform}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

We will learn how to blend two images!
Goal
----

@ -4,6 +4,11 @@ Changing the contrast and brightness of an image! {#tutorial_basic_linear_transf
@prev_tutorial{tutorial_adding_images}
@next_tutorial{tutorial_discrete_fourier_transform}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Discrete Fourier Transform {#tutorial_discrete_fourier_transform}
@prev_tutorial{tutorial_basic_linear_transform}
@next_tutorial{tutorial_file_input_output_with_xml_yml}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ File Input and Output using XML and YAML files {#tutorial_file_input_output_with
@prev_tutorial{tutorial_discrete_fourier_transform}
@next_tutorial{tutorial_how_to_use_OpenCV_parallel_for_}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----
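The page above documents cv::FileStorage for XML/YAML persistence. A short round-trip sketch in Python; the file name and stored values are placeholders:

```python
import numpy as np
import cv2 as cv

# Write a matrix and a scalar to YAML with FileStorage, then read them back.
# 'state.yml' and the stored values are placeholders for this sketch.
R = np.eye(3, dtype=np.float64)

fs = cv.FileStorage('state.yml', cv.FILE_STORAGE_WRITE)
fs.write('R', R)
fs.write('frameCount', 5)
fs.release()

fs = cv.FileStorage('state.yml', cv.FILE_STORAGE_READ)
print(fs.getNode('frameCount').real())   # 5.0
print(fs.getNode('R').mat())             # 3x3 identity
fs.release()
```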
@ -4,6 +4,11 @@ How to scan images, lookup tables and time measurement with OpenCV {#tutorial_ho
@prev_tutorial{tutorial_mat_the_basic_image_container}
@next_tutorial{tutorial_mat_mask_operations}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -3,6 +3,10 @@ How to use the OpenCV parallel_for_ to parallelize your code {#tutorial_how_to_u

@prev_tutorial{tutorial_file_input_output_with_xml_yml}

| | |
| -: | :- |
| Compatibility | OpenCV >= 3.0 |

Goal
----
(12 tutorial images removed)
@ -4,6 +4,11 @@ Mask operations on matrices {#tutorial_mat_mask_operations}
@prev_tutorial{tutorial_how_to_scan_images}
@next_tutorial{tutorial_mat_operations}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Mask operations on matrices are quite simple. The idea is that we recalculate each pixel's value in
an image according to a mask matrix (also known as a kernel). This mask holds values that will adjust
how much influence neighboring pixels (and the current pixel) have on the new pixel value. From a
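The paragraph above describes recomputing each pixel from a weighted neighbourhood. A minimal sketch applying the classic sharpening kernel with cv::filter2D (the same function the core table of contents below points to); the kernel values and file names are the usual textbook example, not taken from this diff:

```python
import numpy as np
import cv2 as cv

# Each output pixel becomes a weighted sum of its neighbourhood, as described
# above. The classic 3x3 sharpening kernel is used here as an example.
kernel = np.array([[ 0, -1,  0],
                   [-1,  5, -1],
                   [ 0, -1,  0]], dtype=np.float32)

img = cv.imread('input.png')                   # placeholder file name
if img is not None:
    sharpened = cv.filter2D(img, -1, kernel)   # -1 keeps the source depth
    cv.imwrite('sharpened.png', sharpened)
```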
@ -4,6 +4,10 @@ Operations with images {#tutorial_mat_operations}
@prev_tutorial{tutorial_mat_mask_operations}
@next_tutorial{tutorial_adding_images}

| | |
| -: | :- |
| Compatibility | OpenCV >= 3.0 |

Input/Output
------------

@ -3,6 +3,11 @@ Mat - The Basic Image Container {#tutorial_mat_the_basic_image_container}

@next_tutorial{tutorial_how_to_scan_images}

| | |
| -: | :- |
| Original author | Bernát Gábor |
| Compatibility | OpenCV >= 3.0 |

Goal
----
@ -1,97 +1,12 @@
The Core Functionality (core module) {#tutorial_table_of_content_core}
=====================================

Here you will learn about the basic building blocks of the library. A must-read for
understanding how to manipulate images at the pixel level.

- @subpage tutorial_mat_the_basic_image_container

    *Languages:* C++

    *Compatibility:* \> OpenCV 2.0

    *Author:* Bernát Gábor

    You will learn how to store images in the memory and how to print out their content to the
    console.

- @subpage tutorial_how_to_scan_images

    *Languages:* C++

    *Compatibility:* \> OpenCV 2.0

    *Author:* Bernát Gábor

    You'll find out how to scan images (go through each of the image pixels) with OpenCV.
    Bonus: time measurement with OpenCV.

- @subpage tutorial_mat_mask_operations

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Bernát Gábor

    You'll find out how to scan images with neighbor access and use the @ref cv::filter2D
    function to apply kernel filters on images.

- @subpage tutorial_mat_operations

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    Reading/writing images from file, accessing pixels, primitive operations, visualizing images.

- @subpage tutorial_adding_images

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    We will learn how to blend two images!

- @subpage tutorial_basic_linear_transform

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    We will learn how to change our image appearance!

- @subpage tutorial_discrete_fourier_transform

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Bernát Gábor

    You will see how and why to use the Discrete Fourier transform with OpenCV.

- @subpage tutorial_file_input_output_with_xml_yml

    *Languages:* C++, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Bernát Gábor

    You will see how to use the @ref cv::FileStorage data structure of OpenCV to write and read
    data in the XML or YAML file formats.

- @subpage tutorial_how_to_use_OpenCV_parallel_for_

    *Languages:* C++

    *Compatibility:* \>= OpenCV 2.4.3

    You will see how to use the OpenCV parallel_for_ to easily parallelize your code.
@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_custom_layers}
@next_tutorial{tutorial_dnn_text_spotting}

| | |
| -: | :- |
| Original author | Zihao Mu |
| Compatibility | OpenCV >= 4.3 |

## Introduction

In this tutorial, we first introduce how to obtain a custom OCR model, then how to transform your own OCR models so that they can be run correctly by the opencv_dnn module, and finally we will provide some pre-trained models.

@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_halide_scheduling}
@next_tutorial{tutorial_dnn_yolo}

| | |
| -: | :- |
| Original author | Dmitry Kurtaev |
| Compatibility | OpenCV >= 3.3 |

## Introduction
In this tutorial you'll learn how to run deep learning networks on an Android device
using the OpenCV deep learning module.

@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_javascript}
@next_tutorial{tutorial_dnn_OCR}

| | |
| -: | :- |
| Original author | Dmitry Kurtaev |
| Compatibility | OpenCV >= 3.4.1 |

## Introduction
Deep learning is a fast-growing area. New approaches to building neural networks
usually introduce new types of layers. They could be modifications of existing
@ -3,6 +3,11 @@ Load Caffe framework models {#tutorial_dnn_googlenet}

@next_tutorial{tutorial_dnn_halide}

| | |
| -: | :- |
| Original author | Vitaliy Lyudvichenko |
| Compatibility | OpenCV >= 3.3 |

Introduction
------------
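This page (and its entry in the dnn table of contents further below) covers classifying an image with a Caffe GoogLeNet model through the dnn module. A hedged sketch of that flow; the model files, input image and the 224x224 GoogLeNet-style preprocessing constants are assumptions:

```python
import cv2 as cv

# Classify one image with a Caffe model through cv.dnn. File names and the
# preprocessing constants below are assumptions for this sketch.
net = cv.dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel')

img = cv.imread('space_shuttle.jpg')
blob = cv.dnn.blobFromImage(img, 1.0, (224, 224), (104, 117, 123))
net.setInput(blob)
prob = net.forward()                      # 1 x 1000 class scores
print('top class id:', int(prob.argmax()))
```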
@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_googlenet}
@next_tutorial{tutorial_dnn_halide_scheduling}

| | |
| -: | :- |
| Original author | Dmitry Kurtaev |
| Compatibility | OpenCV >= 3.3 |

## Introduction
This tutorial describes how to run your models in the OpenCV deep learning module
using the Halide language backend. Halide is an open-source project that lets us

@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_halide}
@next_tutorial{tutorial_dnn_android}

| | |
| -: | :- |
| Original author | Dmitry Kurtaev |
| Compatibility | OpenCV >= 3.3 |

## Introduction
Halide code is the same for every device we use. But to achieve satisfactory
efficiency we should schedule computations properly. In this tutorial we describe

@ -3,6 +3,11 @@
@prev_tutorial{tutorial_dnn_yolo}
@next_tutorial{tutorial_dnn_custom_layers}

| | |
| -: | :- |
| Original author | Dmitry Kurtaev |
| Compatibility | OpenCV >= 3.3.1 |

## Introduction
This tutorial will show us how to run deep learning models using OpenCV.js right
in a browser. The tutorial refers to a sample of face detection and face recognition

@ -2,6 +2,11 @@

@prev_tutorial{tutorial_dnn_OCR}

| | |
| -: | :- |
| Original author | Wenqing Zhang |
| Compatibility | OpenCV >= 4.5 |

## Introduction
In this tutorial, we will introduce the APIs for TextRecognitionModel and TextDetectionModel in detail.

@ -4,6 +4,11 @@ YOLO DNNs {#tutorial_dnn_yolo}
@prev_tutorial{tutorial_dnn_android}
@next_tutorial{tutorial_dnn_javascript}

| | |
| -: | :- |
| Original author | Alessandro de Oliveira Faria |
| Compatibility | OpenCV >= 3.3.1 |

Introduction
------------
@ -2,91 +2,11 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn}
=====================================

- @subpage tutorial_dnn_googlenet

    *Languages:* C++

    *Compatibility:* \> OpenCV 3.3

    *Author:* Vitaliy Lyudvichenko

    In this tutorial you will learn how to use the opencv_dnn module for image classification with a trained GoogLeNet network from the Caffe model zoo.

- @subpage tutorial_dnn_halide

    *Languages:* Halide

    *Compatibility:* \> OpenCV 3.3

    *Author:* Dmitry Kurtaev

    This tutorial describes how to run your models in the OpenCV deep learning module using the Halide language backend.

- @subpage tutorial_dnn_halide_scheduling

    *Languages:* Halide

    *Compatibility:* \> OpenCV 3.3

    *Author:* Dmitry Kurtaev

    In this tutorial we describe the ways to schedule your networks using the Halide backend in the OpenCV deep learning module.

- @subpage tutorial_dnn_android

    *Languages:* Java

    *Compatibility:* \> OpenCV 3.3

    *Author:* Dmitry Kurtaev

    This tutorial will show you how to run a deep learning model using OpenCV on an Android device.

- @subpage tutorial_dnn_yolo

    *Languages:* C++, Python

    *Compatibility:* \> OpenCV 3.3.1

    *Author:* Alessandro de Oliveira Faria

    In this tutorial you will learn how to use the opencv_dnn module through the yolo_object_detection sample with device capture, a video file or an image.

- @subpage tutorial_dnn_javascript

    *Languages:* JavaScript

    *Compatibility:* \> OpenCV 3.3.1

    *Author:* Dmitry Kurtaev

    In this tutorial we'll run deep learning models in the browser using OpenCV.js.

- @subpage tutorial_dnn_custom_layers

    *Languages:* C++, Python

    *Compatibility:* \> OpenCV 3.4.1

    *Author:* Dmitry Kurtaev

    How to define custom layers to import networks.

- @subpage tutorial_dnn_OCR

    *Languages:* C++

    *Compatibility:* \> OpenCV 4.3

    *Author:* Zihao Mu

    In this tutorial you will learn how to use the opencv_dnn module with custom OCR models.

- @subpage tutorial_dnn_text_spotting

    *Languages:* C++

    *Compatibility:* \> OpenCV 4.5

    *Author:* Wenqing Zhang

    In this tutorial, we'll introduce how to use the high-level APIs for text recognition and text detection
@ -4,6 +4,11 @@ AKAZE local features matching {#tutorial_akaze_matching}
@prev_tutorial{tutorial_detection_of_planar_objects}
@next_tutorial{tutorial_akaze_tracking}

| | |
| -: | :- |
| Original author | Fedor Morozov |
| Compatibility | OpenCV >= 3.0 |

Introduction
------------
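The page above is about finding correspondences between two images with AKAZE local features (as the features2d table of contents below also summarizes). A small sketch of the usual detect-describe-match pipeline; the image names and the 0.8 ratio-test threshold are assumptions:

```python
import cv2 as cv

# AKAZE keypoints and binary descriptors matched between two views.
# 'view1.png'/'view2.png' and the 0.8 Lowe ratio are assumptions for this sketch.
img1 = cv.imread('view1.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('view2.png', cv.IMREAD_GRAYSCALE)

akaze = cv.AKAZE_create()
kp1, desc1 = akaze.detectAndCompute(img1, None)
kp2, desc2 = akaze.detectAndCompute(img2, None)

matcher = cv.BFMatcher(cv.NORM_HAMMING)
good = [m for m, n in matcher.knnMatch(desc1, desc2, k=2)
        if m.distance < 0.8 * n.distance]
print(len(good), 'good matches')
```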
@ -4,6 +4,11 @@ AKAZE and ORB planar tracking {#tutorial_akaze_tracking}
@prev_tutorial{tutorial_akaze_matching}
@next_tutorial{tutorial_homography}

| | |
| -: | :- |
| Original author | Fedor Morozov |
| Compatibility | OpenCV >= 3.0 |

Introduction
------------

@ -4,6 +4,10 @@ Detection of planar objects {#tutorial_detection_of_planar_objects}
@prev_tutorial{tutorial_feature_homography}
@next_tutorial{tutorial_akaze_matching}

| | |
| -: | :- |
| Original author | Victor Eruhimov |
| Compatibility | OpenCV >= 3.0 |

The goal of this tutorial is to learn how to use *features2d* and *calib3d* modules for detecting
known planar objects in scenes.

@ -4,6 +4,11 @@ Feature Description {#tutorial_feature_description}
@prev_tutorial{tutorial_feature_detection}
@next_tutorial{tutorial_feature_flann_matcher}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Feature Detection {#tutorial_feature_detection}
@prev_tutorial{tutorial_corner_subpixels}
@next_tutorial{tutorial_feature_description}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Feature Matching with FLANN {#tutorial_feature_flann_matcher}
@prev_tutorial{tutorial_feature_description}
@next_tutorial{tutorial_feature_homography}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Features2D + Homography to find a known object {#tutorial_feature_homography}
@prev_tutorial{tutorial_feature_flann_matcher}
@next_tutorial{tutorial_detection_of_planar_objects}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -3,6 +3,10 @@ Basic concepts of the homography explained with code {#tutorial_homography}

@prev_tutorial{tutorial_akaze_tracking}

| | |
| -: | :- |
| Compatibility | OpenCV >= 3.0 |

@tableofcontents

Introduction {#tutorial_homography_Introduction}
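The homography page above explains the planar transform that maps one view of a plane to another. A hedged sketch estimating it from point correspondences; the point pairs below are synthetic placeholders:

```python
import numpy as np
import cv2 as cv

# Estimate the 3x3 homography between two views of a plane from point pairs,
# the concept this page explains. The correspondences are synthetic placeholders.
src = np.array([[0, 0], [320, 0], [320, 240], [0, 240]], dtype=np.float32)
dst = np.array([[12, 8], [310, 25], [300, 250], [5, 230]], dtype=np.float32)

H, mask = cv.findHomography(src, dst)
print(H)

# Map an arbitrary source point through the recovered homography:
pt = cv.perspectiveTransform(np.array([[[160, 120]]], dtype=np.float32), H)
print(pt.ravel())
```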
(11 tutorial images removed)
@ -1,128 +1,15 @@
2D Features framework (feature2d module) {#tutorial_table_of_content_features2d}
=========================================

Learn how to use the feature point detectors, descriptors and matching framework found inside
OpenCV.

- @subpage tutorial_harris_detector

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    Why is it a good idea to track corners? We learn how to use the Harris method to detect
    corners.

- @subpage tutorial_good_features_to_track

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    Where we use an improved method to detect corners more accurately.

- @subpage tutorial_generic_corner_detector

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    Here you will learn how to use OpenCV functions to make your personalized corner detector!

- @subpage tutorial_corner_subpixels

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    Is pixel resolution enough? Here we learn a simple method to improve our corner location accuracy.

- @subpage tutorial_feature_detection

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    In this tutorial, you will use *features2d* to detect interest points.

- @subpage tutorial_feature_description

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    In this tutorial, you will use *features2d* to calculate feature vectors.

- @subpage tutorial_feature_flann_matcher

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    In this tutorial, you will use the FLANN library to perform fast matching.

- @subpage tutorial_feature_homography

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    In this tutorial, you will use *features2d* and *calib3d* to detect an object in a scene.

- @subpage tutorial_detection_of_planar_objects

    *Languages:* C++

    *Compatibility:* \> OpenCV 2.0

    *Author:* Victor Eruhimov

    You will use *features2d* and *calib3d* modules for detecting known planar objects in
    scenes.

- @subpage tutorial_akaze_matching

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 3.0

    *Author:* Fedor Morozov

    Using *AKAZE* local features to find correspondences between two images.

- @subpage tutorial_akaze_tracking

    *Languages:* C++

    *Compatibility:* \> OpenCV 3.0

    *Author:* Fedor Morozov

    Using *AKAZE* and *ORB* for planar object tracking.

- @subpage tutorial_homography

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 3.0

    This tutorial will explain the basic concepts of the homography with some
    demonstration codes.
@ -4,6 +4,11 @@ Detecting corners location in subpixels {#tutorial_corner_subpixels}
@prev_tutorial{tutorial_generic_corner_detector}
@next_tutorial{tutorial_feature_detection}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,10 @@ Creating your own corner detector {#tutorial_generic_corner_detector}
@prev_tutorial{tutorial_good_features_to_track}
@next_tutorial{tutorial_corner_subpixels}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -4,6 +4,11 @@ Shi-Tomasi corner detector {#tutorial_good_features_to_track}
@prev_tutorial{tutorial_harris_detector}
@next_tutorial{tutorial_generic_corner_detector}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----

@ -3,6 +3,11 @@ Harris corner detector {#tutorial_harris_detector}

@next_tutorial{tutorial_good_features_to_track}

| | |
| -: | :- |
| Original author | Ana Huamán |
| Compatibility | OpenCV >= 3.0 |

Goal
----
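The Harris and Shi-Tomasi pages above both detect corners; cv::goodFeaturesToTrack exposes both methods behind one call. A hedged sketch; the parameter values and file name are assumptions:

```python
import cv2 as cv

# Shi-Tomasi corners (pass useHarrisDetector=True for the Harris variant).
# The thresholds and 'building.jpg' are assumptions for this sketch.
gray = cv.imread('building.jpg', cv.IMREAD_GRAYSCALE)
if gray is not None:
    corners = cv.goodFeaturesToTrack(gray, maxCorners=100,
                                     qualityLevel=0.01, minDistance=10)
    print(0 if corners is None else len(corners), 'corners found')
```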
@ -1,5 +1,8 @@
# Porting anisotropic image segmentation on G-API {#tutorial_gapi_anisotropic_segmentation}

@prev_tutorial{tutorial_gapi_interactive_face_detection}
@next_tutorial{tutorial_gapi_face_beautification}

[TOC]

# Introduction {#gapi_anisotropic_intro}

@ -1,5 +1,7 @@
# Implementing a face beautification algorithm with G-API {#tutorial_gapi_face_beautification}

@prev_tutorial{tutorial_gapi_anisotropic_segmentation}

[TOC]

# Introduction {#gapi_fb_intro}

@ -1,5 +1,7 @@
# Face analytics pipeline with G-API {#tutorial_gapi_interactive_face_detection}

@next_tutorial{tutorial_gapi_anisotropic_segmentation}

[TOC]

# Overview {#gapi_ifd_intro}
(1 tutorial image removed)
@ -1,14 +0,0 @@
High Level GUI and Media (highgui module) {#tutorial_table_of_content_highgui}
=========================================

This section contains tutorials about how to use the built-in graphical user interface of the library.

- @subpage tutorial_trackbar

    *Languages:* C++, Java, Python

    *Compatibility:* \> OpenCV 2.0

    *Author:* Ana Huamán

    We will learn how to add a Trackbar to our applications
(6 tutorial images removed)