mirror of
https://github.com/opencv/opencv.git
synced 2025-06-07 09:25:45 +08:00
Merge pull request #25161 from mshabunin:doc-upgrade-5.x
Documentation transition to fresh Doxygen (5.x) #25161 Port of #25042 Merge with opencv/opencv_contrib#3687 CI part: opencv/ci-gha-workflow#162
This commit is contained in:
parent
c6776ec136
commit
de29223217
@ -573,8 +573,6 @@ if(OPENCV_DISABLE_FILESYSTEM_SUPPORT)
|
|||||||
add_definitions(-DOPENCV_HAVE_FILESYSTEM_SUPPORT=0)
|
add_definitions(-DOPENCV_HAVE_FILESYSTEM_SUPPORT=0)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(OPENCV_MATHJAX_RELPATH "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0" CACHE STRING "URI to a MathJax installation")
|
|
||||||
|
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
# Get actual OpenCV version number from sources
|
# Get actual OpenCV version number from sources
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
|
@ -16,6 +16,10 @@ endif()
|
|||||||
|
|
||||||
find_package(Doxygen)
|
find_package(Doxygen)
|
||||||
if(DOXYGEN_FOUND)
|
if(DOXYGEN_FOUND)
|
||||||
|
if (DOXYGEN_VERSION VERSION_LESS 1.9.8)
|
||||||
|
message(WARNING "Found doxygen ${DOXYGEN_VERSION}, version 1.9.8 is used for testing, there is
|
||||||
|
a chance your documentation will look different or have some limitations.")
|
||||||
|
endif()
|
||||||
add_custom_target(doxygen)
|
add_custom_target(doxygen)
|
||||||
|
|
||||||
# not documented modules list
|
# not documented modules list
|
||||||
@ -196,6 +200,8 @@ if(DOXYGEN_FOUND)
|
|||||||
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/tutorial-utils.js")
|
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/tutorial-utils.js")
|
||||||
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_HTML_FILES "${CMAKE_DOXYGEN_HTML_FILES}")
|
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_HTML_FILES "${CMAKE_DOXYGEN_HTML_FILES}")
|
||||||
|
|
||||||
|
set(OPENCV_MATHJAX_RELPATH "https://cdn.jsdelivr.net/npm/mathjax@3.0.1" CACHE STRING "URI to a MathJax installation")
|
||||||
|
|
||||||
set(OPENCV_DOCS_DOT_PATH "" CACHE PATH "Doxygen/DOT_PATH value")
|
set(OPENCV_DOCS_DOT_PATH "" CACHE PATH "Doxygen/DOT_PATH value")
|
||||||
set(CMAKECONFIG_DOT_PATH "${OPENCV_DOCS_DOT_PATH}")
|
set(CMAKECONFIG_DOT_PATH "${OPENCV_DOCS_DOT_PATH}")
|
||||||
|
|
||||||
|
@ -3,8 +3,11 @@ PROJECT_NAME = OpenCV
|
|||||||
PROJECT_NUMBER = @OPENCV_VERSION@
|
PROJECT_NUMBER = @OPENCV_VERSION@
|
||||||
PROJECT_BRIEF = "Open Source Computer Vision"
|
PROJECT_BRIEF = "Open Source Computer Vision"
|
||||||
PROJECT_LOGO = @CMAKE_CURRENT_SOURCE_DIR@/opencv-logo-small.png
|
PROJECT_LOGO = @CMAKE_CURRENT_SOURCE_DIR@/opencv-logo-small.png
|
||||||
|
#PROJECT_ICON =
|
||||||
OUTPUT_DIRECTORY = @CMAKE_DOXYGEN_OUTPUT_PATH@
|
OUTPUT_DIRECTORY = @CMAKE_DOXYGEN_OUTPUT_PATH@
|
||||||
CREATE_SUBDIRS = YES
|
CREATE_SUBDIRS = YES
|
||||||
|
CREATE_SUBDIRS_LEVEL = 8
|
||||||
|
ALLOW_UNICODE_NAMES = NO
|
||||||
OUTPUT_LANGUAGE = English
|
OUTPUT_LANGUAGE = English
|
||||||
BRIEF_MEMBER_DESC = YES
|
BRIEF_MEMBER_DESC = YES
|
||||||
REPEAT_BRIEF = YES
|
REPEAT_BRIEF = YES
|
||||||
@ -26,8 +29,10 @@ STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@/modules @CMAKE_DOXYGEN_INCLUDE_ROOTS
|
|||||||
STRIP_FROM_INC_PATH = @CMAKE_DOXYGEN_INCLUDE_ROOTS@
|
STRIP_FROM_INC_PATH = @CMAKE_DOXYGEN_INCLUDE_ROOTS@
|
||||||
SHORT_NAMES = NO
|
SHORT_NAMES = NO
|
||||||
JAVADOC_AUTOBRIEF = NO
|
JAVADOC_AUTOBRIEF = NO
|
||||||
|
JAVADOC_BANNER = NO
|
||||||
QT_AUTOBRIEF = NO
|
QT_AUTOBRIEF = NO
|
||||||
MULTILINE_CPP_IS_BRIEF = NO
|
MULTILINE_CPP_IS_BRIEF = NO
|
||||||
|
PYTHON_DOCSTRING = YES
|
||||||
INHERIT_DOCS = YES
|
INHERIT_DOCS = YES
|
||||||
SEPARATE_MEMBER_PAGES = NO
|
SEPARATE_MEMBER_PAGES = NO
|
||||||
TAB_SIZE = 4
|
TAB_SIZE = 4
|
||||||
@ -43,26 +48,34 @@ OPTIMIZE_OUTPUT_FOR_C = NO
|
|||||||
OPTIMIZE_OUTPUT_JAVA = NO
|
OPTIMIZE_OUTPUT_JAVA = NO
|
||||||
OPTIMIZE_FOR_FORTRAN = NO
|
OPTIMIZE_FOR_FORTRAN = NO
|
||||||
OPTIMIZE_OUTPUT_VHDL = NO
|
OPTIMIZE_OUTPUT_VHDL = NO
|
||||||
|
OPTIMIZE_OUTPUT_SLICE = NO
|
||||||
EXTENSION_MAPPING =
|
EXTENSION_MAPPING =
|
||||||
MARKDOWN_SUPPORT = YES
|
MARKDOWN_SUPPORT = YES
|
||||||
|
TOC_INCLUDE_HEADINGS = 5
|
||||||
|
MARKDOWN_ID_STYLE = DOXYGEN
|
||||||
AUTOLINK_SUPPORT = YES
|
AUTOLINK_SUPPORT = YES
|
||||||
BUILTIN_STL_SUPPORT = YES
|
BUILTIN_STL_SUPPORT = YES
|
||||||
CPP_CLI_SUPPORT = NO
|
CPP_CLI_SUPPORT = NO
|
||||||
SIP_SUPPORT = NO
|
SIP_SUPPORT = NO
|
||||||
IDL_PROPERTY_SUPPORT = YES
|
IDL_PROPERTY_SUPPORT = YES
|
||||||
DISTRIBUTE_GROUP_DOC = NO
|
DISTRIBUTE_GROUP_DOC = NO
|
||||||
|
GROUP_NESTED_COMPOUNDS = NO
|
||||||
SUBGROUPING = YES
|
SUBGROUPING = YES
|
||||||
INLINE_GROUPED_CLASSES = NO
|
INLINE_GROUPED_CLASSES = NO
|
||||||
INLINE_SIMPLE_STRUCTS = NO
|
INLINE_SIMPLE_STRUCTS = NO
|
||||||
TYPEDEF_HIDES_STRUCT = YES
|
TYPEDEF_HIDES_STRUCT = YES
|
||||||
LOOKUP_CACHE_SIZE = 0
|
LOOKUP_CACHE_SIZE = 0
|
||||||
|
NUM_PROC_THREADS = 1
|
||||||
|
TIMESTAMP = YES
|
||||||
EXTRACT_ALL = YES
|
EXTRACT_ALL = YES
|
||||||
EXTRACT_PRIVATE = NO
|
EXTRACT_PRIVATE = NO
|
||||||
|
EXTRACT_PRIV_VIRTUAL = NO
|
||||||
EXTRACT_PACKAGE = NO
|
EXTRACT_PACKAGE = NO
|
||||||
EXTRACT_STATIC = YES
|
EXTRACT_STATIC = YES
|
||||||
EXTRACT_LOCAL_CLASSES = NO
|
EXTRACT_LOCAL_CLASSES = NO
|
||||||
EXTRACT_LOCAL_METHODS = NO
|
EXTRACT_LOCAL_METHODS = NO
|
||||||
EXTRACT_ANON_NSPACES = NO
|
EXTRACT_ANON_NSPACES = NO
|
||||||
|
RESOLVE_UNNAMED_PARAMS = YES
|
||||||
HIDE_UNDOC_MEMBERS = NO
|
HIDE_UNDOC_MEMBERS = NO
|
||||||
HIDE_UNDOC_CLASSES = NO
|
HIDE_UNDOC_CLASSES = NO
|
||||||
HIDE_FRIEND_COMPOUNDS = NO
|
HIDE_FRIEND_COMPOUNDS = NO
|
||||||
@ -70,6 +83,8 @@ HIDE_IN_BODY_DOCS = NO
|
|||||||
INTERNAL_DOCS = NO
|
INTERNAL_DOCS = NO
|
||||||
CASE_SENSE_NAMES = YES
|
CASE_SENSE_NAMES = YES
|
||||||
HIDE_SCOPE_NAMES = NO
|
HIDE_SCOPE_NAMES = NO
|
||||||
|
HIDE_COMPOUND_REFERENCE= NO
|
||||||
|
SHOW_HEADERFILE = YES
|
||||||
SHOW_INCLUDE_FILES = YES
|
SHOW_INCLUDE_FILES = YES
|
||||||
SHOW_GROUPED_MEMB_INC = YES
|
SHOW_GROUPED_MEMB_INC = YES
|
||||||
FORCE_LOCAL_INCLUDES = NO
|
FORCE_LOCAL_INCLUDES = NO
|
||||||
@ -96,11 +111,16 @@ QUIET = YES
|
|||||||
WARNINGS = YES
|
WARNINGS = YES
|
||||||
WARN_IF_UNDOCUMENTED = YES
|
WARN_IF_UNDOCUMENTED = YES
|
||||||
WARN_IF_DOC_ERROR = YES
|
WARN_IF_DOC_ERROR = YES
|
||||||
|
WARN_IF_INCOMPLETE_DOC = YES
|
||||||
WARN_NO_PARAMDOC = NO
|
WARN_NO_PARAMDOC = NO
|
||||||
|
WARN_IF_UNDOC_ENUM_VAL = NO
|
||||||
|
WARN_AS_ERROR = NO
|
||||||
WARN_FORMAT = "$file:$line: $text"
|
WARN_FORMAT = "$file:$line: $text"
|
||||||
|
WARN_LINE_FORMAT = "at line $line of file $file"
|
||||||
WARN_LOGFILE =
|
WARN_LOGFILE =
|
||||||
INPUT = @CMAKE_DOXYGEN_INPUT_LIST@
|
INPUT = @CMAKE_DOXYGEN_INPUT_LIST@
|
||||||
INPUT_ENCODING = UTF-8
|
INPUT_ENCODING = UTF-8
|
||||||
|
INPUT_FILE_ENCODING =
|
||||||
FILE_PATTERNS =
|
FILE_PATTERNS =
|
||||||
RECURSIVE = YES
|
RECURSIVE = YES
|
||||||
EXCLUDE = @CMAKE_DOXYGEN_EXCLUDE_LIST@
|
EXCLUDE = @CMAKE_DOXYGEN_EXCLUDE_LIST@
|
||||||
@ -125,8 +145,11 @@ REFERENCES_LINK_SOURCE = YES
|
|||||||
SOURCE_TOOLTIPS = YES
|
SOURCE_TOOLTIPS = YES
|
||||||
USE_HTAGS = NO
|
USE_HTAGS = NO
|
||||||
VERBATIM_HEADERS = NO
|
VERBATIM_HEADERS = NO
|
||||||
|
CLANG_ASSISTED_PARSING = NO
|
||||||
|
CLANG_ADD_INC_PATHS = YES
|
||||||
|
CLANG_OPTIONS =
|
||||||
|
CLANG_DATABASE_PATH =
|
||||||
ALPHABETICAL_INDEX = YES
|
ALPHABETICAL_INDEX = YES
|
||||||
COLS_IN_ALPHA_INDEX = 5
|
|
||||||
IGNORE_PREFIX =
|
IGNORE_PREFIX =
|
||||||
GENERATE_HTML = YES
|
GENERATE_HTML = YES
|
||||||
HTML_OUTPUT = html
|
HTML_OUTPUT = html
|
||||||
@ -136,14 +159,19 @@ HTML_FOOTER = @CMAKE_CURRENT_SOURCE_DIR@/footer.html
|
|||||||
HTML_STYLESHEET =
|
HTML_STYLESHEET =
|
||||||
HTML_EXTRA_STYLESHEET = @CMAKE_CURRENT_SOURCE_DIR@/doxygen-awesome.css
|
HTML_EXTRA_STYLESHEET = @CMAKE_CURRENT_SOURCE_DIR@/doxygen-awesome.css
|
||||||
HTML_EXTRA_FILES = @CMAKE_DOXYGEN_HTML_FILES@
|
HTML_EXTRA_FILES = @CMAKE_DOXYGEN_HTML_FILES@
|
||||||
|
HTML_COLORSTYLE = LIGHT
|
||||||
HTML_COLORSTYLE_HUE = 220
|
HTML_COLORSTYLE_HUE = 220
|
||||||
HTML_COLORSTYLE_SAT = 100
|
HTML_COLORSTYLE_SAT = 100
|
||||||
HTML_COLORSTYLE_GAMMA = 80
|
HTML_COLORSTYLE_GAMMA = 80
|
||||||
HTML_TIMESTAMP = YES
|
HTML_DYNAMIC_MENUS = YES
|
||||||
HTML_DYNAMIC_SECTIONS = NO
|
HTML_DYNAMIC_SECTIONS = NO
|
||||||
|
HTML_CODE_FOLDING = YES
|
||||||
|
#HTML_COPY_CLIPBOARD = YES
|
||||||
|
#HTML_PROJECT_COOKIE =
|
||||||
HTML_INDEX_NUM_ENTRIES = 100
|
HTML_INDEX_NUM_ENTRIES = 100
|
||||||
GENERATE_DOCSET = NO
|
GENERATE_DOCSET = NO
|
||||||
DOCSET_FEEDNAME = "Doxygen generated docs"
|
DOCSET_FEEDNAME = "Doxygen generated docs"
|
||||||
|
DOCSET_FEEDURL =
|
||||||
DOCSET_BUNDLE_ID = org.doxygen.Project
|
DOCSET_BUNDLE_ID = org.doxygen.Project
|
||||||
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
|
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
|
||||||
DOCSET_PUBLISHER_NAME = Publisher
|
DOCSET_PUBLISHER_NAME = Publisher
|
||||||
@ -154,6 +182,7 @@ GENERATE_CHI = NO
|
|||||||
CHM_INDEX_ENCODING =
|
CHM_INDEX_ENCODING =
|
||||||
BINARY_TOC = NO
|
BINARY_TOC = NO
|
||||||
TOC_EXPAND = NO
|
TOC_EXPAND = NO
|
||||||
|
SITEMAP_URL =
|
||||||
GENERATE_QHP = @CMAKE_DOXYGEN_GENERATE_QHP@
|
GENERATE_QHP = @CMAKE_DOXYGEN_GENERATE_QHP@
|
||||||
QCH_FILE = ../opencv-@OPENCV_VERSION@.qch
|
QCH_FILE = ../opencv-@OPENCV_VERSION@.qch
|
||||||
QHP_NAMESPACE = org.opencv.@OPENCV_VERSION@
|
QHP_NAMESPACE = org.opencv.@OPENCV_VERSION@
|
||||||
@ -166,15 +195,19 @@ GENERATE_ECLIPSEHELP = NO
|
|||||||
ECLIPSE_DOC_ID = org.doxygen.Project
|
ECLIPSE_DOC_ID = org.doxygen.Project
|
||||||
DISABLE_INDEX = NO
|
DISABLE_INDEX = NO
|
||||||
GENERATE_TREEVIEW = NO
|
GENERATE_TREEVIEW = NO
|
||||||
|
FULL_SIDEBAR = NO
|
||||||
ENUM_VALUES_PER_LINE = 1
|
ENUM_VALUES_PER_LINE = 1
|
||||||
TREEVIEW_WIDTH = 250
|
TREEVIEW_WIDTH = 250
|
||||||
EXT_LINKS_IN_WINDOW = YES
|
EXT_LINKS_IN_WINDOW = YES
|
||||||
|
OBFUSCATE_EMAILS = YES
|
||||||
|
HTML_FORMULA_FORMAT = svg
|
||||||
FORMULA_FONTSIZE = 14
|
FORMULA_FONTSIZE = 14
|
||||||
FORMULA_TRANSPARENT = YES
|
FORMULA_MACROFILE =
|
||||||
USE_MATHJAX = YES
|
USE_MATHJAX = YES
|
||||||
MATHJAX_FORMAT = HTML-CSS
|
MATHJAX_VERSION = MathJax_3
|
||||||
|
MATHJAX_FORMAT = chtml
|
||||||
MATHJAX_RELPATH = @OPENCV_MATHJAX_RELPATH@
|
MATHJAX_RELPATH = @OPENCV_MATHJAX_RELPATH@
|
||||||
MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
|
MATHJAX_EXTENSIONS = ams
|
||||||
MATHJAX_CODEFILE = @CMAKE_CURRENT_SOURCE_DIR@/mymath.js
|
MATHJAX_CODEFILE = @CMAKE_CURRENT_SOURCE_DIR@/mymath.js
|
||||||
SEARCHENGINE = YES
|
SEARCHENGINE = YES
|
||||||
SERVER_BASED_SEARCH = NO
|
SERVER_BASED_SEARCH = NO
|
||||||
@ -187,18 +220,20 @@ GENERATE_LATEX = NO
|
|||||||
LATEX_OUTPUT = latex
|
LATEX_OUTPUT = latex
|
||||||
LATEX_CMD_NAME = latex
|
LATEX_CMD_NAME = latex
|
||||||
MAKEINDEX_CMD_NAME = makeindex
|
MAKEINDEX_CMD_NAME = makeindex
|
||||||
|
LATEX_MAKEINDEX_CMD = makeindex
|
||||||
COMPACT_LATEX = NO
|
COMPACT_LATEX = NO
|
||||||
PAPER_TYPE = a4
|
PAPER_TYPE = a4
|
||||||
EXTRA_PACKAGES = mymath
|
EXTRA_PACKAGES = mymath
|
||||||
LATEX_HEADER =
|
LATEX_HEADER =
|
||||||
LATEX_FOOTER =
|
LATEX_FOOTER =
|
||||||
|
LATEX_EXTRA_STYLESHEET =
|
||||||
LATEX_EXTRA_FILES =
|
LATEX_EXTRA_FILES =
|
||||||
PDF_HYPERLINKS = YES
|
PDF_HYPERLINKS = YES
|
||||||
USE_PDFLATEX = YES
|
USE_PDFLATEX = YES
|
||||||
LATEX_BATCHMODE = NO
|
LATEX_BATCHMODE = NO
|
||||||
LATEX_HIDE_INDICES = NO
|
LATEX_HIDE_INDICES = NO
|
||||||
LATEX_SOURCE_CODE = NO
|
|
||||||
LATEX_BIB_STYLE = plain
|
LATEX_BIB_STYLE = plain
|
||||||
|
LATEX_EMOJI_DIRECTORY =
|
||||||
GENERATE_RTF = NO
|
GENERATE_RTF = NO
|
||||||
RTF_OUTPUT = rtf
|
RTF_OUTPUT = rtf
|
||||||
COMPACT_RTF = NO
|
COMPACT_RTF = NO
|
||||||
@ -208,13 +243,18 @@ RTF_EXTENSIONS_FILE =
|
|||||||
GENERATE_MAN = NO
|
GENERATE_MAN = NO
|
||||||
MAN_OUTPUT = man
|
MAN_OUTPUT = man
|
||||||
MAN_EXTENSION = .3
|
MAN_EXTENSION = .3
|
||||||
|
MAN_SUBDIR =
|
||||||
MAN_LINKS = NO
|
MAN_LINKS = NO
|
||||||
GENERATE_XML = NO
|
GENERATE_XML = NO
|
||||||
XML_OUTPUT = xml
|
XML_OUTPUT = xml
|
||||||
XML_PROGRAMLISTING = YES
|
XML_PROGRAMLISTING = YES
|
||||||
|
XML_NS_MEMB_FILE_SCOPE = NO
|
||||||
GENERATE_DOCBOOK = NO
|
GENERATE_DOCBOOK = NO
|
||||||
DOCBOOK_OUTPUT = docbook
|
DOCBOOK_OUTPUT = docbook
|
||||||
GENERATE_AUTOGEN_DEF = NO
|
GENERATE_AUTOGEN_DEF = NO
|
||||||
|
GENERATE_SQLITE3 = NO
|
||||||
|
SQLITE3_OUTPUT = sqlite3
|
||||||
|
SQLITE3_RECREATE_DB = YES
|
||||||
GENERATE_PERLMOD = NO
|
GENERATE_PERLMOD = NO
|
||||||
PERLMOD_LATEX = NO
|
PERLMOD_LATEX = NO
|
||||||
PERLMOD_PRETTY = YES
|
PERLMOD_PRETTY = YES
|
||||||
@ -272,19 +312,20 @@ GENERATE_TAGFILE = @CMAKE_DOXYGEN_OUTPUT_PATH@/html/opencv.tag
|
|||||||
ALLEXTERNALS = NO
|
ALLEXTERNALS = NO
|
||||||
EXTERNAL_GROUPS = YES
|
EXTERNAL_GROUPS = YES
|
||||||
EXTERNAL_PAGES = YES
|
EXTERNAL_PAGES = YES
|
||||||
CLASS_DIAGRAMS = YES
|
|
||||||
DIA_PATH =
|
|
||||||
HIDE_UNDOC_RELATIONS = NO
|
HIDE_UNDOC_RELATIONS = NO
|
||||||
HAVE_DOT = @CMAKECONFIG_HAVE_DOT@
|
HAVE_DOT = @CMAKECONFIG_HAVE_DOT@
|
||||||
DOT_NUM_THREADS = 0
|
DOT_NUM_THREADS = 0
|
||||||
DOT_FONTNAME = Helvetica
|
|
||||||
DOT_FONTSIZE = 10
|
|
||||||
DOT_FONTPATH =
|
DOT_FONTPATH =
|
||||||
|
DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
|
||||||
|
DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
|
||||||
|
DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
|
||||||
CLASS_GRAPH = YES
|
CLASS_GRAPH = YES
|
||||||
COLLABORATION_GRAPH = YES
|
COLLABORATION_GRAPH = YES
|
||||||
GROUP_GRAPHS = YES
|
GROUP_GRAPHS = NO
|
||||||
UML_LOOK = YES
|
UML_LOOK = YES
|
||||||
UML_LIMIT_NUM_FIELDS = 10
|
UML_LIMIT_NUM_FIELDS = 10
|
||||||
|
DOT_UML_DETAILS = NO
|
||||||
|
DOT_WRAP_THRESHOLD = 17
|
||||||
TEMPLATE_RELATIONS = YES
|
TEMPLATE_RELATIONS = YES
|
||||||
INCLUDE_GRAPH = YES
|
INCLUDE_GRAPH = YES
|
||||||
INCLUDED_BY_GRAPH = YES
|
INCLUDED_BY_GRAPH = YES
|
||||||
@ -292,15 +333,19 @@ CALL_GRAPH = YES
|
|||||||
CALLER_GRAPH = NO
|
CALLER_GRAPH = NO
|
||||||
GRAPHICAL_HIERARCHY = YES
|
GRAPHICAL_HIERARCHY = YES
|
||||||
DIRECTORY_GRAPH = YES
|
DIRECTORY_GRAPH = YES
|
||||||
|
DIR_GRAPH_MAX_DEPTH = 1
|
||||||
DOT_IMAGE_FORMAT = @CMAKECONFIG_DOT_IMAGE_FORMAT@
|
DOT_IMAGE_FORMAT = @CMAKECONFIG_DOT_IMAGE_FORMAT@
|
||||||
INTERACTIVE_SVG = @CMAKECONFIG_INTERACTIVE_SVG@
|
INTERACTIVE_SVG = @CMAKECONFIG_INTERACTIVE_SVG@
|
||||||
DOT_PATH = @CMAKECONFIG_DOT_PATH@
|
DOT_PATH = @CMAKECONFIG_DOT_PATH@
|
||||||
DOTFILE_DIRS =
|
DOTFILE_DIRS =
|
||||||
MSCFILE_DIRS =
|
|
||||||
DIAFILE_DIRS =
|
DIAFILE_DIRS =
|
||||||
DOT_GRAPH_MAX_NODES = 50
|
PLANTUML_JAR_PATH =
|
||||||
|
PLANTUML_CFG_FILE =
|
||||||
|
PLANTUML_INCLUDE_PATH =
|
||||||
|
DOT_GRAPH_MAX_NODES = 250
|
||||||
MAX_DOT_GRAPH_DEPTH = 0
|
MAX_DOT_GRAPH_DEPTH = 0
|
||||||
DOT_TRANSPARENT = NO
|
|
||||||
DOT_MULTI_TARGETS = NO
|
DOT_MULTI_TARGETS = NO
|
||||||
GENERATE_LEGEND = YES
|
GENERATE_LEGEND = YES
|
||||||
DOT_CLEANUP = YES
|
DOT_CLEANUP = YES
|
||||||
|
MSCGEN_TOOL =
|
||||||
|
MSCFILE_DIRS =
|
||||||
|
@ -336,7 +336,7 @@ function installDOM(){
|
|||||||
- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/5.x/data/haarcascades).
|
- Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/5.x/data/haarcascades).
|
||||||
- Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:
|
- Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The following command should generate the file `output3.jpg`:
|
The following command should generate the file `output3.jpg`:
|
||||||
|
|
||||||
|
@ -1,22 +1,20 @@
|
|||||||
//<![CDATA[
|
//<![CDATA[
|
||||||
MathJax.Hub.Config(
|
window.MathJax = {
|
||||||
{
|
tex: {
|
||||||
TeX: {
|
macros: {
|
||||||
Macros: {
|
matTT: [ "\\[ \\left|\\begin{array}{ccc} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{array}\\right| \\]", 9],
|
||||||
matTT: [ "\\[ \\left|\\begin{array}{ccc} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{array}\\right| \\]", 9],
|
fork: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ \\end{array} \\right.", 4],
|
||||||
fork: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ \\end{array} \\right.", 4],
|
forkthree: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ \\end{array} \\right.", 6],
|
||||||
forkthree: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ \\end{array} \\right.", 6],
|
forkfour: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ #7 & \\mbox{#8}\\\\ \\end{array} \\right.", 8],
|
||||||
forkfour: ["\\left\\{ \\begin{array}{l l} #1 & \\mbox{#2}\\\\ #3 & \\mbox{#4}\\\\ #5 & \\mbox{#6}\\\\ #7 & \\mbox{#8}\\\\ \\end{array} \\right.", 8],
|
vecthree: ["\\begin{bmatrix} #1\\\\ #2\\\\ #3 \\end{bmatrix}", 3],
|
||||||
vecthree: ["\\begin{bmatrix} #1\\\\ #2\\\\ #3 \\end{bmatrix}", 3],
|
vecthreethree: ["\\begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{bmatrix}", 9],
|
||||||
vecthreethree: ["\\begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \\end{bmatrix}", 9],
|
cameramatrix: ["#1 = \\begin{bmatrix} f_x & 0 & c_x\\\\ 0 & f_y & c_y\\\\ 0 & 0 & 1 \\end{bmatrix}", 1],
|
||||||
cameramatrix: ["#1 = \\begin{bmatrix} f_x & 0 & c_x\\\\ 0 & f_y & c_y\\\\ 0 & 0 & 1 \\end{bmatrix}", 1],
|
distcoeffs: ["(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]]) \\text{ of 4, 5, 8, 12 or 14 elements}"],
|
||||||
distcoeffs: ["(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \\tau_x, \\tau_y]]]]) \\text{ of 4, 5, 8, 12 or 14 elements}"],
|
distcoeffsfisheye: ["(k_1, k_2, k_3, k_4)"],
|
||||||
distcoeffsfisheye: ["(k_1, k_2, k_3, k_4)"],
|
hdotsfor: ["\\dots", 1],
|
||||||
hdotsfor: ["\\dots", 1],
|
mathbbm: ["\\mathbb{#1}", 1],
|
||||||
mathbbm: ["\\mathbb{#1}", 1],
|
bordermatrix: ["\\matrix{#1}", 1]
|
||||||
bordermatrix: ["\\matrix{#1}", 1]
|
}
|
||||||
}
|
}
|
||||||
}
|
};
|
||||||
}
|
|
||||||
);
|
|
||||||
//]]>
|
//]]>
|
||||||
|
@ -82,62 +82,38 @@ Building OpenCV from source
|
|||||||
-# Extract it to a folder, opencv and create a new folder build in it.
|
-# Extract it to a folder, opencv and create a new folder build in it.
|
||||||
-# Open CMake-gui (*Start \> All Programs \> CMake-gui*)
|
-# Open CMake-gui (*Start \> All Programs \> CMake-gui*)
|
||||||
-# Fill the fields as follows (see the image below):
|
-# Fill the fields as follows (see the image below):
|
||||||
|
|
||||||
-# Click on **Browse Source...** and locate the opencv folder.
|
-# Click on **Browse Source...** and locate the opencv folder.
|
||||||
|
|
||||||
-# Click on **Browse Build...** and locate the build folder we created.
|
-# Click on **Browse Build...** and locate the build folder we created.
|
||||||
|
|
||||||
-# Click on **Configure**.
|
-# Click on **Configure**.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# It will open a new window to select the compiler. Choose appropriate compiler (here,
|
-# It will open a new window to select the compiler. Choose appropriate compiler (here,
|
||||||
Visual Studio 11) and click **Finish**.
|
Visual Studio 11) and click **Finish**.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Wait until analysis is finished.
|
-# Wait until analysis is finished.
|
||||||
|
|
||||||
-# You will see all the fields are marked in red. Click on the **WITH** field to expand it. It
|
-# You will see all the fields are marked in red. Click on the **WITH** field to expand it. It
|
||||||
decides what extra features you need. So mark appropriate fields. See the below image:
|
decides what extra features you need. So mark appropriate fields. See the below image:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Now click on **BUILD** field to expand it. First few fields configure the build method. See the
|
-# Now click on **BUILD** field to expand it. First few fields configure the build method. See the
|
||||||
below image:
|
below image:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Remaining fields specify what modules are to be built. Since GPU modules are not yet supported
|
-# Remaining fields specify what modules are to be built. Since GPU modules are not yet supported
|
||||||
by OpenCV-Python, you can completely avoid it to save time (But if you work with them, keep it
|
by OpenCV-Python, you can completely avoid it to save time (But if you work with them, keep it
|
||||||
there). See the image below:
|
there). See the image below:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Now click on **ENABLE** field to expand it. Make sure **ENABLE_SOLUTION_FOLDERS** is unchecked
|
-# Now click on **ENABLE** field to expand it. Make sure **ENABLE_SOLUTION_FOLDERS** is unchecked
|
||||||
(Solution folders are not supported by Visual Studio Express edition). See the image below:
|
(Solution folders are not supported by Visual Studio Express edition). See the image below:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Also make sure that in the **PYTHON** field, everything is filled. (Ignore
|
-# Also make sure that in the **PYTHON** field, everything is filled. (Ignore
|
||||||
PYTHON_DEBUG_LIBRARY). See image below:
|
PYTHON_DEBUG_LIBRARY). See image below:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Finally click the **Generate** button.
|
-# Finally click the **Generate** button.
|
||||||
|
|
||||||
-# Now go to our **opencv/build** folder. There you will find **OpenCV.sln** file. Open it with
|
-# Now go to our **opencv/build** folder. There you will find **OpenCV.sln** file. Open it with
|
||||||
Visual Studio.
|
Visual Studio.
|
||||||
|
|
||||||
-# Check build mode as **Release** instead of **Debug**.
|
-# Check build mode as **Release** instead of **Debug**.
|
||||||
|
|
||||||
-# In the solution explorer, right-click on the **Solution** (or **ALL_BUILD**) and build it. It
|
-# In the solution explorer, right-click on the **Solution** (or **ALL_BUILD**) and build it. It
|
||||||
will take some time to finish.
|
will take some time to finish.
|
||||||
|
|
||||||
-# Again, right-click on **INSTALL** and build it. Now OpenCV-Python will be installed.
|
-# Again, right-click on **INSTALL** and build it. Now OpenCV-Python will be installed.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
-# Open Python IDLE and enter 'import cv2 as cv'. If no error, it is installed correctly.
|
-# Open Python IDLE and enter 'import cv2 as cv'. If no error, it is installed correctly.
|
||||||
|
|
||||||
@note We have installed with no other support like TBB, Eigen, Qt, Documentation etc. It would be
|
@note We have installed with no other support like TBB, Eigen, Qt, Documentation etc. It would be
|
||||||
|
@ -102,10 +102,10 @@ python3 multiview_calibration.py --help
|
|||||||
```
|
```
|
||||||
|
|
||||||
The expected output in Linux terminal for `multiview_calibration_images` data (from `opencv_extra/testdata/python/` generated in Blender) should be the following:
|
The expected output in Linux terminal for `multiview_calibration_images` data (from `opencv_extra/testdata/python/` generated in Blender) should be the following:
|
||||||

|

|
||||||
|
|
||||||
The expected output for real-life calibration images in `opencv_extra/testdata/python/real_multiview_calibration_images` is the following:
|
The expected output for real-life calibration images in `opencv_extra/testdata/python/real_multiview_calibration_images` is the following:
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
Python visualization
|
Python visualization
|
||||||
@ -116,21 +116,21 @@ Firstly, the sample shows positions of cameras, checkerboard (of a random frame)
|
|||||||
If images are not known, then a simple plot with arrows (from given point to the back-projected one) visualizing errors is shown. The color of arrows highlights the error values. Additionally, the title reports mean error on this frame, and its accuracy among other frames used in calibration.
|
If images are not known, then a simple plot with arrows (from given point to the back-projected one) visualizing errors is shown. The color of arrows highlights the error values. Additionally, the title reports mean error on this frame, and its accuracy among other frames used in calibration.
|
||||||
The following test instances were synthetically generated (see `opencv/apps/python-calibration-generator/calibration_generator.py`):
|
The following test instances were synthetically generated (see `opencv/apps/python-calibration-generator/calibration_generator.py`):
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
This instance has large Gaussian points noise.
|
This instance has large Gaussian points noise.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
Another example, with more complex tree structure is here, it shows a weak connection between two groups of cameras.
|
Another example, with more complex tree structure is here, it shows a weak connection between two groups of cameras.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
If files to images are provided, then the output is an image with plotted arrows:
|
If files to images are provided, then the output is an image with plotted arrows:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
Details Of The Algorithm
|
Details Of The Algorithm
|
||||||
@ -153,7 +153,7 @@ Details Of The Algorithm
|
|||||||
* a. To reduce the total number of parameters, all rotation and translation vectors estimated in the first step from intrinsics calibration with the lowest error are transformed to be relative with respect to the root camera.
|
* a. To reduce the total number of parameters, all rotation and translation vectors estimated in the first step from intrinsics calibration with the lowest error are transformed to be relative with respect to the root camera.
|
||||||
* b. The total number of parameters is (NUM_CAMERAS - 1) x (3 + 3) + NUM_FRAMES x (3 + 3), where 3 stands for a rotation vector and 3 for a translation vector. The first part of parameters are extrinsics, and the second part is for rotation and translation vectors per frame.
|
* b. The total number of parameters is (NUM_CAMERAS - 1) x (3 + 3) + NUM_FRAMES x (3 + 3), where 3 stands for a rotation vector and 3 for a translation vector. The first part of parameters are extrinsics, and the second part is for rotation and translation vectors per frame.
|
||||||
* c. Robust function is additionally applied to mitigate impact of outlier points during the optimization. The function has the shape of derivative of Gaussian, or it is $x * exp(-x/s)$ (efficiently implemented by approximation of the `exp`), where `x` is a square pixel error, and `s` is manually pre-defined scale. The choice of this function is that it is increasing on the interval of `0` to `y` pixel error, and it’s decreasing thereafter. The idea is that the function slightly decreases errors until it reaches `y`, and if error is too high (more than `y`) then its robust value limits to `0`. The value of scale factor was found by exhaustive evaluation that forces robust function to almost linearly increase until the robust value of an error is 10 px and decrease afterwards (see graph of the function below). The value itself is equal to 30, but could be modified in OpenCV source code.
|
* c. Robust function is additionally applied to mitigate impact of outlier points during the optimization. The function has the shape of derivative of Gaussian, or it is $x * exp(-x/s)$ (efficiently implemented by approximation of the `exp`), where `x` is a square pixel error, and `s` is manually pre-defined scale. The choice of this function is that it is increasing on the interval of `0` to `y` pixel error, and it’s decreasing thereafter. The idea is that the function slightly decreases errors until it reaches `y`, and if error is too high (more than `y`) then its robust value limits to `0`. The value of scale factor was found by exhaustive evaluation that forces robust function to almost linearly increase until the robust value of an error is 10 px and decrease afterwards (see graph of the function below). The value itself is equal to 30, but could be modified in OpenCV source code.
|
||||||

|

|
||||||
|
|
||||||
Method Input
|
Method Input
|
||||||
----
|
----
|
||||||
|
@ -87,7 +87,7 @@ Fourier Transform too needs to be of a discrete type resulting in a Discrete Fou
|
|||||||
(*DFT*). You'll want to use this whenever you need to determine the structure of an image from a
|
(*DFT*). You'll want to use this whenever you need to determine the structure of an image from a
|
||||||
geometrical point of view. Here are the steps to follow (in case of a gray scale input image *I*):
|
geometrical point of view. Here are the steps to follow (in case of a gray scale input image *I*):
|
||||||
|
|
||||||
#### Expand the image to an optimal size
|
### Expand the image to an optimal size
|
||||||
|
|
||||||
The performance of a DFT is dependent of the image
|
The performance of a DFT is dependent of the image
|
||||||
size. It tends to be the fastest for image sizes that are multiple of the numbers two, three and
|
size. It tends to be the fastest for image sizes that are multiple of the numbers two, three and
|
||||||
@ -108,7 +108,7 @@ image (the appended pixels are initialized with zero):
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py expand
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py expand
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Make place for both the complex and the real values
|
### Make place for both the complex and the real values
|
||||||
|
|
||||||
The result of a Fourier Transform is
|
The result of a Fourier Transform is
|
||||||
complex. This implies that for each image value the result is two image values (one per
|
complex. This implies that for each image value the result is two image values (one per
|
||||||
@ -128,7 +128,7 @@ input image to this type and expand it with another channel to hold the complex
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py complex_and_real
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py complex_and_real
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Make the Discrete Fourier Transform
|
### Make the Discrete Fourier Transform
|
||||||
It's possible an in-place calculation (same input as
|
It's possible an in-place calculation (same input as
|
||||||
output):
|
output):
|
||||||
|
|
||||||
@ -144,7 +144,7 @@ output):
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py dft
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py dft
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Transform the real and complex values to magnitude
|
### Transform the real and complex values to magnitude
|
||||||
A complex number has a real (*Re*) and a
|
A complex number has a real (*Re*) and a
|
||||||
complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a
|
complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a
|
||||||
DFT is:
|
DFT is:
|
||||||
@ -165,7 +165,7 @@ Translated to OpenCV code:
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py magnitude
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py magnitude
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Switch to a logarithmic scale
|
### Switch to a logarithmic scale
|
||||||
It turns out that the dynamic range of the Fourier
|
It turns out that the dynamic range of the Fourier
|
||||||
coefficients is too large to be displayed on the screen. We have some small and some high
|
coefficients is too large to be displayed on the screen. We have some small and some high
|
||||||
changing values that we can't observe like this. Therefore the high values will all turn out as
|
changing values that we can't observe like this. Therefore the high values will all turn out as
|
||||||
@ -188,7 +188,7 @@ Translated to OpenCV code:
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py log
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py log
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Crop and rearrange
|
### Crop and rearrange
|
||||||
Remember, that at the first step, we expanded the image? Well, it's time
|
Remember, that at the first step, we expanded the image? Well, it's time
|
||||||
to throw away the newly introduced values. For visualization purposes we may also rearrange the
|
to throw away the newly introduced values. For visualization purposes we may also rearrange the
|
||||||
quadrants of the result, so that the origin (zero, zero) corresponds with the image center.
|
quadrants of the result, so that the origin (zero, zero) corresponds with the image center.
|
||||||
@ -205,7 +205,7 @@ quadrants of the result, so that the origin (zero, zero) corresponds with the im
|
|||||||
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py crop_rearrange
|
@snippet python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py crop_rearrange
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Normalize
|
### Normalize
|
||||||
This is done again for visualization purposes. We now have the magnitudes,
|
This is done again for visualization purposes. We now have the magnitudes,
|
||||||
however these are still out of our image display range of zero to one. We normalize our values to
|
however these are still out of our image display range of zero to one. We normalize our values to
|
||||||
this range using the @ref cv::normalize() function.
|
this range using the @ref cv::normalize() function.
|
||||||
|
@ -73,7 +73,7 @@ Here's a function that will do this:
|
|||||||
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp basic_method
|
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp basic_method
|
||||||
|
|
||||||
At first we make sure that the input image's data is in unsigned char format. For this we use the
|
At first we make sure that the input image's data is in unsigned char format. For this we use the
|
||||||
@ref cv::CV_Assert function that throws an error when the expression inside it is false.
|
@ref CV_Assert function (macro) that throws an error when the expression inside it is false.
|
||||||
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp 8_bit
|
@snippet samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp 8_bit
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
|
@ -22,7 +22,7 @@ In this tutorial, we first introduce how to obtain the custom OCR model, then ho
|
|||||||
|
|
||||||
After completing the model training, please use [transform_to_onnx.py](https://github.com/zihaomu/deep-text-recognition-benchmark/blob/master/transform_to_onnx.py) to convert the model into onnx format.
|
After completing the model training, please use [transform_to_onnx.py](https://github.com/zihaomu/deep-text-recognition-benchmark/blob/master/transform_to_onnx.py) to convert the model into onnx format.
|
||||||
|
|
||||||
#### Execute in webcam
|
### Execute in webcam
|
||||||
The Python version example code can be found at [here](https://github.com/opencv/opencv/blob/5.x/samples/dnn/text_detection.py).
|
The Python version example code can be found at [here](https://github.com/opencv/opencv/blob/5.x/samples/dnn/text_detection.py).
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
@ -44,9 +44,10 @@ Their performance at different text recognition datasets is shown in the table b
|
|||||||
| CRNN_VGG-BiLSTM-CTC | 82.63 | 82.07 | 92.96 | 88.867 | 66.28 | 71.01 | 62.37 | 78.03 | 8.45 |
|
| CRNN_VGG-BiLSTM-CTC | 82.63 | 82.07 | 92.96 | 88.867 | 66.28 | 71.01 | 62.37 | 78.03 | 8.45 |
|
||||||
| ResNet-CTC | 84.00 | 84.08 | 92.39 | 88.96 | 67.74 | 74.73 | 67.60 | 79.93 | 44.28 |
|
| ResNet-CTC | 84.00 | 84.08 | 92.39 | 88.96 | 67.74 | 74.73 | 67.60 | 79.93 | 44.28 |
|
||||||
|
|
||||||
The performance of the text recognition model were tesred on OpenCV DNN, and does not include the text detection model.
|
The performance of the text recognition model were tested on OpenCV DNN, and does not include the text detection model.
|
||||||
|
|
||||||
|
### Model selection suggestion
|
||||||
|
|
||||||
#### Model selection suggestion:
|
|
||||||
The input of the text recognition model is the output of the text detection model, which causes the performance of text detection to greatly affect the performance of text recognition.
|
The input of the text recognition model is the output of the text detection model, which causes the performance of text detection to greatly affect the performance of text recognition.
|
||||||
|
|
||||||
DenseNet_CTC has the smallest parameters and best FPS, and it is suitable for edge devices, which are very sensitive to the cost of calculation. If you have limited computing resources and want to achieve better accuracy, VGG_CTC is a good choice.
|
DenseNet_CTC has the smallest parameters and best FPS, and it is suitable for edge devices, which are very sensitive to the cost of calculation. If you have limited computing resources and want to achieve better accuracy, VGG_CTC is a good choice.
|
||||||
|
@ -17,7 +17,7 @@ In this tutorial you will learn how to use opencv_dnn module for image classific
|
|||||||
GoogLeNet trained network from [Caffe model zoo](http://caffe.berkeleyvision.org/model_zoo.html).
|
GoogLeNet trained network from [Caffe model zoo](http://caffe.berkeleyvision.org/model_zoo.html).
|
||||||
|
|
||||||
We will demonstrate results of this example on the following picture.
|
We will demonstrate results of this example on the following picture.
|
||||||

|

|
||||||
|
|
||||||
Source Code
|
Source Code
|
||||||
-----------
|
-----------
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
In this tutorial, we will introduce the APIs for TextRecognitionModel and TextDetectionModel in detail.
|
In this tutorial, we will introduce the APIs for TextRecognitionModel and TextDetectionModel in detail.
|
||||||
|
|
||||||
---
|
---
|
||||||
#### TextRecognitionModel:
|
### TextRecognitionModel
|
||||||
|
|
||||||
In the current version, @ref cv::dnn::TextRecognitionModel only supports CNN+RNN+CTC based algorithms,
|
In the current version, @ref cv::dnn::TextRecognitionModel only supports CNN+RNN+CTC based algorithms,
|
||||||
and the greedy decoding method for CTC is provided.
|
and the greedy decoding method for CTC is provided.
|
||||||
@ -38,7 +38,7 @@ Before recognition, you should `setVocabulary` and `setDecodeType`.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
#### TextDetectionModel:
|
### TextDetectionModel
|
||||||
|
|
||||||
@ref cv::dnn::TextDetectionModel API provides these methods for text detection:
|
@ref cv::dnn::TextDetectionModel API provides these methods for text detection:
|
||||||
- cv::dnn::TextDetectionModel::detect() returns the results in std::vector<std::vector<Point>> (4-points quadrangles)
|
- cv::dnn::TextDetectionModel::detect() returns the results in std::vector<std::vector<Point>> (4-points quadrangles)
|
||||||
@ -60,7 +60,7 @@ We encourage you to add new algorithms to these APIs.
|
|||||||
|
|
||||||
## Pretrained Models
|
## Pretrained Models
|
||||||
|
|
||||||
#### TextRecognitionModel:
|
### TextRecognitionModel
|
||||||
|
|
||||||
```
|
```
|
||||||
crnn.onnx:
|
crnn.onnx:
|
||||||
@ -92,7 +92,7 @@ More models can be found in [here](https://drive.google.com/drive/folders/1cTbQ3
|
|||||||
which are taken from [clovaai](https://github.com/clovaai/deep-text-recognition-benchmark).
|
which are taken from [clovaai](https://github.com/clovaai/deep-text-recognition-benchmark).
|
||||||
You can train more models by [CRNN](https://github.com/meijieru/crnn.pytorch), and convert models by `torch.onnx.export`.
|
You can train more models by [CRNN](https://github.com/meijieru/crnn.pytorch), and convert models by `torch.onnx.export`.
|
||||||
|
|
||||||
#### TextDetectionModel:
|
### TextDetectionModel
|
||||||
|
|
||||||
```
|
```
|
||||||
- DB_IC15_resnet50.onnx:
|
- DB_IC15_resnet50.onnx:
|
||||||
@ -297,7 +297,7 @@ For more information, please refer to:
|
|||||||
- [samples/dnn/text_detection.cpp](https://github.com/opencv/opencv/blob/5.x/samples/dnn/text_detection.cpp)
|
- [samples/dnn/text_detection.cpp](https://github.com/opencv/opencv/blob/5.x/samples/dnn/text_detection.cpp)
|
||||||
- [samples/dnn/scene_text_spotting.cpp](https://github.com/opencv/opencv/blob/5.x/samples/dnn/scene_text_spotting.cpp)
|
- [samples/dnn/scene_text_spotting.cpp](https://github.com/opencv/opencv/blob/5.x/samples/dnn/scene_text_spotting.cpp)
|
||||||
|
|
||||||
#### Test with an image
|
### Test with an image
|
||||||
Examples:
|
Examples:
|
||||||
```bash
|
```bash
|
||||||
example_dnn_scene_text_recognition -mp=path/to/crnn_cs.onnx -i=path/to/an/image -rgb=1 -vp=/path/to/alphabet_94.txt
|
example_dnn_scene_text_recognition -mp=path/to/crnn_cs.onnx -i=path/to/an/image -rgb=1 -vp=/path/to/alphabet_94.txt
|
||||||
@ -306,7 +306,7 @@ example_dnn_scene_text_spotting -dmp=path/to/DB_IC15_resnet50.onnx -rmp=path/to/
|
|||||||
example_dnn_text_detection -dmp=path/to/EAST.pb -rmp=path/to/crnn_cs.onnx -i=path/to/an/image -rgb=1 -vp=path/to/alphabet_94.txt
|
example_dnn_text_detection -dmp=path/to/EAST.pb -rmp=path/to/crnn_cs.onnx -i=path/to/an/image -rgb=1 -vp=path/to/alphabet_94.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Test on public datasets
|
### Test on public datasets
|
||||||
Text Recognition:
|
Text Recognition:
|
||||||
|
|
||||||
The download link for testing images can be found in the **Images for Testing**
|
The download link for testing images can be found in the **Images for Testing**
|
||||||
|
@ -111,7 +111,7 @@ called and it will update the output image based on the current trackbar values.
|
|||||||
|
|
||||||
Let's analyze these two functions:
|
Let's analyze these two functions:
|
||||||
|
|
||||||
#### The erosion function
|
### The erosion function (CPP)
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
|
@snippet cpp/tutorial_code/ImgProc/Morphology_1.cpp erosion
|
||||||
|
|
||||||
@ -135,7 +135,7 @@ receives three arguments:
|
|||||||
|
|
||||||
That is all. We are ready to perform the erosion of our image.
|
That is all. We are ready to perform the erosion of our image.
|
||||||
|
|
||||||
#### The dilation function
|
### The dilation function (CPP)
|
||||||
|
|
||||||
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
||||||
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
||||||
@ -175,7 +175,7 @@ In short we
|
|||||||
The action and state changed listeners added call at the end the `update` method which updates
|
The action and state changed listeners added call at the end the `update` method which updates
|
||||||
the image based on the current slider values. So every time we move any slider, the `update` method is triggered.
|
the image based on the current slider values. So every time we move any slider, the `update` method is triggered.
|
||||||
|
|
||||||
#### Updating the image
|
### Updating the image (Java)
|
||||||
|
|
||||||
To update the image we used the following implementation:
|
To update the image we used the following implementation:
|
||||||
|
|
||||||
@ -190,7 +190,7 @@ In other words we
|
|||||||
|
|
||||||
Let's analyze the `erode` and `dilate` methods:
|
Let's analyze the `erode` and `dilate` methods:
|
||||||
|
|
||||||
#### The erosion method
|
### The erosion method (Java)
|
||||||
|
|
||||||
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java erosion
|
@snippet java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java erosion
|
||||||
|
|
||||||
@ -213,7 +213,7 @@ receives three arguments:
|
|||||||
|
|
||||||
That is all. We are ready to perform the erosion of our image.
|
That is all. We are ready to perform the erosion of our image.
|
||||||
|
|
||||||
#### The dilation function
|
### The dilation function (Java)
|
||||||
|
|
||||||
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
||||||
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
||||||
@ -240,7 +240,7 @@ called and it will update the output image based on the current trackbar values.
|
|||||||
|
|
||||||
Let's analyze these two functions:
|
Let's analyze these two functions:
|
||||||
|
|
||||||
#### The erosion function
|
### The erosion function (Python)
|
||||||
|
|
||||||
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py erosion
|
@snippet python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py erosion
|
||||||
|
|
||||||
@ -262,7 +262,7 @@ specified, it is assumed to be in the center.
|
|||||||
|
|
||||||
That is all. We are ready to perform the erosion of our image.
|
That is all. We are ready to perform the erosion of our image.
|
||||||
|
|
||||||
#### The dilation function
|
### The dilation function (Python)
|
||||||
|
|
||||||
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
The code is below. As you can see, it is completely similar to the snippet of code for **erosion**.
|
||||||
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
Here we also have the option of defining our kernel, its anchor point and the size of the operator
|
||||||
|
@ -133,7 +133,7 @@ Explanation
|
|||||||
Let's check the OpenCV functions that involve only the smoothing procedure, since the rest is
|
Let's check the OpenCV functions that involve only the smoothing procedure, since the rest is
|
||||||
already known by now.
|
already known by now.
|
||||||
|
|
||||||
#### Normalized Block Filter:
|
### Normalized Block Filter:
|
||||||
|
|
||||||
- OpenCV offers the function **blur()** to perform smoothing with this filter.
|
- OpenCV offers the function **blur()** to perform smoothing with this filter.
|
||||||
We specify 4 arguments (more details, check the Reference):
|
We specify 4 arguments (more details, check the Reference):
|
||||||
@ -157,7 +157,7 @@ already known by now.
|
|||||||
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py blur
|
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py blur
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Gaussian Filter:
|
### Gaussian Filter:
|
||||||
|
|
||||||
- It is performed by the function **GaussianBlur()** :
|
- It is performed by the function **GaussianBlur()** :
|
||||||
Here we use 4 arguments (more details, check the OpenCV reference):
|
Here we use 4 arguments (more details, check the OpenCV reference):
|
||||||
@ -183,7 +183,7 @@ already known by now.
|
|||||||
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py gaussianblur
|
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py gaussianblur
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Median Filter:
|
### Median Filter:
|
||||||
|
|
||||||
- This filter is provided by the **medianBlur()** function:
|
- This filter is provided by the **medianBlur()** function:
|
||||||
We use three arguments:
|
We use three arguments:
|
||||||
@ -203,7 +203,7 @@ already known by now.
|
|||||||
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py medianblur
|
@snippet samples/python/tutorial_code/imgProc/Smoothing/smoothing.py medianblur
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Bilateral Filter
|
### Bilateral Filter
|
||||||
|
|
||||||
- Provided by OpenCV function **bilateralFilter()**
|
- Provided by OpenCV function **bilateralFilter()**
|
||||||
We use 5 arguments:
|
We use 5 arguments:
|
||||||
|
@ -78,7 +78,7 @@ You can also download it from
|
|||||||
Explanation
|
Explanation
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
#### Declare the variables
|
### Declare the variables
|
||||||
|
|
||||||
First we declare the variables we are going to use:
|
First we declare the variables we are going to use:
|
||||||
|
|
||||||
@ -97,7 +97,7 @@ First we declare the variables we are going to use:
|
|||||||
The variable *rng* deserves special attention: it is a random number generator. We use it to
|
The variable *rng* deserves special attention: it is a random number generator. We use it to
|
||||||
generate the random border color, as we will see soon.
|
generate the random border color, as we will see soon.
|
||||||
|
|
||||||
#### Load an image
|
### Load an image
|
||||||
|
|
||||||
As usual we load our source image *src*:
|
As usual we load our source image *src*:
|
||||||
|
|
||||||
@ -113,7 +113,7 @@ As usual we load our source image *src*:
|
|||||||
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py load
|
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Create a window
|
### Create a window
|
||||||
|
|
||||||
After giving a short intro of how to use the program, we create a window:
|
After giving a short intro of how to use the program, we create a window:
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ After giving a short intro of how to use the program, we create a window:
|
|||||||
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py create_window
|
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py create_window
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Initialize arguments
|
### Initialize arguments
|
||||||
|
|
||||||
Now we initialize the argument that defines the size of the borders (*top*, *bottom*, *left* and
|
Now we initialize the argument that defines the size of the borders (*top*, *bottom*, *left* and
|
||||||
*right*). We give them a value of 5% the size of *src*.
|
*right*). We give them a value of 5% the size of *src*.
|
||||||
@ -146,7 +146,7 @@ Now we initialize the argument that defines the size of the borders (*top*, *bot
|
|||||||
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py init_arguments
|
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py init_arguments
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Loop
|
### Loop
|
||||||
|
|
||||||
The program runs in an infinite loop while the key **ESC** isn't pressed.
|
The program runs in an infinite loop while the key **ESC** isn't pressed.
|
||||||
If the user presses '**c**' or '**r**', the *borderType* variable
|
If the user presses '**c**' or '**r**', the *borderType* variable
|
||||||
@ -164,7 +164,7 @@ takes the value of *BORDER_CONSTANT* or *BORDER_REPLICATE* respectively:
|
|||||||
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py check_keypress
|
@snippet python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py check_keypress
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Random color
|
### Random color
|
||||||
|
|
||||||
In each iteration (after 0.5 seconds), the random border color (*value*) is updated...
|
In each iteration (after 0.5 seconds), the random border color (*value*) is updated...
|
||||||
|
|
||||||
@ -182,7 +182,7 @@ In each iteration (after 0.5 seconds), the random border color (*value*) is upda
|
|||||||
|
|
||||||
This value is a set of three numbers picked randomly in the range \f$[0,255]\f$.
|
This value is a set of three numbers picked randomly in the range \f$[0,255]\f$.
|
||||||
|
|
||||||
#### Form a border around the image
|
### Form a border around the image
|
||||||
|
|
||||||
Finally, we call the function **copyMakeBorder()** to apply the respective padding:
|
Finally, we call the function **copyMakeBorder()** to apply the respective padding:
|
||||||
|
|
||||||
@ -209,7 +209,7 @@ Finally, we call the function **copyMakeBorder()** to apply the respective paddi
|
|||||||
-# *value*: If *borderType* is *BORDER_CONSTANT*, this is the value used to fill the border
|
-# *value*: If *borderType* is *BORDER_CONSTANT*, this is the value used to fill the border
|
||||||
pixels.
|
pixels.
|
||||||
|
|
||||||
#### Display the results
|
### Display the results
|
||||||
|
|
||||||
We display our output image in the image created previously
|
We display our output image in the image created previously
|
||||||
|
|
||||||
|
@ -94,7 +94,7 @@ You can also download it from
|
|||||||
Explanation
|
Explanation
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
#### Load an image
|
### Load an image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp load
|
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp load
|
||||||
@ -108,7 +108,7 @@ Explanation
|
|||||||
@snippet python/tutorial_code/ImgTrans/Filter2D/filter2D.py load
|
@snippet python/tutorial_code/ImgTrans/Filter2D/filter2D.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Initialize the arguments
|
### Initialize the arguments
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp init_arguments
|
@snippet cpp/tutorial_code/ImgTrans/filter2D_demo.cpp init_arguments
|
||||||
@ -122,7 +122,7 @@ Explanation
|
|||||||
@snippet python/tutorial_code/ImgTrans/Filter2D/filter2D.py init_arguments
|
@snippet python/tutorial_code/ImgTrans/Filter2D/filter2D.py init_arguments
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
##### Loop
|
### Loop
|
||||||
|
|
||||||
Perform an infinite loop updating the kernel size and applying our linear filter to the input
|
Perform an infinite loop updating the kernel size and applying our linear filter to the input
|
||||||
image. Let's analyze that more in detail:
|
image. Let's analyze that more in detail:
|
||||||
|
@ -74,7 +74,7 @@ Explanation
|
|||||||
|
|
||||||
The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/5.x/samples/data/smarties.png)
|
The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/5.x/samples/data/smarties.png)
|
||||||
|
|
||||||
#### Load an image:
|
### Load an image:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp load
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp load
|
||||||
@ -88,7 +88,7 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py load
|
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Convert it to grayscale:
|
### Convert it to grayscale:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp convert_to_gray
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp convert_to_gray
|
||||||
@ -102,7 +102,7 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py convert_to_gray
|
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py convert_to_gray
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Apply a Median blur to reduce noise and avoid false circle detection:
|
### Apply a Median blur to reduce noise and avoid false circle detection:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp reduce_noise
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp reduce_noise
|
||||||
@ -116,7 +116,7 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py reduce_noise
|
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py reduce_noise
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Proceed to apply Hough Circle Transform:
|
### Proceed to apply Hough Circle Transform:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp houghcircles
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp houghcircles
|
||||||
@ -144,7 +144,7 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
|
|||||||
- *min_radius = 0*: Minimum radius to be detected. If unknown, put zero as default.
|
- *min_radius = 0*: Minimum radius to be detected. If unknown, put zero as default.
|
||||||
- *max_radius = 0*: Maximum radius to be detected. If unknown, put zero as default.
|
- *max_radius = 0*: Maximum radius to be detected. If unknown, put zero as default.
|
||||||
|
|
||||||
#### Draw the detected circles:
|
### Draw the detected circles:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp draw
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp draw
|
||||||
@ -160,7 +160,7 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
|
|||||||
|
|
||||||
You can see that we will draw the circle(s) on red and the center(s) with a small green dot
|
You can see that we will draw the circle(s) on red and the center(s) with a small green dot
|
||||||
|
|
||||||
#### Display the detected circle(s) and wait for the user to exit the program:
|
### Display the detected circle(s) and wait for the user to exit the program:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp display
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp display
|
||||||
|
@ -129,7 +129,7 @@ The sample code that we will explain can be downloaded from
|
|||||||
Explanation
|
Explanation
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
#### Load an image:
|
### Load an image:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp load
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp load
|
||||||
@ -143,7 +143,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py load
|
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Detect the edges of the image by using a Canny detector:
|
### Detect the edges of the image by using a Canny detector:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp edge_detection
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp edge_detection
|
||||||
@ -160,7 +160,7 @@ Explanation
|
|||||||
Now we will apply the Hough Line Transform. We will explain how to use both OpenCV functions
|
Now we will apply the Hough Line Transform. We will explain how to use both OpenCV functions
|
||||||
available for this purpose.
|
available for this purpose.
|
||||||
|
|
||||||
#### Standard Hough Line Transform:
|
### Standard Hough Line Transform:
|
||||||
First, you apply the Transform:
|
First, you apply the Transform:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@ -199,7 +199,7 @@ And then you display the result by drawing the lines.
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py draw_lines
|
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py draw_lines
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Probabilistic Hough Line Transform
|
### Probabilistic Hough Line Transform
|
||||||
First you apply the transform:
|
First you apply the transform:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@ -242,7 +242,7 @@ And then you display the result by drawing the lines.
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py draw_lines_p
|
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py draw_lines_p
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Display the original image and the detected lines:
|
### Display the original image and the detected lines:
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp imshow
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp imshow
|
||||||
@ -256,7 +256,7 @@ And then you display the result by drawing the lines.
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py imshow
|
@snippet samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py imshow
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Wait until the user exits the program
|
### Wait until the user exits the program
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp exit
|
@snippet samples/cpp/tutorial_code/ImgTrans/houghlines.cpp exit
|
||||||
|
@ -81,7 +81,7 @@ Code
|
|||||||
Explanation
|
Explanation
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
#### Declare variables
|
### Declare variables
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp variables
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp variables
|
||||||
@ -95,7 +95,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py variables
|
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py variables
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Load source image
|
### Load source image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp load
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp load
|
||||||
@ -109,7 +109,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py load
|
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Reduce noise
|
### Reduce noise
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp reduce_noise
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp reduce_noise
|
||||||
@ -123,7 +123,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py reduce_noise
|
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py reduce_noise
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Grayscale
|
### Grayscale
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert_to_gray
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert_to_gray
|
||||||
@ -137,7 +137,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py convert_to_gray
|
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py convert_to_gray
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Laplacian operator
|
### Laplacian operator
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp laplacian
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp laplacian
|
||||||
@ -160,7 +160,7 @@ Explanation
|
|||||||
this example.
|
this example.
|
||||||
- *scale*, *delta* and *BORDER_DEFAULT*: We leave them as default values.
|
- *scale*, *delta* and *BORDER_DEFAULT*: We leave them as default values.
|
||||||
|
|
||||||
#### Convert output to a *CV_8U* image
|
### Convert output to a *CV_8U* image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp convert
|
||||||
@ -174,7 +174,7 @@ Explanation
|
|||||||
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py convert
|
@snippet samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py convert
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Display the result
|
### Display the result
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp display
|
@snippet cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp display
|
||||||
|
@ -58,7 +58,7 @@ Theory
|
|||||||
gradient of an image intensity function.
|
gradient of an image intensity function.
|
||||||
-# The Sobel Operator combines Gaussian smoothing and differentiation.
|
-# The Sobel Operator combines Gaussian smoothing and differentiation.
|
||||||
|
|
||||||
#### Formulation
|
### Formulation
|
||||||
|
|
||||||
Assuming that the image to be operated is \f$I\f$:
|
Assuming that the image to be operated is \f$I\f$:
|
||||||
|
|
||||||
@ -140,23 +140,23 @@ You can also download it from
|
|||||||
Explanation
|
Explanation
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
#### Declare variables
|
### Declare variables
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp variables
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp variables
|
||||||
|
|
||||||
#### Load source image
|
### Load source image
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp load
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp load
|
||||||
|
|
||||||
#### Reduce noise
|
### Reduce noise
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp reduce_noise
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp reduce_noise
|
||||||
|
|
||||||
#### Grayscale
|
### Grayscale
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert_to_gray
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert_to_gray
|
||||||
|
|
||||||
#### Sobel Operator
|
### Sobel Operator
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp sobel
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp sobel
|
||||||
|
|
||||||
@ -174,18 +174,18 @@ Explanation
|
|||||||
Notice that to calculate the gradient in *x* direction we use: \f$x_{order}= 1\f$ and
|
Notice that to calculate the gradient in *x* direction we use: \f$x_{order}= 1\f$ and
|
||||||
\f$y_{order} = 0\f$. We do analogously for the *y* direction.
|
\f$y_{order} = 0\f$. We do analogously for the *y* direction.
|
||||||
|
|
||||||
#### Convert output to a CV_8U image
|
### Convert output to a CV_8U image
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp convert
|
||||||
|
|
||||||
#### Gradient
|
### Gradient
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp blend
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp blend
|
||||||
|
|
||||||
We try to approximate the *gradient* by adding both directional gradients (note that
|
We try to approximate the *gradient* by adding both directional gradients (note that
|
||||||
this is not an exact calculation at all! but it is good for our purposes).
|
this is not an exact calculation at all! but it is good for our purposes).
|
||||||
|
|
||||||
#### Show results
|
### Show results
|
||||||
|
|
||||||
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp display
|
@snippet cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp display
|
||||||
|
|
||||||
|
@ -80,7 +80,7 @@ Explanation / Result
|
|||||||
|
|
||||||
Get image from [here](https://raw.githubusercontent.com/opencv/opencv/5.x/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
|
Get image from [here](https://raw.githubusercontent.com/opencv/opencv/5.x/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
|
||||||
|
|
||||||
#### Load Image
|
### Load Image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp load_image
|
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp load_image
|
||||||
@ -96,7 +96,7 @@ Get image from [here](https://raw.githubusercontent.com/opencv/opencv/5.x/doc/tu
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### Grayscale
|
### Grayscale
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp gray
|
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp gray
|
||||||
@ -112,7 +112,7 @@ Get image from [here](https://raw.githubusercontent.com/opencv/opencv/5.x/doc/tu
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### Grayscale to Binary image
|
### Grayscale to Binary image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp bin
|
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp bin
|
||||||
@ -128,7 +128,7 @@ Get image from [here](https://raw.githubusercontent.com/opencv/opencv/5.x/doc/tu
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### Output images
|
### Output images
|
||||||
|
|
||||||
Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines and as a consequence to separate the music notes from the music sheet, but first let's initialize the output images that we will use for that reason:
|
Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines and as a consequence to separate the music notes from the music sheet, but first let's initialize the output images that we will use for that reason:
|
||||||
|
|
||||||
@ -144,7 +144,7 @@ Now we are ready to apply morphological operations in order to extract the horiz
|
|||||||
@snippet samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py init
|
@snippet samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py init
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Structure elements
|
### Structure elements
|
||||||
|
|
||||||
As we specified in the theory in order to extract the object that we desire, we need to create the corresponding structure element. Since we want to extract the horizontal lines, a corresponding structure element for that purpose will have the following shape:
|
As we specified in the theory in order to extract the object that we desire, we need to create the corresponding structure element. Since we want to extract the horizontal lines, a corresponding structure element for that purpose will have the following shape:
|
||||||

|

|
||||||
@ -182,7 +182,7 @@ and again this is represented as follows:
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### Refine edges / Result
|
### Refine edges / Result
|
||||||
|
|
||||||
As you can see we are almost there. However, at that point you will notice that the edges of the notes are a bit rough. For that reason we need to refine the edges in order to obtain a smoother result:
|
As you can see we are almost there. However, at that point you will notice that the edges of the notes are a bit rough. For that reason we need to refine the edges in order to obtain a smoother result:
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ Theory
|
|||||||
pyramid (with less resolution)
|
pyramid (with less resolution)
|
||||||
- In this tutorial we'll use the *Gaussian pyramid*.
|
- In this tutorial we'll use the *Gaussian pyramid*.
|
||||||
|
|
||||||
#### Gaussian Pyramid
|
### Gaussian Pyramid
|
||||||
|
|
||||||
- Imagine the pyramid as a set of layers in which the higher the layer, the smaller the size.
|
- Imagine the pyramid as a set of layers in which the higher the layer, the smaller the size.
|
||||||
|
|
||||||
@ -100,7 +100,7 @@ Explanation
|
|||||||
|
|
||||||
Let's check the general structure of the program:
|
Let's check the general structure of the program:
|
||||||
|
|
||||||
#### Load an image
|
### Load an image
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp load
|
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp load
|
||||||
@ -114,7 +114,7 @@ Let's check the general structure of the program:
|
|||||||
@snippet python/tutorial_code/imgProc/Pyramids/pyramids.py load
|
@snippet python/tutorial_code/imgProc/Pyramids/pyramids.py load
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Create window
|
### Create window
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp show_image
|
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp show_image
|
||||||
@ -128,7 +128,7 @@ Let's check the general structure of the program:
|
|||||||
@snippet python/tutorial_code/imgProc/Pyramids/pyramids.py show_image
|
@snippet python/tutorial_code/imgProc/Pyramids/pyramids.py show_image
|
||||||
@end_toggle
|
@end_toggle
|
||||||
|
|
||||||
#### Loop
|
### Loop
|
||||||
|
|
||||||
@add_toggle_cpp
|
@add_toggle_cpp
|
||||||
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp loop
|
@snippet cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp loop
|
||||||
|
@ -85,7 +85,7 @@ Setup Device for Testing and Debugging
|
|||||||
Usually the recipe above works as expected, but in some cases there are additional actions that must
|
Usually the recipe above works as expected, but in some cases there are additional actions that must
|
||||||
be performed. In this section we'll cover some cases.
|
be performed. In this section we'll cover some cases.
|
||||||
|
|
||||||
#### Windows host computer
|
### Windows host computer
|
||||||
|
|
||||||
If you have Windows 10 or higher then you don't have to do additional actions to connect
|
If you have Windows 10 or higher then you don't have to do additional actions to connect
|
||||||
a phone and run samples on it. However, earlier Windows versions require a longer procedure:
|
a phone and run samples on it. However, earlier Windows versions require a longer procedure:
|
||||||
@ -163,7 +163,7 @@ a phone and run samples on it. However, earlier Windows versions require a longe
|
|||||||
-# Now, in Eclipse go Run -\> Run/Debug to run your application in regular or debugging mode.
|
-# Now, in Eclipse go Run -\> Run/Debug to run your application in regular or debugging mode.
|
||||||
Device Chooser will let you choose among the devices.
|
Device Chooser will let you choose among the devices.
|
||||||
|
|
||||||
#### Linux host computer
|
### Linux host computer
|
||||||
|
|
||||||
While the latest Ubuntu versions work well with connected Android devices, there can be issues on older versions.
|
While the latest Ubuntu versions work well with connected Android devices, there can be issues on older versions.
|
||||||
However, most of them can be fixed easily. You have to create a new **/etc/udev/rules.d/51-android.rules** configuration file that contains
|
However, most of them can be fixed easily. You have to create a new **/etc/udev/rules.d/51-android.rules** configuration file that contains
|
||||||
@ -184,7 +184,7 @@ R58MB40Q3VP device
|
|||||||
savuor@rostislav-laptop:~/Android/Sdk/platform-tools$
|
savuor@rostislav-laptop:~/Android/Sdk/platform-tools$
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Mac OS host computer
|
### Mac OS host computer
|
||||||
|
|
||||||
No actions are required, just connect your device via USB and run adb devices to check connection.
|
No actions are required, just connect your device via USB and run adb devices to check connection.
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@ This tutorial is deprecated.
|
|||||||
@tableofcontents
|
@tableofcontents
|
||||||
|
|
||||||
OpenCV with CUDA for Tegra
|
OpenCV with CUDA for Tegra
|
||||||
==========================
|
--------------------------
|
||||||
|
|
||||||
This document is a basic guide to building the OpenCV libraries with CUDA support for use in the Tegra environment. It covers the basic elements of building the version 3.1.0 libraries from source code for three (3) different types of platforms:
|
This document is a basic guide to building the OpenCV libraries with CUDA support for use in the Tegra environment. It covers the basic elements of building the version 3.1.0 libraries from source code for three (3) different types of platforms:
|
||||||
|
|
||||||
@ -36,7 +36,7 @@ The OpenCV build system supports native compilation for all the supported platfo
|
|||||||
At the present time, this document focuses only on native compilation.
|
At the present time, this document focuses only on native compilation.
|
||||||
|
|
||||||
Getting the Source Code {#tutorial_building_tegra_cuda_getting_the_code}
|
Getting the Source Code {#tutorial_building_tegra_cuda_getting_the_code}
|
||||||
=======================
|
-----------------------
|
||||||
|
|
||||||
There are two (2) ways to get the OpenCV source code:
|
There are two (2) ways to get the OpenCV source code:
|
||||||
|
|
||||||
@ -45,8 +45,7 @@ There are two (2) ways to get the OpenCV source code:
|
|||||||
|
|
||||||
For this guide, the focus is on using the git repositories. This is because the 3.1.0 version of OpenCV will not build with CUDA 8.0 without applying a few small upstream changes from the git repository.
|
For this guide, the focus is on using the git repositories. This is because the 3.1.0 version of OpenCV will not build with CUDA 8.0 without applying a few small upstream changes from the git repository.
|
||||||
|
|
||||||
OpenCV
|
### OpenCV
|
||||||
------
|
|
||||||
|
|
||||||
Start with the `opencv` repository:
|
Start with the `opencv` repository:
|
||||||
|
|
||||||
@ -93,8 +92,7 @@ You should see output similar to:
|
|||||||
|
|
||||||
At this point, the `opencv` repository is ready for building.
|
At this point, the `opencv` repository is ready for building.
|
||||||
|
|
||||||
OpenCV Extra
|
### OpenCV Extra
|
||||||
------------
|
|
||||||
|
|
||||||
The `opencv_extra` repository contains extra data for the OpenCV library, including the data files used by the tests and demos. It must be cloned separately:
|
The `opencv_extra` repository contains extra data for the OpenCV library, including the data files used by the tests and demos. It must be cloned separately:
|
||||||
|
|
||||||
@ -111,12 +109,11 @@ You may opt to not fetch this repository if you do not plan on running the tests
|
|||||||
__Note:__ If you plan to run the tests, some tests expect the data to be present and will fail without it.
|
__Note:__ If you plan to run the tests, some tests expect the data to be present and will fail without it.
|
||||||
|
|
||||||
Preparation and Prerequisites {#tutorial_building_tegra_cuda_preparation}
|
Preparation and Prerequisites {#tutorial_building_tegra_cuda_preparation}
|
||||||
=============================
|
-----------------------------
|
||||||
|
|
||||||
To build OpenCV, you need a directory to create the configuration and build the libraries. You also need a number of 3rd-party libraries upon which OpenCV depends.
|
To build OpenCV, you need a directory to create the configuration and build the libraries. You also need a number of 3rd-party libraries upon which OpenCV depends.
|
||||||
|
|
||||||
Prerequisites for Ubuntu Linux
|
### Prerequisites for Ubuntu Linux
|
||||||
------------------------------
|
|
||||||
|
|
||||||
These are the basic requirements for building OpenCV for Tegra on Linux:
|
These are the basic requirements for building OpenCV for Tegra on Linux:
|
||||||
|
|
||||||
@ -186,8 +183,7 @@ The commands that will do this:
|
|||||||
|
|
||||||
Once all the necessary packages are installed, you can configure the build.
|
Once all the necessary packages are installed, you can configure the build.
|
||||||
|
|
||||||
Preparing the Build Area
|
### Preparing the Build Area
|
||||||
------------------------
|
|
||||||
|
|
||||||
Software projects that use the CMake system for configuring their builds expect the actual builds to be done outside of the source tree itself. For configuring and building OpenCV, create a directory called "build" in the same base directory into which you cloned the git repositories:
|
Software projects that use the CMake system for configuring their builds expect the actual builds to be done outside of the source tree itself. For configuring and building OpenCV, create a directory called "build" in the same base directory into which you cloned the git repositories:
|
||||||
|
|
||||||
@ -197,7 +193,7 @@ Software projects that use the CMake system for configuring their builds expect
|
|||||||
You are now ready to configure and build OpenCV.
|
You are now ready to configure and build OpenCV.
|
||||||
|
|
||||||
Configuring OpenCV for Building {#tutorial_building_tegra_cuda_configuring}
|
Configuring OpenCV for Building {#tutorial_building_tegra_cuda_configuring}
|
||||||
===============================
|
-------------------------------
|
||||||
|
|
||||||
The CMake configuration options given below for the different platforms are targeted towards the functionality needed for Tegra. They are based on the original configuration options used for building OpenCV 2.4.13.
|
The CMake configuration options given below for the different platforms are targeted towards the functionality needed for Tegra. They are based on the original configuration options used for building OpenCV 2.4.13.
|
||||||
|
|
||||||
@ -209,8 +205,7 @@ For the Linux-based platforms, the shown value for the `CMAKE_INSTALL_PREFIX` pa
|
|||||||
|
|
||||||
In each of the `cmake` invocations below, the last parameter, `OPENCV_TEST_DATA_PATH`, tells the build system where to find the test-data that is provided by the `opencv_extra` repository. When this is included, a `make install` installs this test-data alongside the libraries and example code, and a `make test` automatically provides this path to the tests that have to load data from it. If you did not clone the `opencv_extra` repository, do not include this parameter.
|
In each of the `cmake` invocations below, the last parameter, `OPENCV_TEST_DATA_PATH`, tells the build system where to find the test-data that is provided by the `opencv_extra` repository. When this is included, a `make install` installs this test-data alongside the libraries and example code, and a `make test` automatically provides this path to the tests that have to load data from it. If you did not clone the `opencv_extra` repository, do not include this parameter.
|
||||||
|
|
||||||
Vibrante V4L Configuration
|
### Vibrante V4L Configuration
|
||||||
--------------------------
|
|
||||||
|
|
||||||
Supported platform: Drive PX 2
|
Supported platform: Drive PX 2
|
||||||
|
|
||||||
@ -251,8 +246,7 @@ The configuration provided above builds the Python bindings for Python 2 (but no
|
|||||||
|
|
||||||
-DBUILD_opencv_python2=OFF
|
-DBUILD_opencv_python2=OFF
|
||||||
|
|
||||||
Jetson L4T Configuration
|
### Jetson L4T Configuration
|
||||||
------------------------
|
|
||||||
|
|
||||||
Supported platforms:
|
Supported platforms:
|
||||||
|
|
||||||
@ -261,7 +255,7 @@ Supported platforms:
|
|||||||
|
|
||||||
Configuration is slightly different for the Jetson TK1 and the Jetson TX1 systems.
|
Configuration is slightly different for the Jetson TK1 and the Jetson TX1 systems.
|
||||||
|
|
||||||
### Jetson TK1
|
#### Jetson TK1
|
||||||
|
|
||||||
$ cmake \
|
$ cmake \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
@ -299,7 +293,7 @@ Configuration is slightly different for the Jetson TK1 and the Jetson TX1 system
|
|||||||
|
|
||||||
__Note:__ This uses CUDA 6.5, not 8.0.
|
__Note:__ This uses CUDA 6.5, not 8.0.
|
||||||
|
|
||||||
### Jetson TX1
|
#### Jetson TX1
|
||||||
|
|
||||||
$ cmake \
|
$ cmake \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
@ -336,8 +330,7 @@ __Note:__ This uses CUDA 6.5, not 8.0.
|
|||||||
|
|
||||||
__Note:__ This configuration does not set the `ENABLE_NEON` parameter.
|
__Note:__ This configuration does not set the `ENABLE_NEON` parameter.
|
||||||
|
|
||||||
Ubuntu Desktop Linux Configuration
|
### Ubuntu Desktop Linux Configuration
|
||||||
----------------------------------
|
|
||||||
|
|
||||||
Supported platforms:
|
Supported platforms:
|
||||||
|
|
||||||
@ -383,12 +376,11 @@ This configuration is nearly identical to that for V4L and L4T, except that the
|
|||||||
As with previous examples, the configuration given above builds the Python bindings for Python 2 (but not Python 3) as part of the build process.
|
As with previous examples, the configuration given above builds the Python bindings for Python 2 (but not Python 3) as part of the build process.
|
||||||
|
|
||||||
Building OpenCV {#tutorial_building_tegra_cuda_building}
|
Building OpenCV {#tutorial_building_tegra_cuda_building}
|
||||||
===============
|
---------------
|
||||||
|
|
||||||
Once `cmake` finishes configuring OpenCV, building is done using the standard `make` utility.
|
Once `cmake` finishes configuring OpenCV, building is done using the standard `make` utility.
|
||||||
|
|
||||||
Building with `make`
|
### Building with `make`
|
||||||
--------------------
|
|
||||||
|
|
||||||
The only parameter that is needed for the invocation of `make` is the `-j` parameter for specifying how many parallel threads to use. This varies depending on the system and how much memory is available, other running processes, etc. The following table offers suggested values for this parameter:
|
The only parameter that is needed for the invocation of `make` is the `-j` parameter for specifying how many parallel threads to use. This varies depending on the system and how much memory is available, other running processes, etc. The following table offers suggested values for this parameter:
|
||||||
|
|
||||||
@ -408,12 +400,11 @@ By default, CMake hides the details of the build steps. If you need to see more
|
|||||||
$ make -j6 VERBOSE=1
|
$ make -j6 VERBOSE=1
|
||||||
|
|
||||||
Testing OpenCV {#tutorial_building_tegra_cuda_testing}
|
Testing OpenCV {#tutorial_building_tegra_cuda_testing}
|
||||||
==============
|
--------------
|
||||||
|
|
||||||
Once the build completes successfully, you have the option of running the extensive set of tests that OpenCV provides. If you did not clone the `opencv_extra` repository and specify the path to `testdata` in the `cmake` invocation, then testing is not recommended.
|
Once the build completes successfully, you have the option of running the extensive set of tests that OpenCV provides. If you did not clone the `opencv_extra` repository and specify the path to `testdata` in the `cmake` invocation, then testing is not recommended.
|
||||||
|
|
||||||
Testing under Linux
|
### Testing under Linux
|
||||||
-------------------
|
|
||||||
|
|
||||||
To run the basic tests under Linux, execute:
|
To run the basic tests under Linux, execute:
|
||||||
|
|
||||||
@ -425,7 +416,7 @@ This executes `ctest` to carry out the tests, as specified in CTest syntax withi
|
|||||||
|
|
||||||
In this example, there are two (2) arguments passed to `ctest`: `--verbose` and `--parallel 3`. The first argument causes the output from `ctest` to be more detailed, and the second causes `ctest` to run as many as three (3) tests in parallel. As with choosing a thread count for building, base any choice for testing on the available number of processor cores, physical memory, etc. Some of the tests do attempt to allocate significant amounts of memory.
|
In this example, there are two (2) arguments passed to `ctest`: `--verbose` and `--parallel 3`. The first argument causes the output from `ctest` to be more detailed, and the second causes `ctest` to run as many as three (3) tests in parallel. As with choosing a thread count for building, base any choice for testing on the available number of processor cores, physical memory, etc. Some of the tests do attempt to allocate significant amounts of memory.
|
||||||
|
|
||||||
### Known Issues with Tests
|
#### Known Issues with Tests
|
||||||
|
|
||||||
At present, not all of the tests in the OpenCV test suite pass. There are tests that fail whether or not CUDA is compiled, and there are tests that are only specific to CUDA that also do not currently pass.
|
At present, not all of the tests in the OpenCV test suite pass. There are tests that fail whether or not CUDA is compiled, and there are tests that are only specific to CUDA that also do not currently pass.
|
||||||
|
|
||||||
@ -434,7 +425,7 @@ __Note:__ There are no tests that pass without CUDA but fail only when CUDA is i
|
|||||||
As the full lists of failing tests vary based on platform, it is impractical to list them here.
|
As the full lists of failing tests vary based on platform, it is impractical to list them here.
|
||||||
|
|
||||||
Installing OpenCV {#tutorial_building_tegra_cuda_installing}
|
Installing OpenCV {#tutorial_building_tegra_cuda_installing}
|
||||||
=================
|
-----------------
|
||||||
|
|
||||||
Installing OpenCV is very straightforward. For the Linux-based platforms, the command is:
|
Installing OpenCV is very straightforward. For the Linux-based platforms, the command is:
|
||||||
|
|
||||||
@ -443,14 +434,13 @@ Installing OpenCV is very straightforward. For the Linux-based platforms, the co
|
|||||||
Depending on the chosen installation location, you may need root privilege to install.
|
Depending on the chosen installation location, you may need root privilege to install.
|
||||||
|
|
||||||
Building OpenCV 2.4.X {#tutorial_building_tegra_cuda_opencv_24X}
|
Building OpenCV 2.4.X {#tutorial_building_tegra_cuda_opencv_24X}
|
||||||
=====================
|
---------------------
|
||||||
|
|
||||||
If you wish to build your own version of the 2.4 version of OpenCV, there are only a few adjustments that must be made. At the time of this writing, the latest version on the 2.4 tree is 2.4.13. These instructions may work for later versions of 2.4, though they have not been tested for any earlier versions.
|
If you wish to build your own version of the 2.4 version of OpenCV, there are only a few adjustments that must be made. At the time of this writing, the latest version on the 2.4 tree is 2.4.13. These instructions may work for later versions of 2.4, though they have not been tested for any earlier versions.
|
||||||
|
|
||||||
__Note:__ The 2.4.X OpenCV source does not have the extra modules and code for Tegra that was upstreamed into the 3.X versions of OpenCV. This part of the guide is only for cases where you want to build a vanilla version of OpenCV 2.4.
|
__Note:__ The 2.4.X OpenCV source does not have the extra modules and code for Tegra that was upstreamed into the 3.X versions of OpenCV. This part of the guide is only for cases where you want to build a vanilla version of OpenCV 2.4.
|
||||||
|
|
||||||
Selecting the 2.4 Source
|
### Selecting the 2.4 Source
|
||||||
------------------------
|
|
||||||
|
|
||||||
First you must select the correct source branch or tag. If you want a specific version such as 2.4.13, you want to make a local branch based on the tag, as was done with the 3.1.0 tag above:
|
First you must select the correct source branch or tag. If you want a specific version such as 2.4.13, you want to make a local branch based on the tag, as was done with the 3.1.0 tag above:
|
||||||
|
|
||||||
@ -466,14 +456,13 @@ If you simply want the newest code from the 2.4 line of OpenCV, there is a `2.4`
|
|||||||
|
|
||||||
There is no need for the `git cherry-pick` commands used with 3.1.0 when building the 2.4.13 source.
|
There is no need for the `git cherry-pick` commands used with 3.1.0 when building the 2.4.13 source.
|
||||||
|
|
||||||
Configuring
|
### Configuring
|
||||||
-----------
|
|
||||||
|
|
||||||
Configuring is done with CMake as before. The primary difference is that OpenCV 2.4 only provides Python bindings for Python 2, and thus does not distinguish between Python 2 and Python 3 in the CMake parameters. There is only one parameter, `BUILD_opencv_python`. In addition, there is a build-related parameter that controls features in 2.4 that are not in 3.1.0. This parameter is `BUILD_opencv_nonfree`.
|
Configuring is done with CMake as before. The primary difference is that OpenCV 2.4 only provides Python bindings for Python 2, and thus does not distinguish between Python 2 and Python 3 in the CMake parameters. There is only one parameter, `BUILD_opencv_python`. In addition, there is a build-related parameter that controls features in 2.4 that are not in 3.1.0. This parameter is `BUILD_opencv_nonfree`.
|
||||||
|
|
||||||
Configuration still takes place in a separate directory that must be a sibling to the `opencv` and `opencv_extra` directories.
|
Configuration still takes place in a separate directory that must be a sibling to the `opencv` and `opencv_extra` directories.
|
||||||
|
|
||||||
### Configuring Vibrante V4L
|
#### Configuring Vibrante V4L
|
||||||
|
|
||||||
For DRIVE PX 2:
|
For DRIVE PX 2:
|
||||||
|
|
||||||
@ -510,7 +499,7 @@ For DRIVE PX 2:
|
|||||||
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
||||||
../opencv
|
../opencv
|
||||||
|
|
||||||
### Configuring Jetson L4T
|
#### Configuring Jetson L4T
|
||||||
|
|
||||||
For Jetson TK1:
|
For Jetson TK1:
|
||||||
|
|
||||||
@ -582,7 +571,7 @@ For Jetson TX1:
|
|||||||
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
||||||
../opencv
|
../opencv
|
||||||
|
|
||||||
### Configuring Desktop Ubuntu Linux
|
#### Configuring Desktop Ubuntu Linux
|
||||||
|
|
||||||
For both 14.04 LTS and 16.04 LTS:
|
For both 14.04 LTS and 16.04 LTS:
|
||||||
|
|
||||||
@ -618,13 +607,12 @@ For both 14.04 LTS and 16.04 LTS:
|
|||||||
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
-DOPENCV_TEST_DATA_PATH=../opencv_extra/testdata \
|
||||||
../opencv
|
../opencv
|
||||||
|
|
||||||
Building, Testing and Installing
|
### Building, Testing and Installing
|
||||||
--------------------------------
|
|
||||||
|
|
||||||
Once configured, the steps of building, testing, and installing are the same as above for the 3.1.0 source.
|
Once configured, the steps of building, testing, and installing are the same as above for the 3.1.0 source.
|
||||||
|
|
||||||
CMake Parameter Reference {#tutorial_building_tegra_cuda_parameter_reference}
|
CMake Parameter Reference {#tutorial_building_tegra_cuda_parameter_reference}
|
||||||
=========================
|
-------------------------
|
||||||
|
|
||||||
The following is a table of all the parameters passed to CMake in the recommended invocations above. Some of these are parameters from CMake itself, while most are specific to OpenCV.
|
The following is a table of all the parameters passed to CMake in the recommended invocations above. Some of these are parameters from CMake itself, while most are specific to OpenCV.
|
||||||
|
|
||||||
|
@ -35,11 +35,11 @@ issue the following command to run the sample from the command line.
|
|||||||
cd path/to/samples/java/clojure/simple-sample
|
cd path/to/samples/java/clojure/simple-sample
|
||||||
lein run
|
lein run
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
Preamble
|
Preamble
|
||||||
--------
|
--------
|
||||||
|
|
||||||
For detailed instruction on installing OpenCV with desktop Java support refer to the @ref tutorial_java_dev_intro "corresponding
|
For detailed instruction on installing OpenCV with desktop Java support refer to the @ref tutorial_java_dev_intro "corresponding tutorial".
|
||||||
tutorial".
|
|
||||||
|
|
||||||
If you are in a hurry, here is a minimum quick start guide to install OpenCV on Mac OS X:
|
If you are in a hurry, here is a minimum quick start guide to install OpenCV on Mac OS X:
|
||||||
|
|
||||||
@ -63,6 +63,7 @@ make -j8
|
|||||||
# optional
|
# optional
|
||||||
# make install
|
# make install
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
Install Leiningen
|
Install Leiningen
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
@ -170,6 +171,7 @@ i386 -> x86
|
|||||||
arm -> arm
|
arm -> arm
|
||||||
sparc -> sparc
|
sparc -> sparc
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
### Package the native lib as a jar
|
### Package the native lib as a jar
|
||||||
|
|
||||||
Next you need to package the native lib in a jar file by using the jar command to create a new jar
|
Next you need to package the native lib in a jar file by using the jar command to create a new jar
|
||||||
@ -193,6 +195,7 @@ tree
|
|||||||
|
|
||||||
3 directories, 3 files
|
3 directories, 3 files
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
### Locally install the jars
|
### Locally install the jars
|
||||||
|
|
||||||
We are now ready to add the two jars as artifacts to the local maven repository with the help of the
|
We are now ready to add the two jars as artifacts to the local maven repository with the help of the
|
||||||
@ -402,6 +405,7 @@ Let's now try to port to Clojure the @ref tutorial_java_dev_intro "OpenCV Java t
|
|||||||
Instead of writing it in a source file we're going to evaluate it at the REPL.
|
Instead of writing it in a source file we're going to evaluate it at the REPL.
|
||||||
|
|
||||||
Following is the original Java source code of the cited sample.
|
Following is the original Java source code of the cited sample.
|
||||||
|
|
||||||
@code{.java}
|
@code{.java}
|
||||||
import org.opencv.core.Mat;
|
import org.opencv.core.Mat;
|
||||||
import org.opencv.core.CvType;
|
import org.opencv.core.CvType;
|
||||||
@ -430,20 +434,25 @@ Before start coding, we'd like to eliminate the boring need of interactively loa
|
|||||||
opencv lib any time we start a new REPL to interact with it.
|
opencv lib any time we start a new REPL to interact with it.
|
||||||
|
|
||||||
First, stop the REPL by evaluating the (exit) expression at the REPL prompt.
|
First, stop the REPL by evaluating the (exit) expression at the REPL prompt.
|
||||||
|
|
||||||
@code{.clojure}
|
@code{.clojure}
|
||||||
user=> (exit)
|
user=> (exit)
|
||||||
Bye for now!
|
Bye for now!
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
Then open your project.clj file and edit it as follows:
|
Then open your project.clj file and edit it as follows:
|
||||||
|
|
||||||
@code{.clojure}
|
@code{.clojure}
|
||||||
(defproject simple-sample "0.1.0-SNAPSHOT"
|
(defproject simple-sample "0.1.0-SNAPSHOT"
|
||||||
...
|
...
|
||||||
injections [(clojure.lang.RT/loadLibrary org.opencv.core.Core/NATIVE_LIBRARY_NAME)])
|
injections [(clojure.lang.RT/loadLibrary org.opencv.core.Core/NATIVE_LIBRARY_NAME)])
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
Here we're saying to load the opencv native lib anytime we run the REPL in such a way that we have
|
Here we're saying to load the opencv native lib anytime we run the REPL in such a way that we have
|
||||||
no more need to remember to manually do it.
|
no more need to remember to manually do it.
|
||||||
|
|
||||||
Rerun the lein repl task
|
Rerun the lein repl task
|
||||||
|
|
||||||
@code{.bash}
|
@code{.bash}
|
||||||
lein repl
|
lein repl
|
||||||
nREPL server started on port 51645 on host 127.0.0.1
|
nREPL server started on port 51645 on host 127.0.0.1
|
||||||
@ -458,11 +467,14 @@ Clojure 1.5.1
|
|||||||
|
|
||||||
user=>
|
user=>
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
Import the OpenCV Java interfaces of interest.
|
Import the OpenCV Java interfaces of interest.
|
||||||
|
|
||||||
@code{.clojure}
|
@code{.clojure}
|
||||||
user=> (import '[org.opencv.core Mat CvType Scalar])
|
user=> (import '[org.opencv.core Mat CvType Scalar])
|
||||||
org.opencv.core.Scalar
|
org.opencv.core.Scalar
|
||||||
@endcode
|
@endcode
|
||||||
|
|
||||||
We're going to mimic almost verbatim the original OpenCV java tutorial to:
|
We're going to mimic almost verbatim the original OpenCV java tutorial to:
|
||||||
|
|
||||||
- create a 5x10 matrix with all its elements initialized to 0
|
- create a 5x10 matrix with all its elements initialized to 0
|
||||||
|
@ -6,7 +6,7 @@ OpenCV environment variables reference {#tutorial_env_reference}
|
|||||||
|
|
||||||
@tableofcontents
|
@tableofcontents
|
||||||
|
|
||||||
### Introduction
|
## Introduction
|
||||||
|
|
||||||
OpenCV can change its behavior depending on the runtime environment:
|
OpenCV can change its behavior depending on the runtime environment:
|
||||||
- enable extra debugging output or performance tracing
|
- enable extra debugging output or performance tracing
|
||||||
@ -18,7 +18,7 @@ OpenCV can change its behavior depending on the runtime environment:
|
|||||||
- ⭐ marks most popular variables
|
- ⭐ marks most popular variables
|
||||||
- variables with names like `VAR_${NAME}` describe a family of variables, where `${NAME}` should be changed to one of the predefined values, e.g. `VAR_TBB`, `VAR_OPENMP`, ...
|
- variables with names like `VAR_${NAME}` describe a family of variables, where `${NAME}` should be changed to one of the predefined values, e.g. `VAR_TBB`, `VAR_OPENMP`, ...
|
||||||
|
|
||||||
##### Setting environment variable in Windows
|
### Setting environment variable in Windows
|
||||||
In terminal or cmd-file (bat-file):
|
In terminal or cmd-file (bat-file):
|
||||||
```.bat
|
```.bat
|
||||||
set MY_ENV_VARIABLE=true
|
set MY_ENV_VARIABLE=true
|
||||||
@ -30,7 +30,7 @@ In GUI:
|
|||||||
- In new window click on the "Environment variables" button
|
- In new window click on the "Environment variables" button
|
||||||
- Add an entry to the "User variables" list
|
- Add an entry to the "User variables" list
|
||||||
|
|
||||||
##### Setting environment variable in Linux
|
### Setting environment variable in Linux
|
||||||
|
|
||||||
In terminal or shell script:
|
In terminal or shell script:
|
||||||
```.sh
|
```.sh
|
||||||
@ -42,7 +42,7 @@ or as a single command:
|
|||||||
MY_ENV_VARIABLE=true ./my_app
|
MY_ENV_VARIABLE=true ./my_app
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Setting environment variable in Python
|
### Setting environment variable in Python
|
||||||
|
|
||||||
```.py
|
```.py
|
||||||
import os
|
import os
|
||||||
@ -51,7 +51,7 @@ import cv2 # variables set after this may not have effect
|
|||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Types
|
## Types
|
||||||
|
|
||||||
- _non-null_ - set to anything to enable feature, in some cases can be interpreted as other types (e.g. path)
|
- _non-null_ - set to anything to enable feature, in some cases can be interpreted as other types (e.g. path)
|
||||||
- _bool_ - `1`, `True`, `true`, `TRUE` / `0`, `False`, `false`, `FALSE`
|
- _bool_ - `1`, `True`, `true`, `TRUE` / `0`, `False`, `false`, `FALSE`
|
||||||
@ -61,7 +61,7 @@ import cv2 # variables set after this may not have effect
|
|||||||
- _paths_ - `;`-separated on Windows, `:`-separated on others
|
- _paths_ - `;`-separated on Windows, `:`-separated on others
|
||||||
|
|
||||||
|
|
||||||
### General, core
|
## General, core
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| OPENCV_SKIP_CPU_BASELINE_CHECK | non-null | | do not check that current CPU supports all features used by the build (baseline) |
|
| OPENCV_SKIP_CPU_BASELINE_CHECK | non-null | | do not check that current CPU supports all features used by the build (baseline) |
|
||||||
@ -84,14 +84,14 @@ Links:
|
|||||||
- https://github.com/opencv/opencv/wiki/CPU-optimizations-build-options
|
- https://github.com/opencv/opencv/wiki/CPU-optimizations-build-options
|
||||||
|
|
||||||
|
|
||||||
### Logging
|
## Logging
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| ⭐ OPENCV_LOG_LEVEL | string | | logging level (see accepted values below) |
|
| ⭐ OPENCV_LOG_LEVEL | string | | logging level (see accepted values below) |
|
||||||
| OPENCV_LOG_TIMESTAMP | bool | true | logging with timestamps |
|
| OPENCV_LOG_TIMESTAMP | bool | true | logging with timestamps |
|
||||||
| OPENCV_LOG_TIMESTAMP_NS | bool | false | add nsec to logging timestamps |
|
| OPENCV_LOG_TIMESTAMP_NS | bool | false | add nsec to logging timestamps |
|
||||||
|
|
||||||
##### Levels:
|
### Levels
|
||||||
- `0`, `O`, `OFF`, `S`, `SILENT`, `DISABLE`, `DISABLED`
|
- `0`, `O`, `OFF`, `S`, `SILENT`, `DISABLE`, `DISABLED`
|
||||||
- `F`, `FATAL`
|
- `F`, `FATAL`
|
||||||
- `E`, `ERROR`
|
- `E`, `ERROR`
|
||||||
@ -101,7 +101,7 @@ Links:
|
|||||||
- `V`, `VERBOSE`
|
- `V`, `VERBOSE`
|
||||||
|
|
||||||
|
|
||||||
### core/parallel_for
|
## core/parallel_for
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| ⭐ OPENCV_FOR_THREADS_NUM | num | 0 | set number of threads |
|
| ⭐ OPENCV_FOR_THREADS_NUM | num | 0 | set number of threads |
|
||||||
@ -112,7 +112,7 @@ Links:
|
|||||||
| OPENCV_FOR_OPENMP_DYNAMIC_DISABLE | bool | false | use single OpenMP thread |
|
| OPENCV_FOR_OPENMP_DYNAMIC_DISABLE | bool | false | use single OpenMP thread |
|
||||||
|
|
||||||
|
|
||||||
### backends
|
## backends
|
||||||
OPENCV_LEGACY_WAITKEY
|
OPENCV_LEGACY_WAITKEY
|
||||||
Some modules have multiple available backends, following variables allow choosing specific backend or changing default priorities in which backends will be probed (e.g. when opening a video file).
|
Some modules have multiple available backends, following variables allow choosing specific backend or changing default priorities in which backends will be probed (e.g. when opening a video file).
|
||||||
|
|
||||||
@ -128,7 +128,7 @@ Some modules have multiple available backends, following variables allow choosin
|
|||||||
| OPENCV_VIDEOIO_PRIORITY_LIST | string, `,`-separated | | list of videoio backends in priority order |
|
| OPENCV_VIDEOIO_PRIORITY_LIST | string, `,`-separated | | list of videoio backends in priority order |
|
||||||
|
|
||||||
|
|
||||||
### plugins
|
## plugins
|
||||||
Some external dependencies can be detached into a dynamic library, which will be loaded at runtime (plugin). Following variables allow changing default search locations and naming pattern for these plugins.
|
Some external dependencies can be detached into a dynamic library, which will be loaded at runtime (plugin). Following variables allow changing default search locations and naming pattern for these plugins.
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
@ -141,7 +141,7 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_VIDEOIO_PLUGIN_PATH | paths | | directories to search for _videoio_ plugins |
|
| OPENCV_VIDEOIO_PLUGIN_PATH | paths | | directories to search for _videoio_ plugins |
|
||||||
| OPENCV_VIDEOIO_PLUGIN_${NAME} | string, glob | | _videoio_ plugin library name (glob) |
|
| OPENCV_VIDEOIO_PLUGIN_${NAME} | string, glob | | _videoio_ plugin library name (glob) |
|
||||||
|
|
||||||
### OpenCL
|
## OpenCL
|
||||||
|
|
||||||
**Note:** OpenCL device specification format is `<Platform>:<CPU|GPU|ACCELERATOR|nothing=GPU/CPU>:<deviceName>`, e.g. `AMD:GPU:`
|
**Note:** OpenCL device specification format is `<Platform>:<CPU|GPU|ACCELERATOR|nothing=GPU/CPU>:<deviceName>`, e.g. `AMD:GPU:`
|
||||||
|
|
||||||
@ -170,7 +170,7 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_OPENCL_FORCE | bool | false | force running OpenCL kernel even if usual conditions are not met (e.g. dst.isUMat) |
|
| OPENCV_OPENCL_FORCE | bool | false | force running OpenCL kernel even if usual conditions are not met (e.g. dst.isUMat) |
|
||||||
| OPENCV_OPENCL_PERF_CHECK_BYPASS | bool | false | force running OpenCL kernel even if usual performance-related conditions are not met (e.g. image is very small) |
|
| OPENCV_OPENCL_PERF_CHECK_BYPASS | bool | false | force running OpenCL kernel even if usual performance-related conditions are not met (e.g. image is very small) |
|
||||||
|
|
||||||
##### SVM (Shared Virtual Memory) - disabled by default
|
### SVM (Shared Virtual Memory) - disabled by default
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| OPENCV_OPENCL_SVM_DISABLE | bool | false | disable SVM |
|
| OPENCV_OPENCL_SVM_DISABLE | bool | false | disable SVM |
|
||||||
@ -179,11 +179,11 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_OPENCL_SVM_CAPABILITIES_MASK | num | | |
|
| OPENCV_OPENCL_SVM_CAPABILITIES_MASK | num | | |
|
||||||
| OPENCV_OPENCL_SVM_BUFFERPOOL_LIMIT | num | | same as OPENCV_OPENCL_BUFFERPOOL_LIMIT, but for SVM buffers |
|
| OPENCV_OPENCL_SVM_BUFFERPOOL_LIMIT | num | | same as OPENCV_OPENCL_BUFFERPOOL_LIMIT, but for SVM buffers |
|
||||||
|
|
||||||
##### Links:
|
### Links:
|
||||||
- https://github.com/opencv/opencv/wiki/OpenCL-optimizations
|
- https://github.com/opencv/opencv/wiki/OpenCL-optimizations
|
||||||
|
|
||||||
|
|
||||||
### Tracing/Profiling
|
## Tracing/Profiling
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| ⭐ OPENCV_TRACE | bool | false | enable trace |
|
| ⭐ OPENCV_TRACE | bool | false | enable trace |
|
||||||
@ -196,11 +196,11 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_TRACE_ITT_PARENT | bool | false | set parentID for ITT task |
|
| OPENCV_TRACE_ITT_PARENT | bool | false | set parentID for ITT task |
|
||||||
| OPENCV_TRACE_ITT_SET_THREAD_NAME | bool | false | set name for OpenCV's threads "OpenCVThread-%03d" |
|
| OPENCV_TRACE_ITT_SET_THREAD_NAME | bool | false | set name for OpenCV's threads "OpenCVThread-%03d" |
|
||||||
|
|
||||||
##### Links:
|
### Links:
|
||||||
- https://github.com/opencv/opencv/wiki/Profiling-OpenCV-Applications
|
- https://github.com/opencv/opencv/wiki/Profiling-OpenCV-Applications
|
||||||
|
|
||||||
|
|
||||||
##### Cache
|
## Cache
|
||||||
**Note:** Default tmp location is `%TMPDIR%` (Windows); `$XDG_CACHE_HOME`, `$HOME/.cache`, `/var/tmp`, `/tmp` (others)
|
**Note:** Default tmp location is `%TMPDIR%` (Windows); `$XDG_CACHE_HOME`, `$HOME/.cache`, `/var/tmp`, `/tmp` (others)
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
@ -210,7 +210,7 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_OPENCL_CACHE_DIR | path | default tmp location | cache directory for OpenCL kernels cache (subdirectory `opencl_cache`) |
|
| OPENCV_OPENCL_CACHE_DIR | path | default tmp location | cache directory for OpenCL kernels cache (subdirectory `opencl_cache`) |
|
||||||
|
|
||||||
|
|
||||||
### dnn
|
## dnn
|
||||||
**Note:** In the table below `dump_base_name` equals to `ocv_dnn_net_%05d_%02d` where first argument is internal network ID and the second - dump level.
|
**Note:** In the table below `dump_base_name` equals to `ocv_dnn_net_%05d_%02d` where first argument is internal network ID and the second - dump level.
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
@ -240,7 +240,7 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_OCL4DNN_TUNING_RAISE_CHECK_ERROR | bool | false | raise exception on errors (auto-tuning) |
|
| OPENCV_OCL4DNN_TUNING_RAISE_CHECK_ERROR | bool | false | raise exception on errors (auto-tuning) |
|
||||||
|
|
||||||
|
|
||||||
### Tests
|
## Tests
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| ⭐ OPENCV_TEST_DATA_PATH | dir path | | set test data search location (e.g. `/home/user/opencv_extra/testdata`) |
|
| ⭐ OPENCV_TEST_DATA_PATH | dir path | | set test data search location (e.g. `/home/user/opencv_extra/testdata`) |
|
||||||
@ -254,11 +254,11 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_PERF_VALIDATION_DIR | dir path | | location of files read/written by `--perf_read_validation_results`/`--perf_write_validation_results` |
|
| OPENCV_PERF_VALIDATION_DIR | dir path | | location of files read/written by `--perf_read_validation_results`/`--perf_write_validation_results` |
|
||||||
| ⭐ OPENCV_PYTEST_FILTER | string (glob) | | test filter for Python tests |
|
| ⭐ OPENCV_PYTEST_FILTER | string (glob) | | test filter for Python tests |
|
||||||
|
|
||||||
##### Links:
|
### Links:
|
||||||
* https://github.com/opencv/opencv/wiki/QA_in_OpenCV
|
* https://github.com/opencv/opencv/wiki/QA_in_OpenCV
|
||||||
|
|
||||||
|
|
||||||
### videoio
|
## videoio
|
||||||
**Note:** extra FFmpeg options should be passed in the form `key;value|key;value|key;value`, for example `hwaccel;cuvid|video_codec;h264_cuvid|vsync;0` or `vcodec;x264|vprofile;high|vlevel;4.0`
|
**Note:** extra FFmpeg options should be passed in the form `key;value|key;value|key;value`, for example `hwaccel;cuvid|video_codec;h264_cuvid|vsync;0` or `vcodec;x264|vprofile;high|vlevel;4.0`
|
||||||
|
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
@ -288,7 +288,7 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_VIDEOWRITER_DEBUG | bool | false | enable debug messages for VideoWriter |
|
| OPENCV_VIDEOWRITER_DEBUG | bool | false | enable debug messages for VideoWriter |
|
||||||
| ⭐ OPENCV_VIDEOIO_DEBUG | bool | false | debug messages for both VideoCapture and VideoWriter |
|
| ⭐ OPENCV_VIDEOIO_DEBUG | bool | false | debug messages for both VideoCapture and VideoWriter |
|
||||||
|
|
||||||
##### videoio tests
|
### videoio tests
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| OPENCV_TEST_VIDEOIO_BACKEND_REQUIRE_FFMPEG | | | test app will exit if no FFmpeg backend is available |
|
| OPENCV_TEST_VIDEOIO_BACKEND_REQUIRE_FFMPEG | | | test app will exit if no FFmpeg backend is available |
|
||||||
@ -297,14 +297,14 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_TEST_CAMERA_%d_FPS | num | | fps to set for N-th camera (0-based index) (waitAny_V4L test) |
|
| OPENCV_TEST_CAMERA_%d_FPS | num | | fps to set for N-th camera (0-based index) (waitAny_V4L test) |
|
||||||
|
|
||||||
|
|
||||||
### gapi
|
## gapi
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| ⭐ GRAPH_DUMP_PATH | file path | | dump graph (dot format) |
|
| ⭐ GRAPH_DUMP_PATH | file path | | dump graph (dot format) |
|
||||||
| PIPELINE_MODELS_PATH | dir path | | pipeline_modeling_tool sample application uses this var |
|
| PIPELINE_MODELS_PATH | dir path | | pipeline_modeling_tool sample application uses this var |
|
||||||
| OPENCV_GAPI_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND | bool | true (Windows, Apple), false (others) | similar to OPENCV_DNN_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND |
|
| OPENCV_GAPI_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND | bool | true (Windows, Apple), false (others) | similar to OPENCV_DNN_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND |
|
||||||
|
|
||||||
##### gapi tests/samples
|
### gapi tests/samples
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| PLAIDML_DEVICE | string | | specific to PlaidML backend test |
|
| PLAIDML_DEVICE | string | | specific to PlaidML backend test |
|
||||||
@ -312,12 +312,12 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| OPENCV_GAPI_ONNX_MODEL_PATH | dir path | | search location for ONNX models test |
|
| OPENCV_GAPI_ONNX_MODEL_PATH | dir path | | search location for ONNX models test |
|
||||||
| OPENCV_TEST_FREETYPE_FONT_PATH | file path | | location of TrueType font for one of tests |
|
| OPENCV_TEST_FREETYPE_FONT_PATH | file path | | location of TrueType font for one of tests |
|
||||||
|
|
||||||
##### Links:
|
### Links:
|
||||||
* https://github.com/opencv/opencv/wiki/Using-G-API-with-OpenVINO-Toolkit
|
* https://github.com/opencv/opencv/wiki/Using-G-API-with-OpenVINO-Toolkit
|
||||||
* https://github.com/opencv/opencv/wiki/Using-G-API-with-MS-ONNX-Runtime
|
* https://github.com/opencv/opencv/wiki/Using-G-API-with-MS-ONNX-Runtime
|
||||||
|
|
||||||
|
|
||||||
### highgui
|
## highgui
|
||||||
|
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
@ -325,14 +325,14 @@ Some external dependencies can be detached into a dynamic library, which will be
|
|||||||
| $XDG_RUNTIME_DIR | | | Wayland backend specific - create shared memory-mapped file for interprocess communication (named `opencv-shared-??????`) |
|
| $XDG_RUNTIME_DIR | | | Wayland backend specific - create shared memory-mapped file for interprocess communication (named `opencv-shared-??????`) |
|
||||||
|
|
||||||
|
|
||||||
### imgproc
|
## imgproc
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| OPENCV_OPENCL_IMGPROC_MORPH_SPECIAL_KERNEL | bool | true (Apple), false (others) | use special OpenCL kernel for small morph kernel (Intel devices) |
|
| OPENCV_OPENCL_IMGPROC_MORPH_SPECIAL_KERNEL | bool | true (Apple), false (others) | use special OpenCL kernel for small morph kernel (Intel devices) |
|
||||||
| OPENCV_GAUSSIANBLUR_CHECK_BITEXACT_KERNELS | bool | false | validate Gaussian kernels before running (src is CV_16U, bit-exact version) |
|
| OPENCV_GAUSSIANBLUR_CHECK_BITEXACT_KERNELS | bool | false | validate Gaussian kernels before running (src is CV_16U, bit-exact version) |
|
||||||
|
|
||||||
|
|
||||||
### imgcodecs
|
## imgcodecs
|
||||||
| name | type | default | description |
|
| name | type | default | description |
|
||||||
|------|------|---------|-------------|
|
|------|------|---------|-------------|
|
||||||
| OPENCV_IMGCODECS_AVIF_MAX_FILE_SIZE | num | 64MB | limit input AVIF size |
|
| OPENCV_IMGCODECS_AVIF_MAX_FILE_SIZE | num | 64MB | limit input AVIF size |
|
||||||
|
@ -22,8 +22,7 @@ best to help you out.
|
|||||||
with the latest Microsoft Visual Studio IDE and do not take advantage of the most advanced
|
with the latest Microsoft Visual Studio IDE and do not take advantage of the most advanced
|
||||||
technologies we integrate into our library.
|
technologies we integrate into our library.
|
||||||
|
|
||||||
Installation by Using the Pre-built Libraries {#tutorial_windows_install_prebuilt}
|
## Installation by Using the Pre-built Libraries {#tutorial_windows_install_prebuilt}
|
||||||
=============================================
|
|
||||||
|
|
||||||
-# Launch a web browser of choice and go to our [page on
|
-# Launch a web browser of choice and go to our [page on
|
||||||
Sourceforge](http://sourceforge.net/projects/opencvlibrary/files/).
|
Sourceforge](http://sourceforge.net/projects/opencvlibrary/files/).
|
||||||
@ -35,8 +34,7 @@ Installation by Using the Pre-built Libraries {#tutorial_windows_install_prebuil
|
|||||||
|
|
||||||
-# To finalize the installation go to the @ref tutorial_windows_install_path section.
|
-# To finalize the installation go to the @ref tutorial_windows_install_path section.
|
||||||
|
|
||||||
Installation by Using git-bash (version>=2.14.1) and cmake (version >=3.9.1){#tutorial_windows_gitbash_build}
|
## Installation by Using git-bash (version>=2.14.1) and cmake (version >=3.9.1){#tutorial_windows_gitbash_build}
|
||||||
===============================================================
|
|
||||||
|
|
||||||
-# You must download [cmake (version >=3.9.1)](https://cmake.org) and install it. You must add cmake to PATH variable during installation
|
-# You must download [cmake (version >=3.9.1)](https://cmake.org) and install it. You must add cmake to PATH variable during installation
|
||||||
|
|
||||||
@ -108,8 +106,7 @@ CMAKE_OPTIONS=(-DBUILD_PERF_TESTS:BOOL=OFF -DBUILD_TESTS:BOOL=OFF -DBUILD_DOCS:B
|
|||||||
-# Next time you run this script, opencv and opencv_contrib will be updated and rebuilt
|
-# Next time you run this script, opencv and opencv_contrib will be updated and rebuilt
|
||||||
|
|
||||||
|
|
||||||
Installation by Making Your Own Libraries from the Source Files {#tutorial_windows_install_build}
|
## Installation by Making Your Own Libraries from the Source Files {#tutorial_windows_install_build}
|
||||||
===============================================================
|
|
||||||
|
|
||||||
You may find the content of this tutorial also inside the following videos:
|
You may find the content of this tutorial also inside the following videos:
|
||||||
[Part 1](https://www.youtube.com/watch?v=NnovZ1cTlMs) and [Part 2](https://www.youtube.com/watch?v=qGNWMcfWwPU), hosted on YouTube.
|
[Part 1](https://www.youtube.com/watch?v=NnovZ1cTlMs) and [Part 2](https://www.youtube.com/watch?v=qGNWMcfWwPU), hosted on YouTube.
|
||||||
@ -364,8 +361,7 @@ libraries). If you do not need the support for some of these, you can just freel
|
|||||||
caused mostly by old video card drivers. For testing the GPU (if built) run the
|
caused mostly by old video card drivers. For testing the GPU (if built) run the
|
||||||
*performance_gpu.exe* sample application.
|
*performance_gpu.exe* sample application.
|
||||||
|
|
||||||
Set the OpenCV environment variable and add it to the systems path {#tutorial_windows_install_path}
|
## Set the OpenCV environment variable and add it to the systems path {#tutorial_windows_install_path}
|
||||||
=================================================================
|
|
||||||
|
|
||||||
First, we set an environment variable to make our work easier. This will hold the build directory of
|
First, we set an environment variable to make our work easier. This will hold the build directory of
|
||||||
our OpenCV library that we use in our projects. Start up a command window and enter:
|
our OpenCV library that we use in our projects. Start up a command window and enter:
|
||||||
|
@ -1,4 +0,0 @@
|
|||||||
Object Detection (objdetect module) {#tutorial_table_of_content_objdetect}
|
|
||||||
===================================
|
|
||||||
|
|
||||||
Content has been moved to this page: @ref tutorial_table_of_content_other
|
|
@ -1601,7 +1601,7 @@ objectPoints[i].size() for each i.
|
|||||||
@param image_size Size of the image used only to initialize the camera intrinsic matrix.
|
@param image_size Size of the image used only to initialize the camera intrinsic matrix.
|
||||||
@param K Output 3x3 floating-point camera intrinsic matrix
|
@param K Output 3x3 floating-point camera intrinsic matrix
|
||||||
\f$\cameramatrix{A}\f$ . If
|
\f$\cameramatrix{A}\f$ . If
|
||||||
@ref CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
|
@ref cv::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
|
||||||
initialized before calling the function.
|
initialized before calling the function.
|
||||||
@param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$.
|
@param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$.
|
||||||
@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
|
@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
|
||||||
@ -1611,19 +1611,19 @@ space (in which object points are specified) to the world coordinate space, that
|
|||||||
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
|
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
|
||||||
@param tvecs Output vector of translation vectors estimated for each pattern view.
|
@param tvecs Output vector of translation vectors estimated for each pattern view.
|
||||||
@param flags Different flags that may be zero or a combination of the following values:
|
@param flags Different flags that may be zero or a combination of the following values:
|
||||||
- @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
|
- @ref cv::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
|
||||||
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
|
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
|
||||||
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
|
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
|
||||||
- @ref CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
|
- @ref cv::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
|
||||||
of intrinsic optimization.
|
of intrinsic optimization.
|
||||||
- @ref CALIB_CHECK_COND The functions will check validity of condition number.
|
- @ref cv::CALIB_CHECK_COND The functions will check validity of condition number.
|
||||||
- @ref CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero.
|
- @ref cv::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero.
|
||||||
- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K4 Selected distortion coefficients
|
- @ref cv::CALIB_FIX_K1,..., @ref cv::CALIB_FIX_K4 Selected distortion coefficients
|
||||||
are set to zeros and stay zero.
|
are set to zeros and stay zero.
|
||||||
- @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
|
- @ref cv::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
|
||||||
optimization. It stays at the center or at a different location specified when @ref CALIB_USE_INTRINSIC_GUESS is set too.
|
optimization. It stays at the center or at a different location specified when @ref cv::CALIB_USE_INTRINSIC_GUESS is set too.
|
||||||
- @ref CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
|
- @ref cv::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
|
||||||
optimization. It is the \f$max(width,height)/\pi\f$ or the provided \f$f_x\f$, \f$f_y\f$ when @ref CALIB_USE_INTRINSIC_GUESS is set too.
|
optimization. It is the \f$max(width,height)/\pi\f$ or the provided \f$f_x\f$, \f$f_y\f$ when @ref cv::CALIB_USE_INTRINSIC_GUESS is set too.
|
||||||
@param criteria Termination criteria for the iterative optimization algorithm.
|
@param criteria Termination criteria for the iterative optimization algorithm.
|
||||||
*/
|
*/
|
||||||
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
|
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
|
||||||
@ -1639,7 +1639,7 @@ observed by the first camera.
|
|||||||
observed by the second camera.
|
observed by the second camera.
|
||||||
@param K1 Input/output first camera intrinsic matrix:
|
@param K1 Input/output first camera intrinsic matrix:
|
||||||
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
|
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
|
||||||
any of @ref CALIB_USE_INTRINSIC_GUESS , @ref CALIB_FIX_INTRINSIC are specified,
|
any of @ref cv::CALIB_USE_INTRINSIC_GUESS , @ref cv::CALIB_FIX_INTRINSIC are specified,
|
||||||
some or all of the matrix components must be initialized.
|
some or all of the matrix components must be initialized.
|
||||||
@param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements.
|
@param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements.
|
||||||
@param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
|
@param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
|
||||||
@ -1658,16 +1658,16 @@ to camera coordinate space of the first camera of the stereo pair.
|
|||||||
@param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
|
@param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
|
||||||
of previous output parameter ( rvecs ).
|
of previous output parameter ( rvecs ).
|
||||||
@param flags Different flags that may be zero or a combination of the following values:
|
@param flags Different flags that may be zero or a combination of the following values:
|
||||||
- @ref CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices
|
- @ref cv::CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices
|
||||||
are estimated.
|
are estimated.
|
||||||
- @ref CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of
|
- @ref cv::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of
|
||||||
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
|
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
|
||||||
center (imageSize is used), and focal distances are computed in a least-squares fashion.
|
center (imageSize is used), and focal distances are computed in a least-squares fashion.
|
||||||
- @ref CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
|
- @ref cv::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
|
||||||
of intrinsic optimization.
|
of intrinsic optimization.
|
||||||
- @ref CALIB_CHECK_COND The functions will check validity of condition number.
|
- @ref cv::CALIB_CHECK_COND The functions will check validity of condition number.
|
||||||
- @ref CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero.
|
- @ref cv::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero.
|
||||||
- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay
|
- @ref cv::CALIB_FIX_K1,..., @ref cv::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay
|
||||||
zero.
|
zero.
|
||||||
@param criteria Termination criteria for the iterative optimization algorithm.
|
@param criteria Termination criteria for the iterative optimization algorithm.
|
||||||
*/
|
*/
|
||||||
|
@ -62,10 +62,6 @@
|
|||||||
@defgroup core Core functionality
|
@defgroup core Core functionality
|
||||||
@{
|
@{
|
||||||
@defgroup core_basic Basic structures
|
@defgroup core_basic Basic structures
|
||||||
@defgroup core_c C structures and operations
|
|
||||||
@{
|
|
||||||
@defgroup core_c_glue Connections with C++
|
|
||||||
@}
|
|
||||||
@defgroup core_array Operations on arrays
|
@defgroup core_array Operations on arrays
|
||||||
@defgroup core_async Asynchronous API
|
@defgroup core_async Asynchronous API
|
||||||
@defgroup core_xml XML/YAML Persistence
|
@defgroup core_xml XML/YAML Persistence
|
||||||
|
@ -140,7 +140,6 @@ public:
|
|||||||
|
|
||||||
//! @} core_utils
|
//! @} core_utils
|
||||||
|
|
||||||
//! @endcond
|
|
||||||
|
|
||||||
//! @addtogroup core_basic
|
//! @addtogroup core_basic
|
||||||
//! @{
|
//! @{
|
||||||
|
File diff suppressed because it is too large
Load Diff
1115
modules/core/include/opencv2/core/matx.inl.hpp
Normal file
1115
modules/core/include/opencv2/core/matx.inl.hpp
Normal file
File diff suppressed because it is too large
Load Diff
@ -696,9 +696,6 @@ protected:
|
|||||||
|
|
||||||
/////////////////// XML & YAML I/O implementation //////////////////
|
/////////////////// XML & YAML I/O implementation //////////////////
|
||||||
|
|
||||||
//! @relates cv::FileStorage
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
CV_EXPORTS void write( FileStorage& fs, const String& name, int value );
|
CV_EXPORTS void write( FileStorage& fs, const String& name, int value );
|
||||||
CV_EXPORTS void write( FileStorage& fs, const String& name, float value );
|
CV_EXPORTS void write( FileStorage& fs, const String& name, float value );
|
||||||
CV_EXPORTS void write( FileStorage& fs, const String& name, double value );
|
CV_EXPORTS void write( FileStorage& fs, const String& name, double value );
|
||||||
@ -715,11 +712,6 @@ CV_EXPORTS void writeScalar( FileStorage& fs, float value );
|
|||||||
CV_EXPORTS void writeScalar( FileStorage& fs, double value );
|
CV_EXPORTS void writeScalar( FileStorage& fs, double value );
|
||||||
CV_EXPORTS void writeScalar( FileStorage& fs, const String& value );
|
CV_EXPORTS void writeScalar( FileStorage& fs, const String& value );
|
||||||
|
|
||||||
//! @}
|
|
||||||
|
|
||||||
//! @relates cv::FileNode
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
CV_EXPORTS void read(const FileNode& node, int& value, int default_value);
|
CV_EXPORTS void read(const FileNode& node, int& value, int default_value);
|
||||||
CV_EXPORTS void read(const FileNode& node, float& value, float default_value);
|
CV_EXPORTS void read(const FileNode& node, float& value, float default_value);
|
||||||
CV_EXPORTS void read(const FileNode& node, double& value, double default_value);
|
CV_EXPORTS void read(const FileNode& node, double& value, double default_value);
|
||||||
@ -796,10 +788,7 @@ static inline void read(const FileNode& node, Range& value, const Range& default
|
|||||||
value.start = temp.x; value.end = temp.y;
|
value.start = temp.x; value.end = temp.y;
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @}
|
|
||||||
|
|
||||||
/** @brief Writes string to a file storage.
|
/** @brief Writes string to a file storage.
|
||||||
@relates cv::FileStorage
|
|
||||||
*/
|
*/
|
||||||
CV_EXPORTS FileStorage& operator << (FileStorage& fs, const String& str);
|
CV_EXPORTS FileStorage& operator << (FileStorage& fs, const String& str);
|
||||||
|
|
||||||
@ -884,9 +873,6 @@ namespace internal
|
|||||||
|
|
||||||
//! @endcond
|
//! @endcond
|
||||||
|
|
||||||
//! @relates cv::FileStorage
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
template<typename _Tp> static inline
|
template<typename _Tp> static inline
|
||||||
void write(FileStorage& fs, const _Tp& value)
|
void write(FileStorage& fs, const _Tp& value)
|
||||||
{
|
{
|
||||||
@ -1118,10 +1104,6 @@ static inline void write(FileStorage& fs, const std::vector<DMatch>& vec)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
//! @} FileStorage
|
|
||||||
|
|
||||||
//! @relates cv::FileNode
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
void read(const FileNode& node, bool& value, bool default_value)
|
void read(const FileNode& node, bool& value, bool default_value)
|
||||||
@ -1208,11 +1190,6 @@ void read( const FileNode& node, std::vector<DMatch>& vec, const std::vector<DMa
|
|||||||
read(node, vec);
|
read(node, vec);
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @} FileNode
|
|
||||||
|
|
||||||
//! @relates cv::FileStorage
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Writes data to a file storage.
|
/** @brief Writes data to a file storage.
|
||||||
*/
|
*/
|
||||||
template<typename _Tp> static inline
|
template<typename _Tp> static inline
|
||||||
@ -1244,11 +1221,6 @@ FileStorage& operator << (FileStorage& fs, char* value)
|
|||||||
return (fs << String(value));
|
return (fs << String(value));
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @} FileStorage
|
|
||||||
|
|
||||||
//! @relates cv::FileNodeIterator
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Reads data from a file storage.
|
/** @brief Reads data from a file storage.
|
||||||
*/
|
*/
|
||||||
template<typename _Tp> static inline
|
template<typename _Tp> static inline
|
||||||
@ -1268,11 +1240,6 @@ FileNodeIterator& operator >> (FileNodeIterator& it, std::vector<_Tp>& vec)
|
|||||||
return it;
|
return it;
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @} FileNodeIterator
|
|
||||||
|
|
||||||
//! @relates cv::FileNode
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Reads data from a file storage.
|
/** @brief Reads data from a file storage.
|
||||||
*/
|
*/
|
||||||
template<typename _Tp> static inline
|
template<typename _Tp> static inline
|
||||||
@ -1323,11 +1290,6 @@ void operator >> (const FileNode& n, DMatch& m)
|
|||||||
it >> m.queryIdx >> m.trainIdx >> m.imgIdx >> m.distance;
|
it >> m.queryIdx >> m.trainIdx >> m.imgIdx >> m.distance;
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @} FileNode
|
|
||||||
|
|
||||||
//! @relates cv::FileNodeIterator
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
CV_EXPORTS bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2);
|
CV_EXPORTS bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2);
|
||||||
CV_EXPORTS bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2);
|
CV_EXPORTS bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2);
|
||||||
|
|
||||||
@ -1343,8 +1305,6 @@ bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2)
|
|||||||
return it1.remaining() > it2.remaining();
|
return it1.remaining() > it2.remaining();
|
||||||
}
|
}
|
||||||
|
|
||||||
//! @} FileNodeIterator
|
|
||||||
|
|
||||||
} // cv
|
} // cv
|
||||||
|
|
||||||
#endif // OPENCV_CORE_PERSISTENCE_HPP
|
#endif // OPENCV_CORE_PERSISTENCE_HPP
|
||||||
|
@ -56,15 +56,15 @@
|
|||||||
@defgroup features2d_main Feature Detection and Description
|
@defgroup features2d_main Feature Detection and Description
|
||||||
@defgroup features2d_match Descriptor Matchers
|
@defgroup features2d_match Descriptor Matchers
|
||||||
|
|
||||||
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to
|
Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables
|
||||||
easily switch between different algorithms solving the same problem. This section is devoted to
|
you to easily switch between different algorithms solving the same problem. This section is
|
||||||
matching descriptors that are represented as vectors in a multidimensional space. All objects that
|
devoted to matching descriptors that are represented as vectors in a multidimensional space.
|
||||||
implement vector descriptor matchers inherit the DescriptorMatcher interface.
|
All objects that implement vector descriptor matchers inherit the DescriptorMatcher interface.
|
||||||
|
|
||||||
@defgroup features2d_draw Drawing Function of Keypoints and Matches
|
@defgroup features2d_draw Drawing Function of Keypoints and Matches
|
||||||
@defgroup features2d_category Object Categorization
|
@defgroup features2d_category Object Categorization
|
||||||
|
|
||||||
This section describes approaches based on local 2D features and used to categorize objects.
|
This section describes approaches based on local 2D features and used to categorize objects.
|
||||||
|
|
||||||
@defgroup feature2d_hal Hardware Acceleration Layer
|
@defgroup feature2d_hal Hardware Acceleration Layer
|
||||||
@{
|
@{
|
||||||
@ -560,10 +560,6 @@ public:
|
|||||||
CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
|
CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
|
||||||
};
|
};
|
||||||
|
|
||||||
//! @} features2d_main
|
|
||||||
|
|
||||||
//! @addtogroup features2d_main
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Wrapping class for feature detection using the FAST method.
|
/** @brief Wrapping class for feature detection using the FAST method.
|
||||||
|
|
||||||
@ -615,10 +611,6 @@ Check @ref tutorial_py_fast "the corresponding tutorial" for more details.
|
|||||||
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
|
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
|
||||||
int threshold, bool nonmaxSuppression=true, FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );
|
int threshold, bool nonmaxSuppression=true, FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );
|
||||||
|
|
||||||
//! @} features2d_main
|
|
||||||
|
|
||||||
//! @addtogroup features2d_main
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Wrapping class for feature detection using the AGAST method. :
|
/** @brief Wrapping class for feature detection using the AGAST method. :
|
||||||
*/
|
*/
|
||||||
@ -776,10 +768,6 @@ public:
|
|||||||
CV_WRAP virtual const std::vector<std::vector<cv::Point> >& getBlobContours() const = 0;
|
CV_WRAP virtual const std::vector<std::vector<cv::Point> >& getBlobContours() const = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
//! @} features2d_main
|
|
||||||
|
|
||||||
//! @addtogroup features2d_main
|
|
||||||
//! @{
|
|
||||||
|
|
||||||
/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .
|
/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .
|
||||||
|
|
||||||
@ -908,7 +896,6 @@ public:
|
|||||||
CV_WRAP virtual int getMaxPoints() const = 0;
|
CV_WRAP virtual int getMaxPoints() const = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
//! @} features2d_main
|
|
||||||
|
|
||||||
/****************************************************************************************\
|
/****************************************************************************************\
|
||||||
* Distance *
|
* Distance *
|
||||||
@ -973,6 +960,8 @@ struct L1
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
//! @} features2d_main
|
||||||
|
|
||||||
/****************************************************************************************\
|
/****************************************************************************************\
|
||||||
* DescriptorMatcher *
|
* DescriptorMatcher *
|
||||||
\****************************************************************************************/
|
\****************************************************************************************/
|
||||||
@ -1414,6 +1403,9 @@ CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vect
|
|||||||
* Functions to evaluate the feature detectors and [generic] descriptor extractors *
|
* Functions to evaluate the feature detectors and [generic] descriptor extractors *
|
||||||
\****************************************************************************************/
|
\****************************************************************************************/
|
||||||
|
|
||||||
|
//! @addtogroup features2d_main
|
||||||
|
//! @{
|
||||||
|
|
||||||
CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
|
CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
|
||||||
std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
|
std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
|
||||||
float& repeatability, int& correspCount,
|
float& repeatability, int& correspCount,
|
||||||
@ -1426,6 +1418,8 @@ CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatc
|
|||||||
CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
|
CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
|
||||||
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
|
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
|
||||||
|
|
||||||
|
//! @}
|
||||||
|
|
||||||
/****************************************************************************************\
|
/****************************************************************************************\
|
||||||
* Bag of visual words *
|
* Bag of visual words *
|
||||||
\****************************************************************************************/
|
\****************************************************************************************/
|
||||||
@ -1586,8 +1580,6 @@ protected:
|
|||||||
|
|
||||||
//! @} features2d_category
|
//! @} features2d_category
|
||||||
|
|
||||||
//! @} features2d
|
|
||||||
|
|
||||||
} /* namespace cv */
|
} /* namespace cv */
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -41,6 +41,10 @@ G-API documentation is organized into the following chapters:
|
|||||||
|
|
||||||
- API Reference: functions and classes
|
- API Reference: functions and classes
|
||||||
|
|
||||||
|
- @subpage gapi_ref
|
||||||
|
|
||||||
|
Core G-API classes, data types, backends, etc.
|
||||||
|
|
||||||
- @subpage gapi_core
|
- @subpage gapi_core
|
||||||
|
|
||||||
Core G-API operations - arithmetic, boolean, and other matrix
|
Core G-API operations - arithmetic, boolean, and other matrix
|
||||||
@ -51,6 +55,14 @@ G-API documentation is organized into the following chapters:
|
|||||||
Image processing functions: color space conversions, various
|
Image processing functions: color space conversions, various
|
||||||
filters, etc.
|
filters, etc.
|
||||||
|
|
||||||
|
- @subpage gapi_video
|
||||||
|
|
||||||
|
Video processing functionality.
|
||||||
|
|
||||||
|
- @subpage gapi_draw
|
||||||
|
|
||||||
|
Drawing and composition functionality
|
||||||
|
|
||||||
# API Example {#gapi_example}
|
# API Example {#gapi_example}
|
||||||
|
|
||||||
A very basic example of G-API pipeline is shown below:
|
A very basic example of G-API pipeline is shown below:
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
# G-API Implementation details
|
# G-API Implementation details
|
||||||
|
|
||||||
Note -- this section is still in progress.
|
@note this section is still in progress.
|
||||||
|
|
||||||
# API layer {#gapi_detail_api}
|
# API layer {#gapi_detail_api}
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
|
||||||
/** \defgroup gapi G-API framework
|
/** \defgroup gapi_ref G-API framework
|
||||||
@{
|
@{
|
||||||
@defgroup gapi_main_classes G-API Main Classes
|
@defgroup gapi_main_classes G-API Main Classes
|
||||||
@defgroup gapi_data_objects G-API Data Types
|
@defgroup gapi_data_objects G-API Data Types
|
||||||
|
@ -249,6 +249,8 @@ template<typename T> struct wrap_serialize
|
|||||||
} // namespace s11n
|
} // namespace s11n
|
||||||
} // namespace gapi
|
} // namespace gapi
|
||||||
|
|
||||||
|
/** @} gapi_compile_args */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Ask G-API to dump compiled graph in Graphviz format under
|
* @brief Ask G-API to dump compiled graph in Graphviz format under
|
||||||
* the given file name.
|
* the given file name.
|
||||||
@ -261,7 +263,6 @@ struct graph_dump_path
|
|||||||
{
|
{
|
||||||
std::string m_dump_path;
|
std::string m_dump_path;
|
||||||
};
|
};
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Ask G-API to use threaded executor when cv::GComputation
|
* @brief Ask G-API to use threaded executor when cv::GComputation
|
||||||
@ -276,7 +277,6 @@ struct GAPI_EXPORTS use_threaded_executor
|
|||||||
|
|
||||||
uint32_t num_threads;
|
uint32_t num_threads;
|
||||||
};
|
};
|
||||||
/** @} */
|
|
||||||
|
|
||||||
namespace detail
|
namespace detail
|
||||||
{
|
{
|
||||||
|
@ -388,7 +388,6 @@ protected:
|
|||||||
/// @private
|
/// @private
|
||||||
std::shared_ptr<Priv> m_priv;
|
std::shared_ptr<Priv> m_priv;
|
||||||
};
|
};
|
||||||
/** @} */
|
|
||||||
|
|
||||||
namespace gapi {
|
namespace gapi {
|
||||||
|
|
||||||
@ -413,7 +412,6 @@ struct GAPI_EXPORTS_W_SIMPLE queue_capacity
|
|||||||
GAPI_PROP_RW
|
GAPI_PROP_RW
|
||||||
size_t capacity;
|
size_t capacity;
|
||||||
};
|
};
|
||||||
/** @} */
|
|
||||||
} // namespace streaming
|
} // namespace streaming
|
||||||
} // namespace gapi
|
} // namespace gapi
|
||||||
|
|
||||||
@ -425,6 +423,8 @@ template<> struct CompileArgTag<cv::gapi::streaming::queue_capacity>
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @} gapi_main_classes */
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // OPENCV_GAPI_GSTREAMING_COMPILED_HPP
|
#endif // OPENCV_GAPI_GSTREAMING_COMPILED_HPP
|
||||||
|
@ -690,7 +690,6 @@ namespace wip { namespace ov {
|
|||||||
* taking into account the i/o data transfer.
|
* taking into account the i/o data transfer.
|
||||||
*/
|
*/
|
||||||
struct benchmark_mode { };
|
struct benchmark_mode { };
|
||||||
/** @} */
|
|
||||||
|
|
||||||
} // namespace ov
|
} // namespace ov
|
||||||
} // namespace wip
|
} // namespace wip
|
||||||
|
39
modules/highgui/doc/highgui_qt.cpp
Normal file
39
modules/highgui/doc/highgui_qt.cpp
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
#include "opencv2/highgui.hpp"
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
int value = 50;
|
||||||
|
int value2 = 0;
|
||||||
|
|
||||||
|
namedWindow("main1",WINDOW_NORMAL);
|
||||||
|
namedWindow("main2",WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL);
|
||||||
|
createTrackbar( "track1", "main1", &value, 255, NULL);
|
||||||
|
|
||||||
|
String nameb1 = "button1";
|
||||||
|
String nameb2 = "button2";
|
||||||
|
|
||||||
|
createButton(nameb1,callbackButton,&nameb1,QT_CHECKBOX,1);
|
||||||
|
createButton(nameb2,callbackButton,NULL,QT_CHECKBOX,0);
|
||||||
|
createTrackbar( "track2", NULL, &value2, 255, NULL);
|
||||||
|
createButton("button5",callbackButton1,NULL,QT_RADIOBOX,0);
|
||||||
|
createButton("button6",callbackButton2,NULL,QT_RADIOBOX,1);
|
||||||
|
|
||||||
|
setMouseCallback( "main2",on_mouse,NULL );
|
||||||
|
|
||||||
|
Mat img1 = imread("files/flower.jpg");
|
||||||
|
VideoCapture video;
|
||||||
|
video.open("files/hockey.avi");
|
||||||
|
|
||||||
|
Mat img2,img3;
|
||||||
|
while( waitKey(33) != 27 )
|
||||||
|
{
|
||||||
|
img1.convertTo(img2,-1,1,value);
|
||||||
|
video >> img3;
|
||||||
|
|
||||||
|
imshow("main1",img2);
|
||||||
|
imshow("main2",img3);
|
||||||
|
}
|
||||||
|
|
||||||
|
destroyAllWindows();
|
||||||
|
return 0;
|
||||||
|
}
|
@ -85,50 +85,8 @@ It provides easy interface to:
|
|||||||
created. Then, a new button is attached to it.
|
created. Then, a new button is attached to it.
|
||||||
|
|
||||||
See below the example used to generate the figure:
|
See below the example used to generate the figure:
|
||||||
@code
|
|
||||||
int main(int argc, char *argv[])
|
|
||||||
{
|
|
||||||
|
|
||||||
int value = 50;
|
|
||||||
int value2 = 0;
|
|
||||||
|
|
||||||
|
|
||||||
namedWindow("main1",WINDOW_NORMAL);
|
|
||||||
namedWindow("main2",WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL);
|
|
||||||
createTrackbar( "track1", "main1", &value, 255, NULL);
|
|
||||||
|
|
||||||
String nameb1 = "button1";
|
|
||||||
String nameb2 = "button2";
|
|
||||||
|
|
||||||
createButton(nameb1,callbackButton,&nameb1,QT_CHECKBOX,1);
|
|
||||||
createButton(nameb2,callbackButton,NULL,QT_CHECKBOX,0);
|
|
||||||
createTrackbar( "track2", NULL, &value2, 255, NULL);
|
|
||||||
createButton("button5",callbackButton1,NULL,QT_RADIOBOX,0);
|
|
||||||
createButton("button6",callbackButton2,NULL,QT_RADIOBOX,1);
|
|
||||||
|
|
||||||
setMouseCallback( "main2",on_mouse,NULL );
|
|
||||||
|
|
||||||
Mat img1 = imread("files/flower.jpg");
|
|
||||||
VideoCapture video;
|
|
||||||
video.open("files/hockey.avi");
|
|
||||||
|
|
||||||
Mat img2,img3;
|
|
||||||
|
|
||||||
while( waitKey(33) != 27 )
|
|
||||||
{
|
|
||||||
img1.convertTo(img2,-1,1,value);
|
|
||||||
video >> img3;
|
|
||||||
|
|
||||||
imshow("main1",img2);
|
|
||||||
imshow("main2",img3);
|
|
||||||
}
|
|
||||||
|
|
||||||
destroyAllWindows();
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
@endcode
|
|
||||||
|
|
||||||
|
@include highgui_qt.cpp
|
||||||
|
|
||||||
@defgroup highgui_winrt WinRT support
|
@defgroup highgui_winrt WinRT support
|
||||||
|
|
||||||
@ -139,33 +97,33 @@ It provides easy interface to:
|
|||||||
|
|
||||||
See below the example used to generate the figure:
|
See below the example used to generate the figure:
|
||||||
@code
|
@code
|
||||||
void sample_app::MainPage::ShowWindow()
|
void sample_app::MainPage::ShowWindow()
|
||||||
|
{
|
||||||
|
static cv::String windowName("sample");
|
||||||
|
cv::winrt_initContainer(this->cvContainer);
|
||||||
|
cv::namedWindow(windowName); // not required
|
||||||
|
|
||||||
|
cv::Mat image = cv::imread("Assets/sample.jpg");
|
||||||
|
cv::Mat converted = cv::Mat(image.rows, image.cols, CV_8UC4);
|
||||||
|
cv::cvtColor(image, converted, COLOR_BGR2BGRA);
|
||||||
|
cv::imshow(windowName, converted); // this will create window if it hasn't been created before
|
||||||
|
|
||||||
|
int state = 42;
|
||||||
|
cv::TrackbarCallback callback = [](int pos, void* userdata)
|
||||||
{
|
{
|
||||||
static cv::String windowName("sample");
|
if (pos == 0) {
|
||||||
cv::winrt_initContainer(this->cvContainer);
|
cv::destroyWindow(windowName);
|
||||||
cv::namedWindow(windowName); // not required
|
}
|
||||||
|
};
|
||||||
cv::Mat image = cv::imread("Assets/sample.jpg");
|
cv::TrackbarCallback callbackTwin = [](int pos, void* userdata)
|
||||||
cv::Mat converted = cv::Mat(image.rows, image.cols, CV_8UC4);
|
{
|
||||||
cv::cvtColor(image, converted, COLOR_BGR2BGRA);
|
if (pos >= 70) {
|
||||||
cv::imshow(windowName, converted); // this will create window if it hasn't been created before
|
cv::destroyAllWindows();
|
||||||
|
}
|
||||||
int state = 42;
|
};
|
||||||
cv::TrackbarCallback callback = [](int pos, void* userdata)
|
cv::createTrackbar("Sample trackbar", windowName, &state, 100, callback);
|
||||||
{
|
cv::createTrackbar("Twin brother", windowName, &state, 100, callbackTwin);
|
||||||
if (pos == 0) {
|
}
|
||||||
cv::destroyWindow(windowName);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
cv::TrackbarCallback callbackTwin = [](int pos, void* userdata)
|
|
||||||
{
|
|
||||||
if (pos >= 70) {
|
|
||||||
cv::destroyAllWindows();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
cv::createTrackbar("Sample trackbar", windowName, &state, 100, callback);
|
|
||||||
cv::createTrackbar("Twin brother", windowName, &state, 100, callbackTwin);
|
|
||||||
}
|
|
||||||
@endcode
|
@endcode
|
||||||
@}
|
@}
|
||||||
*/
|
*/
|
||||||
|
@ -48,7 +48,6 @@
|
|||||||
/**
|
/**
|
||||||
@defgroup imgcodecs Image file reading and writing
|
@defgroup imgcodecs Image file reading and writing
|
||||||
@{
|
@{
|
||||||
@defgroup imgcodecs_c C API
|
|
||||||
@defgroup imgcodecs_flags Flags used for image file reading and writing
|
@defgroup imgcodecs_flags Flags used for image file reading and writing
|
||||||
@defgroup imgcodecs_ios iOS glue
|
@defgroup imgcodecs_ios iOS glue
|
||||||
@defgroup imgcodecs_macosx MacOS(OSX) glue
|
@defgroup imgcodecs_macosx MacOS(OSX) glue
|
||||||
@ -297,7 +296,7 @@ It also demonstrates how to save multiple images in a TIFF file:
|
|||||||
CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
|
CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
|
||||||
const std::vector<int>& params = std::vector<int>());
|
const std::vector<int>& params = std::vector<int>());
|
||||||
|
|
||||||
/// @overload multi-image overload for bindings
|
//! @brief multi-image overload for bindings
|
||||||
CV_WRAP static inline
|
CV_WRAP static inline
|
||||||
bool imwritemulti(const String& filename, InputArrayOfArrays img,
|
bool imwritemulti(const String& filename, InputArrayOfArrays img,
|
||||||
const std::vector<int>& params = std::vector<int>())
|
const std::vector<int>& params = std::vector<int>())
|
||||||
|
@ -1,10 +1,12 @@
|
|||||||
Color conversions {#imgproc_color_conversions}
|
Color conversions {#imgproc_color_conversions}
|
||||||
=================
|
=================
|
||||||
|
|
||||||
See cv::cvtColor and cv::ColorConversionCodes
|
See cv::cvtColor and cv::ColorConversionCodes
|
||||||
|
|
||||||
@todo document other conversion modes
|
@todo document other conversion modes
|
||||||
|
|
||||||
@anchor color_convert_rgb_gray
|
@anchor color_convert_rgb_gray
|
||||||
RGB \f$\leftrightarrow\f$ GRAY
|
RGB \emoji arrow_right GRAY
|
||||||
------------------------------
|
------------------------------
|
||||||
Transformations within RGB space like adding/removing the alpha channel, reversing the channel
|
Transformations within RGB space like adding/removing the alpha channel, reversing the channel
|
||||||
order, conversion to/from 16-bit RGB color (R5:G6:B5 or R5:G5:B5), as well as conversion
|
order, conversion to/from 16-bit RGB color (R5:G6:B5 or R5:G5:B5), as well as conversion
|
||||||
@ -20,7 +22,7 @@ More advanced channel reordering can also be done with cv::mixChannels.
|
|||||||
@see cv::COLOR_BGR2GRAY, cv::COLOR_RGB2GRAY, cv::COLOR_GRAY2BGR, cv::COLOR_GRAY2RGB
|
@see cv::COLOR_BGR2GRAY, cv::COLOR_RGB2GRAY, cv::COLOR_GRAY2BGR, cv::COLOR_GRAY2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_xyz
|
@anchor color_convert_rgb_xyz
|
||||||
RGB \f$\leftrightarrow\f$ CIE XYZ.Rec 709 with D65 white point
|
RGB \emoji arrow_right CIE XYZ.Rec 709 with D65 white point
|
||||||
--------------------------------------------------------------
|
--------------------------------------------------------------
|
||||||
\f[\begin{bmatrix} X \\ Y \\ Z
|
\f[\begin{bmatrix} X \\ Y \\ Z
|
||||||
\end{bmatrix} \leftarrow \begin{bmatrix} 0.412453 & 0.357580 & 0.180423 \\ 0.212671 & 0.715160 & 0.072169 \\ 0.019334 & 0.119193 & 0.950227
|
\end{bmatrix} \leftarrow \begin{bmatrix} 0.412453 & 0.357580 & 0.180423 \\ 0.212671 & 0.715160 & 0.072169 \\ 0.019334 & 0.119193 & 0.950227
|
||||||
@ -35,7 +37,7 @@ RGB \f$\leftrightarrow\f$ CIE XYZ.Rec 709 with D65 white point
|
|||||||
@see cv::COLOR_BGR2XYZ, cv::COLOR_RGB2XYZ, cv::COLOR_XYZ2BGR, cv::COLOR_XYZ2RGB
|
@see cv::COLOR_BGR2XYZ, cv::COLOR_RGB2XYZ, cv::COLOR_XYZ2BGR, cv::COLOR_XYZ2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_ycrcb
|
@anchor color_convert_rgb_ycrcb
|
||||||
RGB \f$\leftrightarrow\f$ YCrCb JPEG (or YCC)
|
RGB \emoji arrow_right YCrCb JPEG (or YCC)
|
||||||
---------------------------------------------
|
---------------------------------------------
|
||||||
\f[Y \leftarrow 0.299 \cdot R + 0.587 \cdot G + 0.114 \cdot B\f]
|
\f[Y \leftarrow 0.299 \cdot R + 0.587 \cdot G + 0.114 \cdot B\f]
|
||||||
\f[Cr \leftarrow (R-Y) \cdot 0.713 + delta\f]
|
\f[Cr \leftarrow (R-Y) \cdot 0.713 + delta\f]
|
||||||
@ -49,7 +51,7 @@ Y, Cr, and Cb cover the whole value range.
|
|||||||
@see cv::COLOR_BGR2YCrCb, cv::COLOR_RGB2YCrCb, cv::COLOR_YCrCb2BGR, cv::COLOR_YCrCb2RGB
|
@see cv::COLOR_BGR2YCrCb, cv::COLOR_RGB2YCrCb, cv::COLOR_YCrCb2BGR, cv::COLOR_YCrCb2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_hsv
|
@anchor color_convert_rgb_hsv
|
||||||
RGB \f$\leftrightarrow\f$ HSV
|
RGB \emoji arrow_right HSV
|
||||||
-----------------------------
|
-----------------------------
|
||||||
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
||||||
scaled to fit the 0 to 1 range.
|
scaled to fit the 0 to 1 range.
|
||||||
@ -71,7 +73,7 @@ The values are then converted to the destination data type:
|
|||||||
@see cv::COLOR_BGR2HSV, cv::COLOR_RGB2HSV, cv::COLOR_HSV2BGR, cv::COLOR_HSV2RGB
|
@see cv::COLOR_BGR2HSV, cv::COLOR_RGB2HSV, cv::COLOR_HSV2BGR, cv::COLOR_HSV2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_hls
|
@anchor color_convert_rgb_hls
|
||||||
RGB \f$\leftrightarrow\f$ HLS
|
RGB \emoji arrow_right HLS
|
||||||
-----------------------------
|
-----------------------------
|
||||||
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
||||||
scaled to fit the 0 to 1 range.
|
scaled to fit the 0 to 1 range.
|
||||||
@ -96,7 +98,7 @@ The values are then converted to the destination data type:
|
|||||||
@see cv::COLOR_BGR2HLS, cv::COLOR_RGB2HLS, cv::COLOR_HLS2BGR, cv::COLOR_HLS2RGB
|
@see cv::COLOR_BGR2HLS, cv::COLOR_RGB2HLS, cv::COLOR_HLS2BGR, cv::COLOR_HLS2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_lab
|
@anchor color_convert_rgb_lab
|
||||||
RGB \f$\leftrightarrow\f$ CIE L\*a\*b\*
|
RGB \emoji arrow_right CIE L\*a\*b\*
|
||||||
---------------------------------------
|
---------------------------------------
|
||||||
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
||||||
scaled to fit the 0 to 1 range.
|
scaled to fit the 0 to 1 range.
|
||||||
@ -121,7 +123,7 @@ are then converted to the destination data type:
|
|||||||
@see cv::COLOR_BGR2Lab, cv::COLOR_RGB2Lab, cv::COLOR_Lab2BGR, cv::COLOR_Lab2RGB
|
@see cv::COLOR_BGR2Lab, cv::COLOR_RGB2Lab, cv::COLOR_Lab2BGR, cv::COLOR_Lab2RGB
|
||||||
|
|
||||||
@anchor color_convert_rgb_luv
|
@anchor color_convert_rgb_luv
|
||||||
RGB \f$\leftrightarrow\f$ CIE L\*u\*v\*
|
RGB \emoji arrow_right CIE L\*u\*v\*
|
||||||
---------------------------------------
|
---------------------------------------
|
||||||
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and
|
||||||
scaled to fit 0 to 1 range.
|
scaled to fit 0 to 1 range.
|
||||||
@ -148,7 +150,7 @@ sources on the web, primarily from the Charles Poynton site <http://www.poynton.
|
|||||||
@see cv::COLOR_BGR2Luv, cv::COLOR_RGB2Luv, cv::COLOR_Luv2BGR, cv::COLOR_Luv2RGB
|
@see cv::COLOR_BGR2Luv, cv::COLOR_RGB2Luv, cv::COLOR_Luv2BGR, cv::COLOR_Luv2RGB
|
||||||
|
|
||||||
@anchor color_convert_bayer
|
@anchor color_convert_bayer
|
||||||
Bayer \f$\rightarrow\f$ RGB
|
Bayer \emoji arrow_right RGB
|
||||||
---------------------------
|
---------------------------
|
||||||
The Bayer pattern is widely used in CCD and CMOS cameras. It enables you to get color pictures
|
The Bayer pattern is widely used in CCD and CMOS cameras. It enables you to get color pictures
|
||||||
from a single plane where R, G, and B pixels (sensors of a particular component) are interleaved
|
from a single plane where R, G, and B pixels (sensors of a particular component) are interleaved
|
||||||
|
@ -46,143 +46,143 @@
|
|||||||
#include "opencv2/core.hpp"
|
#include "opencv2/core.hpp"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@defgroup imgproc Image Processing
|
@defgroup imgproc Image Processing
|
||||||
|
|
||||||
This module includes image-processing functions.
|
This module includes image-processing functions.
|
||||||
|
|
||||||
@{
|
@{
|
||||||
@defgroup imgproc_filter Image Filtering
|
@defgroup imgproc_filter Image Filtering
|
||||||
|
|
||||||
Functions and classes described in this section are used to perform various linear or non-linear
|
Functions and classes described in this section are used to perform various linear or non-linear
|
||||||
filtering operations on 2D images (represented as Mat's). It means that for each pixel location
|
filtering operations on 2D images (represented as Mat's). It means that for each pixel location
|
||||||
\f$(x,y)\f$ in the source image (normally, rectangular), its neighborhood is considered and used to
|
\f$(x,y)\f$ in the source image (normally, rectangular), its neighborhood is considered and used to
|
||||||
compute the response. In case of a linear filter, it is a weighted sum of pixel values. In case of
|
compute the response. In case of a linear filter, it is a weighted sum of pixel values. In case of
|
||||||
morphological operations, it is the minimum or maximum values, and so on. The computed response is
|
morphological operations, it is the minimum or maximum values, and so on. The computed response is
|
||||||
stored in the destination image at the same location \f$(x,y)\f$. It means that the output image
|
stored in the destination image at the same location \f$(x,y)\f$. It means that the output image
|
||||||
will be of the same size as the input image. Normally, the functions support multi-channel arrays,
|
will be of the same size as the input image. Normally, the functions support multi-channel arrays,
|
||||||
in which case every channel is processed independently. Therefore, the output image will also have
|
in which case every channel is processed independently. Therefore, the output image will also have
|
||||||
the same number of channels as the input one.
|
the same number of channels as the input one.
|
||||||
|
|
||||||
Another common feature of the functions and classes described in this section is that, unlike
|
Another common feature of the functions and classes described in this section is that, unlike
|
||||||
simple arithmetic functions, they need to extrapolate values of some non-existing pixels. For
|
simple arithmetic functions, they need to extrapolate values of some non-existing pixels. For
|
||||||
example, if you want to smooth an image using a Gaussian \f$3 \times 3\f$ filter, then, when
|
example, if you want to smooth an image using a Gaussian \f$3 \times 3\f$ filter, then, when
|
||||||
processing the left-most pixels in each row, you need pixels to the left of them, that is, outside
|
processing the left-most pixels in each row, you need pixels to the left of them, that is, outside
|
||||||
of the image. You can let these pixels be the same as the left-most image pixels ("replicated
|
of the image. You can let these pixels be the same as the left-most image pixels ("replicated
|
||||||
border" extrapolation method), or assume that all the non-existing pixels are zeros ("constant
|
border" extrapolation method), or assume that all the non-existing pixels are zeros ("constant
|
||||||
border" extrapolation method), and so on. OpenCV enables you to specify the extrapolation method.
|
border" extrapolation method), and so on. OpenCV enables you to specify the extrapolation method.
|
||||||
For details, see #BorderTypes
|
For details, see #BorderTypes
|
||||||
|
|
||||||
@anchor filter_depths
|
@anchor filter_depths
|
||||||
### Depth combinations
|
### Depth combinations
|
||||||
Input depth (src.depth()) | Output depth (ddepth)
|
Input depth (src.depth()) | Output depth (ddepth)
|
||||||
--------------------------|----------------------
|
--------------------------|----------------------
|
||||||
CV_8U | -1/CV_16S/CV_32F/CV_64F
|
CV_8U | -1/CV_16S/CV_32F/CV_64F
|
||||||
CV_16U/CV_16S | -1/CV_32F/CV_64F
|
CV_16U/CV_16S | -1/CV_32F/CV_64F
|
||||||
CV_32F | -1/CV_32F
|
CV_32F | -1/CV_32F
|
||||||
CV_64F | -1/CV_64F
|
CV_64F | -1/CV_64F
|
||||||
|
|
||||||
@note when ddepth=-1, the output image will have the same depth as the source.
|
@note when ddepth=-1, the output image will have the same depth as the source.
|
||||||
|
|
||||||
@note if you need double floating-point accuracy and using single floating-point input data
|
@note if you need double floating-point accuracy and using single floating-point input data
|
||||||
(CV_32F input and CV_64F output depth combination), you can use @ref Mat.convertTo to convert
|
(CV_32F input and CV_64F output depth combination), you can use @ref Mat.convertTo to convert
|
||||||
the input data to the desired precision.
|
the input data to the desired precision.
|
||||||
|
|
||||||
@defgroup imgproc_transform Geometric Image Transformations
|
@defgroup imgproc_transform Geometric Image Transformations
|
||||||
|
|
||||||
The functions in this section perform various geometrical transformations of 2D images. They do not
|
The functions in this section perform various geometrical transformations of 2D images. They do not
|
||||||
change the image content but deform the pixel grid and map this deformed grid to the destination
|
change the image content but deform the pixel grid and map this deformed grid to the destination
|
||||||
image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from
|
image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from
|
||||||
destination to the source. That is, for each pixel \f$(x, y)\f$ of the destination image, the
|
destination to the source. That is, for each pixel \f$(x, y)\f$ of the destination image, the
|
||||||
functions compute coordinates of the corresponding "donor" pixel in the source image and copy the
|
functions compute coordinates of the corresponding "donor" pixel in the source image and copy the
|
||||||
pixel value:
|
pixel value:
|
||||||
|
|
||||||
\f[\texttt{dst} (x,y)= \texttt{src} (f_x(x,y), f_y(x,y))\f]
|
\f[\texttt{dst} (x,y)= \texttt{src} (f_x(x,y), f_y(x,y))\f]
|
||||||
|
|
||||||
In case when you specify the forward mapping \f$\left<g_x, g_y\right>: \texttt{src} \rightarrow
|
In case when you specify the forward mapping \f$\left<g_x, g_y\right>: \texttt{src} \rightarrow
|
||||||
\texttt{dst}\f$, the OpenCV functions first compute the corresponding inverse mapping
|
\texttt{dst}\f$, the OpenCV functions first compute the corresponding inverse mapping
|
||||||
\f$\left<f_x, f_y\right>: \texttt{dst} \rightarrow \texttt{src}\f$ and then use the above formula.
|
\f$\left<f_x, f_y\right>: \texttt{dst} \rightarrow \texttt{src}\f$ and then use the above formula.
|
||||||
|
|
||||||
The actual implementations of the geometrical transformations, from the most generic remap and to
|
The actual implementations of the geometrical transformations, from the most generic remap and to
|
||||||
the simplest and the fastest resize, need to solve two main problems with the above formula:
|
the simplest and the fastest resize, need to solve two main problems with the above formula:
|
||||||
|
|
||||||
- Extrapolation of non-existing pixels. Similarly to the filtering functions described in the
|
- Extrapolation of non-existing pixels. Similarly to the filtering functions described in the
|
||||||
previous section, for some \f$(x,y)\f$, either one of \f$f_x(x,y)\f$, or \f$f_y(x,y)\f$, or both
|
previous section, for some \f$(x,y)\f$, either one of \f$f_x(x,y)\f$, or \f$f_y(x,y)\f$, or both
|
||||||
of them may fall outside of the image. In this case, an extrapolation method needs to be used.
|
of them may fall outside of the image. In this case, an extrapolation method needs to be used.
|
||||||
OpenCV provides the same selection of extrapolation methods as in the filtering functions. In
|
OpenCV provides the same selection of extrapolation methods as in the filtering functions. In
|
||||||
addition, it provides the method #BORDER_TRANSPARENT. This means that the corresponding pixels in
|
addition, it provides the method #BORDER_TRANSPARENT. This means that the corresponding pixels in
|
||||||
the destination image will not be modified at all.
|
the destination image will not be modified at all.
|
||||||
|
|
||||||
- Interpolation of pixel values. Usually \f$f_x(x,y)\f$ and \f$f_y(x,y)\f$ are floating-point
|
- Interpolation of pixel values. Usually \f$f_x(x,y)\f$ and \f$f_y(x,y)\f$ are floating-point
|
||||||
numbers. This means that \f$\left<f_x, f_y\right>\f$ can be either an affine or perspective
|
numbers. This means that \f$\left<f_x, f_y\right>\f$ can be either an affine or perspective
|
||||||
transformation, or radial lens distortion correction, and so on. So, a pixel value at fractional
|
transformation, or radial lens distortion correction, and so on. So, a pixel value at fractional
|
||||||
coordinates needs to be retrieved. In the simplest case, the coordinates can be just rounded to the
|
coordinates needs to be retrieved. In the simplest case, the coordinates can be just rounded to the
|
||||||
nearest integer coordinates and the corresponding pixel can be used. This is called a
|
nearest integer coordinates and the corresponding pixel can be used. This is called a
|
||||||
nearest-neighbor interpolation. However, a better result can be achieved by using more
|
nearest-neighbor interpolation. However, a better result can be achieved by using more
|
||||||
sophisticated [interpolation methods](http://en.wikipedia.org/wiki/Multivariate_interpolation) ,
|
sophisticated [interpolation methods](http://en.wikipedia.org/wiki/Multivariate_interpolation) ,
|
||||||
where a polynomial function is fit into some neighborhood of the computed pixel \f$(f_x(x,y),
|
where a polynomial function is fit into some neighborhood of the computed pixel \f$(f_x(x,y),
|
||||||
f_y(x,y))\f$, and then the value of the polynomial at \f$(f_x(x,y), f_y(x,y))\f$ is taken as the
|
f_y(x,y))\f$, and then the value of the polynomial at \f$(f_x(x,y), f_y(x,y))\f$ is taken as the
|
||||||
interpolated pixel value. In OpenCV, you can choose between several interpolation methods. See
|
interpolated pixel value. In OpenCV, you can choose between several interpolation methods. See
|
||||||
#resize for details.
|
#resize for details.
|
||||||
|
|
||||||
@note The geometrical transformations do not work with `CV_8S` or `CV_32S` images.
|
@note The geometrical transformations do not work with `CV_8S` or `CV_32S` images.
|
||||||
|
|
||||||
@defgroup imgproc_misc Miscellaneous Image Transformations
|
@defgroup imgproc_misc Miscellaneous Image Transformations
|
||||||
@defgroup imgproc_draw Drawing Functions
|
@defgroup imgproc_draw Drawing Functions
|
||||||
|
|
||||||
Drawing functions work with matrices/images of arbitrary depth. The boundaries of the shapes can be
|
Drawing functions work with matrices/images of arbitrary depth. The boundaries of the shapes can be
|
||||||
rendered with antialiasing (implemented only for 8-bit images for now). All the functions include
|
rendered with antialiasing (implemented only for 8-bit images for now). All the functions include
|
||||||
the parameter color that uses an RGB value (that may be constructed with the Scalar constructor )
|
the parameter color that uses an RGB value (that may be constructed with the Scalar constructor )
|
||||||
for color images and brightness for grayscale images. For color images, the channel ordering is
|
for color images and brightness for grayscale images. For color images, the channel ordering is
|
||||||
normally *Blue, Green, Red*. This is what imshow, imread, and imwrite expect. So, if you form a
|
normally *Blue, Green, Red*. This is what imshow, imread, and imwrite expect. So, if you form a
|
||||||
color using the Scalar constructor, it should look like:
|
color using the Scalar constructor, it should look like:
|
||||||
|
|
||||||
\f[\texttt{Scalar} (blue \_ component, green \_ component, red \_ component[, alpha \_ component])\f]
|
\f[\texttt{Scalar} (blue \_ component, green \_ component, red \_ component[, alpha \_ component])\f]
|
||||||
|
|
||||||
If you are using your own image rendering and I/O functions, you can use any channel ordering. The
|
If you are using your own image rendering and I/O functions, you can use any channel ordering. The
|
||||||
drawing functions process each channel independently and do not depend on the channel order or even
|
drawing functions process each channel independently and do not depend on the channel order or even
|
||||||
on the used color space. The whole image can be converted from BGR to RGB or to a different color
|
on the used color space. The whole image can be converted from BGR to RGB or to a different color
|
||||||
space using cvtColor .
|
space using cvtColor .
|
||||||
|
|
||||||
If a drawn figure is partially or completely outside the image, the drawing functions clip it. Also,
|
If a drawn figure is partially or completely outside the image, the drawing functions clip it. Also,
|
||||||
many drawing functions can handle pixel coordinates specified with sub-pixel accuracy. This means
|
many drawing functions can handle pixel coordinates specified with sub-pixel accuracy. This means
|
||||||
that the coordinates can be passed as fixed-point numbers encoded as integers. The number of
|
that the coordinates can be passed as fixed-point numbers encoded as integers. The number of
|
||||||
fractional bits is specified by the shift parameter and the real point coordinates are calculated as
|
fractional bits is specified by the shift parameter and the real point coordinates are calculated as
|
||||||
\f$\texttt{Point}(x,y)\rightarrow\texttt{Point2f}(x*2^{-shift},y*2^{-shift})\f$ . This feature is
|
\f$\texttt{Point}(x,y)\rightarrow\texttt{Point2f}(x*2^{-shift},y*2^{-shift})\f$ . This feature is
|
||||||
especially effective when rendering antialiased shapes.
|
especially effective when rendering antialiased shapes.
|
||||||
|
|
||||||
@note The functions do not support alpha-transparency when the target image is 4-channel. In this
|
@note The functions do not support alpha-transparency when the target image is 4-channel. In this
|
||||||
case, the color[3] is simply copied to the repainted pixels. Thus, if you want to paint
|
case, the color[3] is simply copied to the repainted pixels. Thus, if you want to paint
|
||||||
semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main
|
semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main
|
||||||
image.
|
image.
|
||||||
|
|
||||||
@defgroup imgproc_color_conversions Color Space Conversions
|
@defgroup imgproc_color_conversions Color Space Conversions
|
||||||
@defgroup imgproc_colormap ColorMaps in OpenCV
|
@defgroup imgproc_colormap ColorMaps in OpenCV
|
||||||
|
|
||||||
The human perception isn't built for observing fine changes in grayscale images. Human eyes are more
|
The human perception isn't built for observing fine changes in grayscale images. Human eyes are more
|
||||||
sensitive to observing changes between colors, so you often need to recolor your grayscale images to
|
sensitive to observing changes between colors, so you often need to recolor your grayscale images to
|
||||||
get a clue about them. OpenCV now comes with various colormaps to enhance the visualization in your
|
get a clue about them. OpenCV now comes with various colormaps to enhance the visualization in your
|
||||||
computer vision application.
|
computer vision application.
|
||||||
|
|
||||||
In OpenCV you only need applyColorMap to apply a colormap on a given image. The following sample
|
In OpenCV you only need applyColorMap to apply a colormap on a given image. The following sample
|
||||||
code reads the path to an image from command line, applies a Jet colormap on it and shows the
|
code reads the path to an image from command line, applies a Jet colormap on it and shows the
|
||||||
result:
|
result:
|
||||||
|
|
||||||
@include snippets/imgproc_applyColorMap.cpp
|
@include snippets/imgproc_applyColorMap.cpp
|
||||||
|
|
||||||
@see #ColormapTypes
|
@see #ColormapTypes
|
||||||
|
|
||||||
@defgroup imgproc_subdiv2d Planar Subdivision
|
@defgroup imgproc_subdiv2d Planar Subdivision
|
||||||
|
|
||||||
The Subdiv2D class described in this section is used to perform various planar subdivision on
|
The Subdiv2D class described in this section is used to perform various planar subdivision on
|
||||||
a set of 2D points (represented as vector of Point2f). OpenCV subdivides a plane into triangles
|
a set of 2D points (represented as vector of Point2f). OpenCV subdivides a plane into triangles
|
||||||
using the Delaunay's algorithm, which corresponds to the dual graph of the Voronoi diagram.
|
using the Delaunay's algorithm, which corresponds to the dual graph of the Voronoi diagram.
|
||||||
In the figure below, the Delaunay's triangulation is marked with black lines and the Voronoi
|
In the figure below, the Delaunay's triangulation is marked with black lines and the Voronoi
|
||||||
diagram with red lines.
|
diagram with red lines.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The subdivisions can be used for the 3D piece-wise transformation of a plane, morphing, fast
|
The subdivisions can be used for the 3D piece-wise transformation of a plane, morphing, fast
|
||||||
location of points on the plane, building special graphs (such as NNG,RNG), and so forth.
|
location of points on the plane, building special graphs (such as NNG,RNG), and so forth.
|
||||||
|
|
||||||
@defgroup imgproc_hist Histograms
|
@defgroup imgproc_hist Histograms
|
||||||
@defgroup imgproc_shape Structural Analysis and Shape Descriptors
|
@defgroup imgproc_shape Structural Analysis and Shape Descriptors
|
||||||
|
@ -52,7 +52,7 @@
|
|||||||
var pos = url.lastIndexOf('/javadoc/');
|
var pos = url.lastIndexOf('/javadoc/');
|
||||||
url = pos >= 0 ? (url.substring(0, pos) + '/javadoc/mymath.js') : (window.location.origin + '/mymath.js');
|
url = pos >= 0 ? (url.substring(0, pos) + '/javadoc/mymath.js') : (window.location.origin + '/mymath.js');
|
||||||
var script = document.createElement('script');
|
var script = document.createElement('script');
|
||||||
script.src = '@OPENCV_MATHJAX_RELPATH@/MathJax.js?config=TeX-AMS-MML_HTMLorMML,' + url;
|
script.src = '@OPENCV_MATHJAX_RELPATH@/es5/tex-chtml.js,' + url;
|
||||||
document.getElementsByTagName('head')[0].appendChild(script);
|
document.getElementsByTagName('head')[0].appendChild(script);
|
||||||
</script>
|
</script>
|
||||||
]]>
|
]]>
|
||||||
|
@ -54,59 +54,61 @@
|
|||||||
@{
|
@{
|
||||||
@defgroup objdetect_cascade_classifier Cascade Classifier for Object Detection
|
@defgroup objdetect_cascade_classifier Cascade Classifier for Object Detection
|
||||||
|
|
||||||
The object detector described below has been initially proposed by Paul Viola @cite Viola01 and
|
The object detector described below has been initially proposed by Paul Viola @cite Viola01 and
|
||||||
improved by Rainer Lienhart @cite Lienhart02 .
|
improved by Rainer Lienhart @cite Lienhart02 .
|
||||||
|
|
||||||
First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
|
First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
|
||||||
trained with a few hundred sample views of a particular object (i.e., a face or a car), called
|
trained with a few hundred sample views of a particular object (i.e., a face or a car), called
|
||||||
positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary
|
positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary
|
||||||
images of the same size.
|
images of the same size.
|
||||||
|
|
||||||
After a classifier is trained, it can be applied to a region of interest (of the same size as used
|
After a classifier is trained, it can be applied to a region of interest (of the same size as used
|
||||||
during the training) in an input image. The classifier outputs a "1" if the region is likely to show
|
during the training) in an input image. The classifier outputs a "1" if the region is likely to show
|
||||||
the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can
|
the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can
|
||||||
move the search window across the image and check every location using the classifier. The
|
move the search window across the image and check every location using the classifier. The
|
||||||
classifier is designed so that it can be easily "resized" in order to be able to find the objects of
|
classifier is designed so that it can be easily "resized" in order to be able to find the objects of
|
||||||
interest at different sizes, which is more efficient than resizing the image itself. So, to find an
|
interest at different sizes, which is more efficient than resizing the image itself. So, to find an
|
||||||
object of an unknown size in the image the scan procedure should be done several times at different
|
object of an unknown size in the image the scan procedure should be done several times at different
|
||||||
scales.
|
scales.
|
||||||
|
|
||||||
The word "cascade" in the classifier name means that the resultant classifier consists of several
|
The word "cascade" in the classifier name means that the resultant classifier consists of several
|
||||||
simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some
|
simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some
|
||||||
stage the candidate is rejected or all the stages are passed. The word "boosted" means that the
|
stage the candidate is rejected or all the stages are passed. The word "boosted" means that the
|
||||||
classifiers at every stage of the cascade are complex themselves and they are built out of basic
|
classifiers at every stage of the cascade are complex themselves and they are built out of basic
|
||||||
classifiers using one of four different boosting techniques (weighted voting). Currently Discrete
|
classifiers using one of four different boosting techniques (weighted voting). Currently Discrete
|
||||||
Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are
|
Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are
|
||||||
decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic
|
decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic
|
||||||
classifiers, and are calculated as described below. The current algorithm uses the following
|
classifiers, and are calculated as described below. The current algorithm uses the following
|
||||||
Haar-like features:
|
Haar-like features:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within
|
The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within
|
||||||
the region of interest and the scale (this scale is not the same as the scale used at the detection
|
the region of interest and the scale (this scale is not the same as the scale used at the detection
|
||||||
stage, though these two scales are multiplied). For example, in the case of the third line feature
|
stage, though these two scales are multiplied). For example, in the case of the third line feature
|
||||||
(2c) the response is calculated as the difference between the sum of image pixels under the
|
(2c) the response is calculated as the difference between the sum of image pixels under the
|
||||||
rectangle covering the whole feature (including the two white stripes and the black stripe in the
|
rectangle covering the whole feature (including the two white stripes and the black stripe in the
|
||||||
middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to
|
middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to
|
||||||
compensate for the differences in the size of areas. The sums of pixel values over a rectangular
|
compensate for the differences in the size of areas. The sums of pixel values over a rectangular
|
||||||
regions are calculated rapidly using integral images (see below and the integral description).
|
regions are calculated rapidly using integral images (see below and the integral description).
|
||||||
|
|
||||||
Check @ref tutorial_cascade_classifier "the corresponding tutorial" for more details.
|
Check @ref tutorial_cascade_classifier "the corresponding tutorial" for more details.
|
||||||
|
|
||||||
The following reference is for the detection part only. There is a separate application called
|
The following reference is for the detection part only. There is a separate application called
|
||||||
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
|
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
|
||||||
|
|
||||||
@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
|
@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
|
||||||
addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
|
addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
|
||||||
using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
|
using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
|
||||||
<https://github.com/SvHey/thesis/blob/master/Literature/ObjectDetection/violaJones_CVPR2001.pdf>
|
<https://github.com/SvHey/thesis/blob/master/Literature/ObjectDetection/violaJones_CVPR2001.pdf>
|
||||||
|
|
||||||
@defgroup objdetect_hog HOG (Histogram of Oriented Gradients) descriptor and object detector
|
@defgroup objdetect_hog HOG (Histogram of Oriented Gradients) descriptor and object detector
|
||||||
@defgroup objdetect_barcode Barcode detection and decoding
|
@defgroup objdetect_barcode Barcode detection and decoding
|
||||||
@defgroup objdetect_qrcode QRCode detection and encoding
|
@defgroup objdetect_qrcode QRCode detection and encoding
|
||||||
@defgroup objdetect_dnn_face DNN-based face detection and recognition
|
@defgroup objdetect_dnn_face DNN-based face detection and recognition
|
||||||
Check @ref tutorial_dnn_face "the corresponding tutorial" for more details.
|
|
||||||
|
Check @ref tutorial_dnn_face "the corresponding tutorial" for more details.
|
||||||
|
|
||||||
@defgroup objdetect_common Common functions and classes
|
@defgroup objdetect_common Common functions and classes
|
||||||
@defgroup objdetect_aruco ArUco markers and boards detection for robust camera pose estimation
|
@defgroup objdetect_aruco ArUco markers and boards detection for robust camera pose estimation
|
||||||
@{
|
@{
|
||||||
|
@ -55,30 +55,29 @@ This module includes photo processing algorithms
|
|||||||
@defgroup photo_denoise Denoising
|
@defgroup photo_denoise Denoising
|
||||||
@defgroup photo_hdr HDR imaging
|
@defgroup photo_hdr HDR imaging
|
||||||
|
|
||||||
This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,
|
This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,
|
||||||
camera calibration with multiple exposures and exposure fusion.
|
camera calibration with multiple exposures and exposure fusion.
|
||||||
|
|
||||||
@defgroup photo_decolor Contrast Preserving Decolorization
|
@defgroup photo_decolor Contrast Preserving Decolorization
|
||||||
|
|
||||||
Useful links:
|
Useful links:
|
||||||
|
|
||||||
http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html
|
http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html
|
||||||
|
|
||||||
@defgroup photo_clone Seamless Cloning
|
@defgroup photo_clone Seamless Cloning
|
||||||
|
|
||||||
Useful links:
|
Useful links:
|
||||||
|
|
||||||
https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp
|
https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp
|
||||||
|
|
||||||
@defgroup photo_render Non-Photorealistic Rendering
|
@defgroup photo_render Non-Photorealistic Rendering
|
||||||
|
|
||||||
Useful links:
|
Useful links:
|
||||||
|
|
||||||
http://www.inf.ufrgs.br/~eslgastal/DomainTransform
|
http://www.inf.ufrgs.br/~eslgastal/DomainTransform
|
||||||
|
|
||||||
https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
|
https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
|
||||||
|
|
||||||
@defgroup photo_c C API
|
|
||||||
@}
|
@}
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -217,7 +217,7 @@ camera.
|
|||||||
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
|
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
|
||||||
camera.
|
camera.
|
||||||
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
|
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
|
||||||
@param flags Operation flags that may be zero or @ref CALIB_ZERO_DISPARITY . If the flag is set,
|
@param flags Operation flags that may be zero or @ref cv::CALIB_ZERO_DISPARITY . If the flag is set,
|
||||||
the function makes the principal points of each camera have the same pixel coordinates in the
|
the function makes the principal points of each camera have the same pixel coordinates in the
|
||||||
rectified views. And if the flag is not set, the function may still shift the images in the
|
rectified views. And if the flag is not set, the function may still shift the images in the
|
||||||
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
|
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
|
||||||
|
@ -49,7 +49,6 @@
|
|||||||
@{
|
@{
|
||||||
@defgroup video_motion Motion Analysis
|
@defgroup video_motion Motion Analysis
|
||||||
@defgroup video_track Object Tracking
|
@defgroup video_track Object Tracking
|
||||||
@defgroup video_c C API
|
|
||||||
@}
|
@}
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -1,12 +1,14 @@
|
|||||||
Video I/O with OpenCV Overview {#videoio_overview}
|
Video I/O with OpenCV Overview {#videoio_overview}
|
||||||
===================================
|
==============================
|
||||||
|
|
||||||
### See also:
|
@tableofcontents
|
||||||
|
|
||||||
|
@sa
|
||||||
- @ref videoio "Video I/O Code Reference"
|
- @ref videoio "Video I/O Code Reference"
|
||||||
- Tutorials: @ref tutorial_table_of_content_app
|
- Tutorials: @ref tutorial_table_of_content_app
|
||||||
|
|
||||||
General Information
|
General Information
|
||||||
===================
|
-------------------
|
||||||
|
|
||||||
The OpenCV @ref videoio module is a set of classes and functions to read and write video or images sequence.
|
The OpenCV @ref videoio module is a set of classes and functions to read and write video or images sequence.
|
||||||
|
|
||||||
@ -53,10 +55,11 @@ cv::VideoCapture cap(filename, cv::CAP_MSMF);
|
|||||||
//or specify the apiPreference with open
|
//or specify the apiPreference with open
|
||||||
cap.open(filename, cv::CAP_MSMF);
|
cap.open(filename, cv::CAP_MSMF);
|
||||||
```
|
```
|
||||||
|
|
||||||
@sa cv::VideoCapture::open() , cv::VideoCapture::VideoCapture()
|
@sa cv::VideoCapture::open() , cv::VideoCapture::VideoCapture()
|
||||||
|
|
||||||
#### How to enable backends
|
|
||||||
|
How to enable backends
|
||||||
|
----------------------
|
||||||
|
|
||||||
There are two kinds of videoio backends: built-in backends and plugins which will be loaded at runtime (since OpenCV 4.1.0). Use functions cv::videoio_registry::getBackends, cv::videoio_registry::hasBackend and cv::videoio_registry::getBackendName to check actual presence of backend during runtime.
|
There are two kinds of videoio backends: built-in backends and plugins which will be loaded at runtime (since OpenCV 4.1.0). Use functions cv::videoio_registry::getBackends, cv::videoio_registry::hasBackend and cv::videoio_registry::getBackendName to check actual presence of backend during runtime.
|
||||||
|
|
||||||
@ -71,7 +74,9 @@ To enable dynamically-loaded videoio backend (currently supported: GStreamer and
|
|||||||
|
|
||||||
@note Don't forget to clean CMake cache when switching between these two modes
|
@note Don't forget to clean CMake cache when switching between these two modes
|
||||||
|
|
||||||
#### Use 3rd party drivers or cameras
|
|
||||||
|
Use 3rd party drivers or cameras
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
Many industrial cameras or some video I/O devices don't provide standard driver interfaces
|
Many industrial cameras or some video I/O devices don't provide standard driver interfaces
|
||||||
for the operating system. Thus you can't use VideoCapture or VideoWriter with these devices.
|
for the operating system. Thus you can't use VideoCapture or VideoWriter with these devices.
|
||||||
@ -83,6 +88,7 @@ It is a common case that these libraries read/write images from/to a memory buff
|
|||||||
possible to make a `Mat` header for memory buffer (user-allocated data) and process it
|
possible to make a `Mat` header for memory buffer (user-allocated data) and process it
|
||||||
in-place using OpenCV functions. See cv::Mat::Mat() for more details.
|
in-place using OpenCV functions. See cv::Mat::Mat() for more details.
|
||||||
|
|
||||||
|
|
||||||
The FFmpeg library
|
The FFmpeg library
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
|
@ -45,9 +45,9 @@ int main( int argc, char** argv )
|
|||||||
printf( " ** Press 'r' to set the border to be replicated \n");
|
printf( " ** Press 'r' to set the border to be replicated \n");
|
||||||
printf( " ** Press 'ESC' to exit the program \n");
|
printf( " ** Press 'ESC' to exit the program \n");
|
||||||
|
|
||||||
//![create_window]
|
//![create_window]
|
||||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||||
//![create_window]
|
//![create_window]
|
||||||
|
|
||||||
//![init_arguments]
|
//![init_arguments]
|
||||||
// Initialize arguments for the filter
|
// Initialize arguments for the filter
|
||||||
|
Loading…
Reference in New Issue
Block a user