diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2d288af83d..7dccb9e838 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -255,6 +255,7 @@ OCV_OPTION(WITH_ITT "Include Intel ITT support" ON
# ===================================================
OCV_OPTION(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" NOT (ANDROID OR APPLE_FRAMEWORK) )
OCV_OPTION(BUILD_opencv_apps "Build utility applications (used for example to train classifiers)" (NOT ANDROID AND NOT WINRT) IF (NOT APPLE_FRAMEWORK) )
+OCV_OPTION(BUILD_opencv_js "Build JavaScript bindings by Emscripten" OFF )
OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" ON IF ANDROID )
OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON IF (NOT WINRT OR APPLE_FRAMEWORK))
OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF )
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 424340958b..1f6550f8b3 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -33,8 +33,9 @@ endif(HAVE_DOC_GENERATOR)
if(BUILD_DOCS AND DOXYGEN_FOUND)
# not documented modules list
- list(APPEND blacklist "ts" "java" "python2" "python3" "world" "contrib_world")
+ list(APPEND blacklist "ts" "java" "python2" "python3" "js" "world" "contrib_world")
unset(CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT)
+ unset(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT)
# gathering headers
set(paths_include)
@@ -134,11 +135,13 @@ if(BUILD_DOCS AND DOXYGEN_FOUND)
set(faqfile "${CMAKE_CURRENT_SOURCE_DIR}/faq.markdown")
set(tutorial_path "${CMAKE_CURRENT_SOURCE_DIR}/tutorials")
set(tutorial_py_path "${CMAKE_CURRENT_SOURCE_DIR}/py_tutorials")
+ set(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT "- @ref tutorial_js_root")
+ set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials")
set(example_path "${CMAKE_SOURCE_DIR}/samples")
# set export variables
- string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
- string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${paths_tutorial}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
+ string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial}")
# TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${example_path} ; ${paths_doc} ; ${paths_sample}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml")
@@ -163,9 +166,43 @@ if(BUILD_DOCS AND DOXYGEN_FOUND)
configure_file(Doxyfile.in ${doxyfile} @ONLY)
configure_file(root.markdown.in ${rootfile} @ONLY)
+ # js tutorial assets
+ set(opencv_tutorial_html_dir "${CMAKE_CURRENT_BINARY_DIR}/doxygen/html")
+ set(js_tutorials_assets_dir "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials/js_assets")
+ set(js_tutorials_assets_deps "")
+
+ # make sure the build directory exists
+ file(MAKE_DIRECTORY "${opencv_tutorial_html_dir}")
+
+ # gather and copy specific files for js tutorials
+ file(GLOB_RECURSE js_assets "${js_tutorials_assets_dir}/*")
+ ocv_list_filterout(js_assets "\\\\.eslintrc.json")
+ list(APPEND js_assets "${OpenCV_SOURCE_DIR}/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.mp4")
+
+ if(BUILD_opencv_js)
+ set(ocv_js_dir "${CMAKE_BINARY_DIR}/bin")
+ set(ocv_js "opencv.js")
+ list(APPEND js_assets "${ocv_js_dir}/${ocv_js}")
+ endif()
+
+ # copy haar cascade files
+ set(haar_cascade_files "")
+  set(data_haarcascades_path "${OpenCV_SOURCE_DIR}/data/haarcascades")
+  list(APPEND js_assets "${data_haarcascades_path}/haarcascade_frontalface_default.xml" "${data_haarcascades_path}/haarcascade_eye.xml")
+
+ foreach(f ${js_assets})
+ get_filename_component(fname "${f}" NAME)
+ add_custom_command(OUTPUT "${opencv_tutorial_html_dir}/${fname}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${f}" "${opencv_tutorial_html_dir}/${fname}"
+ DEPENDS "${f}"
+ COMMENT "Copying ${fname}"
+ )
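+    # Depend on both the source asset and its copy so the doxygen target
+    # below re-runs the copy step whenever an asset changes.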
+ list(APPEND js_tutorials_assets_deps "${f}" "${opencv_tutorial_html_dir}/${fname}")
+ endforeach()
+
add_custom_target(doxygen
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
- DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps}
+ DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps} ${js_tutorials_assets_deps}
)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html
DESTINATION "${OPENCV_DOC_INSTALL_PATH}"
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 1db5427338..1276a6b9c8 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -174,7 +174,7 @@ FORMULA_FONTSIZE = 14
FORMULA_TRANSPARENT = YES
USE_MATHJAX = YES
MATHJAX_FORMAT = HTML-CSS
-MATHJAX_RELPATH = http://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0
+MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0
MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
MATHJAX_CODEFILE = @CMAKE_CURRENT_SOURCE_DIR@/mymath.js
SEARCHENGINE = YES
diff --git a/doc/js_tutorials/js_assets/.eslintrc.json b/doc/js_tutorials/js_assets/.eslintrc.json
new file mode 100644
index 0000000000..abe9037a7d
--- /dev/null
+++ b/doc/js_tutorials/js_assets/.eslintrc.json
@@ -0,0 +1,22 @@
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "ObjectExpression": "first",
+ "CallExpression": {"arguments": "first"},
+ "SwitchCase": 1}],
+ "require-jsdoc": "off",
+ "new-cap": "off"
+ },
+ "plugins": ["html"],
+ "settings": {
+ "html/javascript-mime-types": ["text/javascript", "text/code-snippet"],
+ "html/indent": "0",
+ "html/report-bad-indent": "error"
+ }
+}
diff --git a/doc/js_tutorials/js_assets/apple.jpg b/doc/js_tutorials/js_assets/apple.jpg
new file mode 100644
index 0000000000..a00252b92f
Binary files /dev/null and b/doc/js_tutorials/js_assets/apple.jpg differ
diff --git a/doc/js_tutorials/js_assets/coins.jpg b/doc/js_tutorials/js_assets/coins.jpg
new file mode 100644
index 0000000000..bf55f35a51
Binary files /dev/null and b/doc/js_tutorials/js_assets/coins.jpg differ
diff --git a/doc/js_tutorials/js_assets/cup.mp4 b/doc/js_tutorials/js_assets/cup.mp4
new file mode 100644
index 0000000000..fbe79aa782
Binary files /dev/null and b/doc/js_tutorials/js_assets/cup.mp4 differ
diff --git a/doc/js_tutorials/js_assets/handDst.jpg b/doc/js_tutorials/js_assets/handDst.jpg
new file mode 100644
index 0000000000..5ed148a047
Binary files /dev/null and b/doc/js_tutorials/js_assets/handDst.jpg differ
diff --git a/doc/js_tutorials/js_assets/handSrc.jpg b/doc/js_tutorials/js_assets/handSrc.jpg
new file mode 100644
index 0000000000..083ab56341
Binary files /dev/null and b/doc/js_tutorials/js_assets/handSrc.jpg differ
diff --git a/doc/js_tutorials/js_assets/js_basic_ops_copymakeborder.html b/doc/js_tutorials/js_assets/js_basic_ops_copymakeborder.html
new file mode 100644
index 0000000000..152db303c3
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_basic_ops_copymakeborder.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Padding Example
+
+
+
+Image Padding Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_basic_ops_roi.html b/doc/js_tutorials/js_assets/js_basic_ops_roi.html
new file mode 100644
index 0000000000..6d08f8729d
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_basic_ops_roi.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image ROI Example
+
+
+
+Image ROI Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_bg_subtraction.html b/doc/js_tutorials/js_assets/js_bg_subtraction.html
new file mode 100644
index 0000000000..79f860c18f
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_bg_subtraction.html
@@ -0,0 +1,126 @@
+
+
+
+
+Background Subtraction Example
+
+
+
+Background Subtraction Example
+
+ Click Start/Stop button to start or stop the camera capture.
+ The videoInput is a <video> element used as input.
+ The canvasOutput is a <canvas> element used as output.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/doc/js_tutorials/js_assets/js_camshift.html b/doc/js_tutorials/js_assets/js_camshift.html
new file mode 100644
index 0000000000..046ab20efd
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_camshift.html
@@ -0,0 +1,172 @@
+
+
+
+
+CamShift Example
+
+
+
+CamShift Example
+
+ Click Start/Stop button to start or stop the video.
+ The videoInput is a <video> element used as CamShift input.
+ The canvasOutput is a <canvas> element used as CamShift output.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_canny.html b/doc/js_tutorials/js_assets/js_canny.html
new file mode 100644
index 0000000000..7101b73787
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_canny.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Canny Example
+
+
+
+Image Canny Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_colorspaces_cvtColor.html b/doc/js_tutorials/js_assets/js_colorspaces_cvtColor.html
new file mode 100644
index 0000000000..db0b474116
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_colorspaces_cvtColor.html
@@ -0,0 +1,57 @@
+
+
+
+
+Convert Color Example
+
+
+
+Convert Color Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_colorspaces_inRange.html b/doc/js_tutorials/js_assets/js_colorspaces_inRange.html
new file mode 100644
index 0000000000..1574085fa4
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_colorspaces_inRange.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image InRange Example
+
+
+
+Image InRange Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_approxPolyDP.html b/doc/js_tutorials/js_assets/js_contour_features_approxPolyDP.html
new file mode 100644
index 0000000000..ed3aafc563
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_approxPolyDP.html
@@ -0,0 +1,76 @@
+
+
+
+
+Image ApproxPolyDP Example
+
+
+
+Image ApproxPolyDP Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_area.html b/doc/js_tutorials/js_assets/js_contour_features_area.html
new file mode 100644
index 0000000000..5aff938fb9
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_area.html
@@ -0,0 +1,63 @@
+
+
+
+
+Image Area Example
+
+
+
+Image Area Example
+
+    A <canvas> element named canvasInput has been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_boundingRect.html b/doc/js_tutorials/js_assets/js_contour_features_boundingRect.html
new file mode 100644
index 0000000000..0fc168867f
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_boundingRect.html
@@ -0,0 +1,69 @@
+
+
+
+
+Bounding Rect Example
+
+
+
+Bounding Rect Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_convexHull.html b/doc/js_tutorials/js_assets/js_contour_features_convexHull.html
new file mode 100644
index 0000000000..67b83f728b
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_convexHull.html
@@ -0,0 +1,76 @@
+
+
+
+
+Convex Hull Example
+
+
+
+Convex Hull Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_fitEllipse.html b/doc/js_tutorials/js_assets/js_contour_features_fitEllipse.html
new file mode 100644
index 0000000000..4e0efdecf0
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_fitEllipse.html
@@ -0,0 +1,67 @@
+
+
+
+
+Fit Ellipse Example
+
+
+
+Fit Ellipse Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_fitLine.html b/doc/js_tutorials/js_assets/js_contour_features_fitLine.html
new file mode 100644
index 0000000000..4a1fa7623d
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_fitLine.html
@@ -0,0 +1,76 @@
+
+
+
+
+Fit Line Example
+
+
+
+Fit Line Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_minAreaRect.html b/doc/js_tutorials/js_assets/js_contour_features_minAreaRect.html
new file mode 100644
index 0000000000..518c97a26c
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_minAreaRect.html
@@ -0,0 +1,71 @@
+
+
+
+
+Min Area Rect Example
+
+
+
+Min Area Rect Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_minEnclosingCircle.html b/doc/js_tutorials/js_assets/js_contour_features_minEnclosingCircle.html
new file mode 100644
index 0000000000..1bfd1fb6ea
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_minEnclosingCircle.html
@@ -0,0 +1,67 @@
+
+
+
+
+Min Enclosing Circle Example
+
+
+
+Min Enclosing Circle Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_moments.html b/doc/js_tutorials/js_assets/js_contour_features_moments.html
new file mode 100644
index 0000000000..f9f2b049dd
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_moments.html
@@ -0,0 +1,62 @@
+
+
+
+
+Image Moments Example
+
+
+
+Image Moments Example
+
+    A <canvas> element named canvasInput has been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_features_perimeter.html b/doc/js_tutorials/js_assets/js_contour_features_perimeter.html
new file mode 100644
index 0000000000..c773cc9ff5
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_features_perimeter.html
@@ -0,0 +1,62 @@
+
+
+
+
+Image Perimeter Example
+
+
+
+Image Perimeter Example
+
+    A <canvas> element named canvasInput has been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contour_properties_transpose.html b/doc/js_tutorials/js_assets/js_contour_properties_transpose.html
new file mode 100644
index 0000000000..04e8c37477
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contour_properties_transpose.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Transpose Example
+
+
+
+Image Transpose Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contours_begin_contours.html b/doc/js_tutorials/js_assets/js_contours_begin_contours.html
new file mode 100644
index 0000000000..7fe9a94622
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contours_begin_contours.html
@@ -0,0 +1,67 @@
+
+
+
+
+Image Contours Example
+
+
+
+Image Contours Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contours_more_functions_convexityDefects.html b/doc/js_tutorials/js_assets/js_contours_more_functions_convexityDefects.html
new file mode 100644
index 0000000000..5df0762a2b
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contours_more_functions_convexityDefects.html
@@ -0,0 +1,77 @@
+
+
+
+
+Convexity Defects Example
+
+
+
+Convexity Defects Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_contours_more_functions_shape.html b/doc/js_tutorials/js_assets/js_contours_more_functions_shape.html
new file mode 100644
index 0000000000..529b40e5eb
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_contours_more_functions_shape.html
@@ -0,0 +1,73 @@
+
+
+
+
+Match Shape Example
+
+
+
+Match Shape Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_example_style.css b/doc/js_tutorials/js_assets/js_example_style.css
new file mode 100644
index 0000000000..7e2e30f4a1
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_example_style.css
@@ -0,0 +1,71 @@
+body, div, p {
+ font: 400 14px/22px Roboto,sans-serif;
+}
+canvas, img, video {
+ border: 1px solid black;
+}
+td {
+ padding: 10px 0px 0px 10px;
+ text-align: center;
+}
+button {
+ display: inline-block;
+ color: #fff;
+ background-color: #337ab7;
+ border-color: #2e6da4;
+ padding: 6px 12px;
+ margin-bottom: 0;
+ font-size: 14px;
+ font-weight: bold;
+ text-align: center;
+ white-space: nowrap;
+ vertical-align: middle;
+ -ms-touch-action: manipulation;
+ touch-action: manipulation;
+ cursor: pointer;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ background-image: none;
+ border: 1px solid transparent;
+ border-radius: 4px;
+}
+button[disabled] {
+ cursor: not-allowed;
+ filter: alpha(opacity=65);
+ -webkit-box-shadow: none;
+ box-shadow: none;
+ opacity: .65;
+}
+.control {
+ margin-bottom: 3px;
+}
+.err {
+ color: red;
+ font-weight: bold;
+}
+.inputoutput {
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+.caption {
+ margin: 0;
+ font-weight: bold;
+}
+.code {
+ padding: 4px 6px;
+ margin: 4px 8px 4px 2px;
+ background-color: #FBFCFD;
+ border: 1px solid #C4CFE5;
+ font-family: monospace, fixed;
+ font-size: 13px;
+ min-height: 13px;
+ line-height: 1.0;
+ text-wrap: unrestricted;
+ padding-bottom: 0px;
+ margin: 0px;
+}
+.hidden {
+ display: none;
+}
diff --git a/doc/js_tutorials/js_assets/js_face_detection.html b/doc/js_tutorials/js_assets/js_face_detection.html
new file mode 100644
index 0000000000..627e104b68
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_face_detection.html
@@ -0,0 +1,94 @@
+
+
+
+
+Face Detection Example
+
+
+
+Face Detection Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_face_detection_camera.html b/doc/js_tutorials/js_assets/js_face_detection_camera.html
new file mode 100644
index 0000000000..36dcce792f
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_face_detection_camera.html
@@ -0,0 +1,147 @@
+
+
+
+
+Face Detection Camera Example
+
+
+
+Face Detection Camera Example
+
+ Click Start/Stop button to start or stop the camera capture.
+ The videoInput is a <video> element used as face detector input.
+ The canvasOutput is a <canvas> element used as face detector output.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_filtering_GaussianBlur.html b/doc/js_tutorials/js_assets/js_filtering_GaussianBlur.html
new file mode 100644
index 0000000000..c204e50253
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_filtering_GaussianBlur.html
@@ -0,0 +1,58 @@
+
+
+
+
+Gaussian Blur Example
+
+
+
+Gaussian Blur Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_filtering_bilateralFilter.html b/doc/js_tutorials/js_assets/js_filtering_bilateralFilter.html
new file mode 100644
index 0000000000..3144ae6903
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_filtering_bilateralFilter.html
@@ -0,0 +1,58 @@
+
+
+
+
+Bilateral Filter Example
+
+
+
+Bilateral Filter Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_filtering_blur.html b/doc/js_tutorials/js_assets/js_filtering_blur.html
new file mode 100644
index 0000000000..cad49249c3
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_filtering_blur.html
@@ -0,0 +1,60 @@
+
+
+
+
+Image Blur Example
+
+
+
+Image Blur Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_filtering_filter.html b/doc/js_tutorials/js_assets/js_filtering_filter.html
new file mode 100644
index 0000000000..dffa65ebd2
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_filtering_filter.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Filter Example
+
+
+
+Image Filter Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_filtering_medianBlur.html b/doc/js_tutorials/js_assets/js_filtering_medianBlur.html
new file mode 100644
index 0000000000..26e4eeb845
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_filtering_medianBlur.html
@@ -0,0 +1,57 @@
+
+
+
+
+Median Blur Example
+
+
+
+Median Blur Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_fourier_transform_dft.html b/doc/js_tutorials/js_assets/js_fourier_transform_dft.html
new file mode 100644
index 0000000000..53e7cae76c
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_fourier_transform_dft.html
@@ -0,0 +1,118 @@
+
+
+
+
+Image DFT Example
+
+
+
+Image DFT Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_geometric_transformations_getAffineTransform.html b/doc/js_tutorials/js_assets/js_geometric_transformations_getAffineTransform.html
new file mode 100644
index 0000000000..c0eb3099ca
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_geometric_transformations_getAffineTransform.html
@@ -0,0 +1,64 @@
+
+
+
+
+Get Affine Transform Example
+
+
+
+Get Affine Transform Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_geometric_transformations_resize.html b/doc/js_tutorials/js_assets/js_geometric_transformations_resize.html
new file mode 100644
index 0000000000..ab0a8793ec
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_geometric_transformations_resize.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Resize Example
+
+
+
+Image Resize Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_geometric_transformations_rotateWarpAffine.html b/doc/js_tutorials/js_assets/js_geometric_transformations_rotateWarpAffine.html
new file mode 100644
index 0000000000..88df91261c
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_geometric_transformations_rotateWarpAffine.html
@@ -0,0 +1,60 @@
+
+
+
+
+Rotate Transform Example
+
+
+
+Rotate Transform Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_geometric_transformations_warpAffine.html b/doc/js_tutorials/js_assets/js_geometric_transformations_warpAffine.html
new file mode 100644
index 0000000000..047cbd8852
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_geometric_transformations_warpAffine.html
@@ -0,0 +1,59 @@
+
+
+
+
+Affine Transform Example
+
+
+
+Affine Transform Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_geometric_transformations_warpPerspective.html b/doc/js_tutorials/js_assets/js_geometric_transformations_warpPerspective.html
new file mode 100644
index 0000000000..69588518fc
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_geometric_transformations_warpPerspective.html
@@ -0,0 +1,65 @@
+
+
+
+
+Perspective Transform Example
+
+
+
+Perspective Transform Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_grabcut_grabCut.html b/doc/js_tutorials/js_assets/js_grabcut_grabCut.html
new file mode 100644
index 0000000000..732d9ef8e2
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_grabcut_grabCut.html
@@ -0,0 +1,75 @@
+
+
+
+
+Image GrabCut Example
+
+
+
+Image GrabCut Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_gradients_Laplacian.html b/doc/js_tutorials/js_assets/js_gradients_Laplacian.html
new file mode 100644
index 0000000000..198ee091c5
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_gradients_Laplacian.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Laplacian Example
+
+
+
+Image Laplacian Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_gradients_Sobel.html b/doc/js_tutorials/js_assets/js_gradients_Sobel.html
new file mode 100644
index 0000000000..9ef8efa055
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_gradients_Sobel.html
@@ -0,0 +1,67 @@
+
+
+
+
+Image Sobel Example
+
+
+
+Image Sobel Example
+
+    <canvas> elements named canvasInput, canvasOutputx and canvasOutputy have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_gradients_absSobel.html b/doc/js_tutorials/js_assets/js_gradients_absSobel.html
new file mode 100644
index 0000000000..86d6bb306f
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_gradients_absSobel.html
@@ -0,0 +1,66 @@
+
+
+
+
+Image AbsSobel Example
+
+
+
+Image AbsSobel Example
+
+    <canvas> elements named canvasInput, canvasOutput8U and canvasOutput64F have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_histogram_backprojection_calcBackProject.html b/doc/js_tutorials/js_assets/js_histogram_backprojection_calcBackProject.html
new file mode 100644
index 0000000000..dc79945b96
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_histogram_backprojection_calcBackProject.html
@@ -0,0 +1,78 @@
+
+
+
+
+Back Project Example
+
+
+
+Back Project Example
+
+    <canvas> elements named srcCanvasInput, dstCanvasInput and canvasInput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_histogram_begins_calcHist.html b/doc/js_tutorials/js_assets/js_histogram_begins_calcHist.html
new file mode 100644
index 0000000000..ef2383adda
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_histogram_begins_calcHist.html
@@ -0,0 +1,78 @@
+
+
+
+
+Image Histogram Example
+
+
+
+Image Histogram Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_histogram_equalization_createCLAHE.html b/doc/js_tutorials/js_assets/js_histogram_equalization_createCLAHE.html
new file mode 100644
index 0000000000..a739095e4b
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_histogram_equalization_createCLAHE.html
@@ -0,0 +1,63 @@
+
+
+
+
+Image CLAHE Example
+
+
+
+Image CLAHE Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_histogram_equalization_equalizeHist.html b/doc/js_tutorials/js_assets/js_histogram_equalization_equalizeHist.html
new file mode 100644
index 0000000000..de6a6779b8
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_histogram_equalization_equalizeHist.html
@@ -0,0 +1,58 @@
+
+
+
+
+Equalize Histogram Example
+
+
+
+Equalize Histogram Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_houghcircles_HoughCirclesP.html b/doc/js_tutorials/js_assets/js_houghcircles_HoughCirclesP.html
new file mode 100644
index 0000000000..20243865f1
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_houghcircles_HoughCirclesP.html
@@ -0,0 +1,69 @@
+
+
+
+
+Hough Circles Example
+
+
+
+Hough Circles Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_houghlines_HoughLines.html b/doc/js_tutorials/js_assets/js_houghlines_HoughLines.html
new file mode 100644
index 0000000000..1a382673cc
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_houghlines_HoughLines.html
@@ -0,0 +1,73 @@
+
+
+
+
+Hough Lines Example
+
+
+
+Hough Lines Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_houghlines_HoughLinesP.html b/doc/js_tutorials/js_assets/js_houghlines_HoughLinesP.html
new file mode 100644
index 0000000000..db499ad410
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_houghlines_HoughLinesP.html
@@ -0,0 +1,67 @@
+
+
+
+
+Image HoughLinesP Example
+
+
+
+Image HoughLinesP Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_image_arithmetics_bitwise.html b/doc/js_tutorials/js_assets/js_image_arithmetics_bitwise.html
new file mode 100644
index 0000000000..e35e3573d2
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_image_arithmetics_bitwise.html
@@ -0,0 +1,94 @@
+
+
+
+
+Image Bitwise Example
+
+
+
+Image Bitwise Example
+
+    <canvas> elements named imageCanvasInput, logoCanvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_image_display.html b/doc/js_tutorials/js_assets/js_image_display.html
new file mode 100644
index 0000000000..a309912431
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_image_display.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Read and Show Example
+
+
+
+Image Read and Show Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_imgproc_camera.html b/doc/js_tutorials/js_assets/js_imgproc_camera.html
new file mode 100644
index 0000000000..2df68d7f33
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_imgproc_camera.html
@@ -0,0 +1,700 @@
+
+
+
+
+Image Processing Video Example
+
+
+
+
+Image Processing Video Example
+
+ Open the controls and try different image processing filters.
+
+
+
+
+
+
+
+
+ Current Filter: Pass Through
+
+
+
+ Select Filter:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Your browser does not support the video tag.
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_meanshift.html b/doc/js_tutorials/js_assets/js_meanshift.html
new file mode 100644
index 0000000000..9e29002c8d
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_meanshift.html
@@ -0,0 +1,170 @@
+
+
+
+
+MeanShift Example
+
+
+
+MeanShift Example
+
+ Click Start/Stop button to start or stop the video.
+ The videoInput is a <video> element used as meanShift input.
+ The canvasOutput is a <canvas> element used as meanShift output.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_blackHat.html b/doc/js_tutorials/js_assets/js_morphological_ops_blackHat.html
new file mode 100644
index 0000000000..063dd67528
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_blackHat.html
@@ -0,0 +1,59 @@
+
+
+
+
+Black Hat Example
+
+
+
+Black Hat Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_closing.html b/doc/js_tutorials/js_assets/js_morphological_ops_closing.html
new file mode 100644
index 0000000000..585208f391
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_closing.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Closing Example
+
+
+
+Image Closing Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_dilate.html b/doc/js_tutorials/js_assets/js_morphological_ops_dilate.html
new file mode 100644
index 0000000000..ed578fb06b
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_dilate.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Dilate Example
+
+
+
+Image Dilate Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_erode.html b/doc/js_tutorials/js_assets/js_morphological_ops_erode.html
new file mode 100644
index 0000000000..86fe282477
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_erode.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Erode Example
+
+
+
+Image Erode Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_getStructuringElement.html b/doc/js_tutorials/js_assets/js_morphological_ops_getStructuringElement.html
new file mode 100644
index 0000000000..4a90f8c6ea
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_getStructuringElement.html
@@ -0,0 +1,61 @@
+
+
+
+
+Get Structuring Element Example
+
+
+
+Get Structuring Element Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_gradient.html b/doc/js_tutorials/js_assets/js_morphological_ops_gradient.html
new file mode 100644
index 0000000000..46d5c3c367
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_gradient.html
@@ -0,0 +1,59 @@
+
+
+
+
+Image Gradient Example
+
+
+
+Image Gradient Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_opening.html b/doc/js_tutorials/js_assets/js_morphological_ops_opening.html
new file mode 100644
index 0000000000..1afd34037f
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_opening.html
@@ -0,0 +1,60 @@
+
+
+
+
+Image Opening Example
+
+
+
+Image Opening Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_morphological_ops_topHat.html b/doc/js_tutorials/js_assets/js_morphological_ops_topHat.html
new file mode 100644
index 0000000000..6781b3856b
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_morphological_ops_topHat.html
@@ -0,0 +1,59 @@
+
+
+
+
+Top Hat Example
+
+
+
+Top Hat Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_optical_flow_dense.html b/doc/js_tutorials/js_assets/js_optical_flow_dense.html
new file mode 100644
index 0000000000..83bd597c38
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_optical_flow_dense.html
@@ -0,0 +1,163 @@
+
+
+
+
+Dense Optical Flow Example
+
+
+
+Dense Optical Flow Example
+
+ Click Start/Stop button to start or stop the video.
+ The videoInput is a <video> element used as input.
+ The canvasOutput is a <canvas> element used as output.
+ We get a 2-channel array with optical flow vectors, (u,v). We find their magnitude and direction.
+    We color code the result for better visualization: direction corresponds to the hue of the image,
+    and magnitude corresponds to the value plane.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_optical_flow_lucas_kanade.html b/doc/js_tutorials/js_assets/js_optical_flow_lucas_kanade.html
new file mode 100644
index 0000000000..91f4d210e1
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_optical_flow_lucas_kanade.html
@@ -0,0 +1,190 @@
+
+
+
+
+Lucas-Kanade Optical Flow Example
+
+
+
+Lucas-Kanade Optical Flow Example
+
+ Click Start/Stop button to start or stop the video.
+ The videoInput is a <video> element used as input.
+ The canvasOutput is a <canvas> element used as output.
+    To decide the points, we use cv.goodFeaturesToTrack(). We take the first frame, detect some Shi-Tomasi corner points in it, and then iteratively track those points using cv.calcOpticalFlowPyrLK().
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_pyramids_pyrDown.html b/doc/js_tutorials/js_assets/js_pyramids_pyrDown.html
new file mode 100644
index 0000000000..c664d4a769
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_pyramids_pyrDown.html
@@ -0,0 +1,57 @@
+
+
+
+
+Image PyrDown Example
+
+
+
+Image PyrDown Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_pyramids_pyrUp.html b/doc/js_tutorials/js_assets/js_pyramids_pyrUp.html
new file mode 100644
index 0000000000..16b77f0fdf
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_pyramids_pyrUp.html
@@ -0,0 +1,57 @@
+
+
+
+
+Image PyrUp Example
+
+
+
+Image PyrUp Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_setup_usage.html b/doc/js_tutorials/js_assets/js_setup_usage.html
new file mode 100644
index 0000000000..fce929027e
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_setup_usage.html
@@ -0,0 +1,50 @@
+
+
+
+
+Hello OpenCV.js
+
+
+
+Hello OpenCV.js
+OpenCV.js is loading...
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_template_matching_matchTemplate.html b/doc/js_tutorials/js_assets/js_template_matching_matchTemplate.html
new file mode 100644
index 0000000000..0532c7d7f4
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_template_matching_matchTemplate.html
@@ -0,0 +1,69 @@
+
+
+
+
+Template Match Example
+
+
+
+Template Match Example
+
+    <canvas> elements named imageCanvasInput, templateCanvasInput
+ and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_thresholding_adaptiveThreshold.html b/doc/js_tutorials/js_assets/js_thresholding_adaptiveThreshold.html
new file mode 100644
index 0000000000..a7a2a3e817
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_thresholding_adaptiveThreshold.html
@@ -0,0 +1,59 @@
+
+
+
+
+Adaptive Threshold Example
+
+
+
+Adaptive Threshold Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_thresholding_threshold.html b/doc/js_tutorials/js_assets/js_thresholding_threshold.html
new file mode 100644
index 0000000000..24cb9ab116
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_thresholding_threshold.html
@@ -0,0 +1,58 @@
+
+
+
+
+Image Threshold Example
+
+
+
+Image Threshold Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_trackbar.html b/doc/js_tutorials/js_assets/js_trackbar.html
new file mode 100644
index 0000000000..ff9818f3b3
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_trackbar.html
@@ -0,0 +1,105 @@
+
+
+
+
+Trackbar Example
+
+
+
+Trackbar Example
+
+    <canvas> elements named canvasInput1, canvasInput2 and canvasOutput have been prepared.
+    The code in the <textarea> will be executed when the value of the <input> element named trackbar changes.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_video_display.html b/doc/js_tutorials/js_assets/js_video_display.html
new file mode 100644
index 0000000000..f59c380bd4
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_video_display.html
@@ -0,0 +1,120 @@
+
+
+
+
+Video Capture Example
+
+
+
+Video Capture Example
+
+ Click Start/Stop button to start or stop the camera capture.
+ The videoInput is a <video> element used as OpenCV.js input.
+    The canvasOutput is a <canvas> element used as OpenCV.js output.
+ The code of <textarea> will be executed when video is started.
+ You can modify the code to investigate more.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ videoInput
+
+
+ canvasOutput
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_watershed_background.html b/doc/js_tutorials/js_assets/js_watershed_background.html
new file mode 100644
index 0000000000..243878876e
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_watershed_background.html
@@ -0,0 +1,67 @@
+
+
+
+
+Image Background Example
+
+
+
+Image Background Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_watershed_distanceTransform.html b/doc/js_tutorials/js_assets/js_watershed_distanceTransform.html
new file mode 100644
index 0000000000..e3857e4761
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_watershed_distanceTransform.html
@@ -0,0 +1,72 @@
+
+
+
+
+Distance Transform Example
+
+
+
+Distance Transform Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_watershed_foreground.html b/doc/js_tutorials/js_assets/js_watershed_foreground.html
new file mode 100644
index 0000000000..c41f156802
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_watershed_foreground.html
@@ -0,0 +1,73 @@
+
+
+
+
+Image Foreground Example
+
+
+
+Image Foreground Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_watershed_threshold.html b/doc/js_tutorials/js_assets/js_watershed_threshold.html
new file mode 100644
index 0000000000..b0f118e360
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_watershed_threshold.html
@@ -0,0 +1,61 @@
+
+
+
+
+Image Threshold Example
+
+
+
+Image Threshold Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/js_watershed_watershed.html b/doc/js_tutorials/js_assets/js_watershed_watershed.html
new file mode 100644
index 0000000000..bab7fa7bd4
--- /dev/null
+++ b/doc/js_tutorials/js_assets/js_watershed_watershed.html
@@ -0,0 +1,100 @@
+
+
+
+
+Image Watershed Example
+
+
+
+Image Watershed Example
+
+ <canvas> elements named canvasInput and canvasOutput have been prepared.
+ Click Try it button to see the result. You can choose another image.
+ You can change the code in the <textarea> to investigate more.
+
+
+
+
+
+
+
+
diff --git a/doc/js_tutorials/js_assets/lena.jpg b/doc/js_tutorials/js_assets/lena.jpg
new file mode 100644
index 0000000000..1e544aab3a
Binary files /dev/null and b/doc/js_tutorials/js_assets/lena.jpg differ
diff --git a/doc/js_tutorials/js_assets/lenaFace.png b/doc/js_tutorials/js_assets/lenaFace.png
new file mode 100644
index 0000000000..d59376b1c9
Binary files /dev/null and b/doc/js_tutorials/js_assets/lenaFace.png differ
diff --git a/doc/js_tutorials/js_assets/opencv_logo.jpg b/doc/js_tutorials/js_assets/opencv_logo.jpg
new file mode 100644
index 0000000000..a2854e1e9e
Binary files /dev/null and b/doc/js_tutorials/js_assets/opencv_logo.jpg differ
diff --git a/doc/js_tutorials/js_assets/orange.jpg b/doc/js_tutorials/js_assets/orange.jpg
new file mode 100644
index 0000000000..1566afb5aa
Binary files /dev/null and b/doc/js_tutorials/js_assets/orange.jpg differ
diff --git a/doc/js_tutorials/js_assets/shape.jpg b/doc/js_tutorials/js_assets/shape.jpg
new file mode 100644
index 0000000000..a4acdcc2fb
Binary files /dev/null and b/doc/js_tutorials/js_assets/shape.jpg differ
diff --git a/doc/js_tutorials/js_assets/utils.js b/doc/js_tutorials/js_assets/utils.js
new file mode 100644
index 0000000000..d59cceea69
--- /dev/null
+++ b/doc/js_tutorials/js_assets/utils.js
@@ -0,0 +1,132 @@
+function Utils(errorOutputId) { // eslint-disable-line no-unused-vars
+ let self = this;
+ this.errorOutput = document.getElementById(errorOutputId);
+
+ const OPENCV_URL = 'opencv.js';
+ this.loadOpenCv = function(onloadCallback) {
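+        // Inject a <script> tag before the first existing script so that
+        // opencv.js loads asynchronously; onloadCallback runs once it is loaded.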
+ let script = document.createElement('script');
+ script.setAttribute('async', '');
+ script.setAttribute('type', 'text/javascript');
+ script.addEventListener('load', () => {
+ console.log(cv.getBuildInformation());
+ onloadCallback();
+ });
+ script.addEventListener('error', () => {
+ this.printError('Failed to load ' + OPENCV_URL);
+ });
+ script.src = OPENCV_URL;
+ let node = document.getElementsByTagName('script')[0];
+ node.parentNode.insertBefore(script, node);
+ };
+
+    this.loadImageToCanvas = function(url, canvasId) {
+        let canvas = document.getElementById(canvasId);
+ let ctx = canvas.getContext('2d');
+ let img = new Image();
+ img.crossOrigin = 'anonymous';
+ img.onload = function() {
+ canvas.width = img.width;
+ canvas.height = img.height;
+ ctx.drawImage(img, 0, 0, img.width, img.height);
+ };
+ img.src = url;
+ };
+
+ this.executeCode = function(textAreaId) {
+ try {
+ this.clearError();
+ let code = document.getElementById(textAreaId).value;
+ eval(code);
+ } catch (err) {
+ this.printError(err);
+ }
+ };
+
+ this.clearError = function() {
+ this.errorOutput.innerHTML = '';
+ };
+
+ this.printError = function(err) {
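+        // Exceptions thrown from the WASM side arrive as numeric pointers (or
+        // as strings starting with one); decode them via cv.exceptionFromPtr.
+        // Plain Error objects are flattened to a single-line stack trace.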
+ if (typeof err === 'undefined') {
+ err = '';
+ } else if (typeof err === 'number') {
+ if (!isNaN(err)) {
+ if (typeof cv !== 'undefined') {
+ err = 'Exception: ' + cv.exceptionFromPtr(err).msg;
+ }
+ }
+ } else if (typeof err === 'string') {
+ let ptr = Number(err.split(' ')[0]);
+ if (!isNaN(ptr)) {
+ if (typeof cv !== 'undefined') {
+ err = 'Exception: ' + cv.exceptionFromPtr(ptr).msg;
+ }
+ }
+ } else if (err instanceof Error) {
+ err = err.stack.replace(/\n/g, ' ');
+ }
+ this.errorOutput.innerHTML = err;
+ };
+
+ this.loadCode = function(scriptId, textAreaId) {
+ let scriptNode = document.getElementById(scriptId);
+ let textArea = document.getElementById(textAreaId);
+ if (scriptNode.type !== 'text/code-snippet') {
+ throw Error('Unknown code snippet type');
+ }
+ textArea.value = scriptNode.text.replace(/^\n/, '');
+ };
+
+ this.addFileInputHandler = function(fileInputId, canvasId) {
+ let inputElement = document.getElementById(fileInputId);
+ inputElement.addEventListener('change', (e) => {
+ let imgUrl = URL.createObjectURL(e.target.files[0]);
+            self.loadImageToCanvas(imgUrl, canvasId);
+ }, false);
+ };
+
+ function onVideoCanPlay() {
+ if (self.onCameraStartedCallback) {
+ self.onCameraStartedCallback(self.stream, self.video);
+ }
+ };
+
+ this.startCamera = function(resolution, callback, videoId) {
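+        // Map the named presets to getUserMedia constraints; any other value
+        // falls back to the browser's default camera resolution.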
+ const constraints = {
+ 'qvga': {width: {exact: 320}, height: {exact: 240}},
+ 'vga': {width: {exact: 640}, height: {exact: 480}}};
+ let video = document.getElementById(videoId);
+ if (!video) {
+ video = document.createElement('video');
+ }
+
+ let videoConstraint = constraints[resolution];
+ if (!videoConstraint) {
+ videoConstraint = true;
+ }
+
+ navigator.mediaDevices.getUserMedia({video: videoConstraint, audio: false})
+ .then(function(stream) {
+ video.srcObject = stream;
+ video.play();
+ self.video = video;
+ self.stream = stream;
+ self.onCameraStartedCallback = callback;
+ video.addEventListener('canplay', onVideoCanPlay, false);
+ })
+ .catch(function(err) {
+ self.printError('Camera Error: ' + err.name + ' ' + err.message);
+ });
+ };
+
+ this.stopCamera = function() {
+ if (this.video) {
+ this.video.pause();
+ this.video.srcObject = null;
+ this.video.removeEventListener('canplay', onVideoCanPlay);
+ }
+ if (this.stream) {
+ this.stream.getVideoTracks()[0].stop();
+ }
+ };
+};
diff --git a/doc/js_tutorials/js_core/js_basic_ops/js_basic_ops.markdown b/doc/js_tutorials/js_core/js_basic_ops/js_basic_ops.markdown
new file mode 100644
index 0000000000..79ca0862c1
--- /dev/null
+++ b/doc/js_tutorials/js_core/js_basic_ops/js_basic_ops.markdown
@@ -0,0 +1,266 @@
+Basic Operations on Images {#tutorial_js_basic_ops}
+==========================
+
+Goal
+----
+
+- Learn how to access image properties
+- Learn how to construct Mat
+- Learn how to copy Mat
+- Learn how to convert the type of Mat
+- Learn how to use MatVector
+- Learn how to access pixel values and modify them
+- Learn how to set Region of Interest (ROI)
+- Learn how to split and merge images
+
+Accessing Image Properties
+--------------------------
+
+Image properties include the number of rows and columns, the size, depth, channels, and type of the image data.
+
+@code{.js}
+let src = cv.imread("canvasInput");
+console.log('image width: ' + src.cols + '\n' +
+ 'image height: ' + src.rows + '\n' +
+            'image size: ' + src.size().width + '*' + src.size().height + '\n' +
+ 'image depth: ' + src.depth() + '\n' +
+            'image channels: ' + src.channels() + '\n' +
+ 'image type: ' + src.type() + '\n');
+@endcode
+
+@note src.type() is very important while debugging, because a large number of errors in OpenCV.js
+code are caused by an invalid data type.
+
+How to construct Mat
+--------------------
+
+There are 4 basic constructors:
+
+@code{.js}
+// 1. default constructor
+let mat = new cv.Mat();
+// 2. two-dimensional arrays by size and type
+let mat = new cv.Mat(size, type);
+// 3. two-dimensional arrays by rows, cols, and type
+let mat = new cv.Mat(rows, cols, type);
+// 4. two-dimensional arrays by rows, cols, and type with initialization value
+let mat = new cv.Mat(rows, cols, type, new cv.Scalar());
+@endcode
+
+There are 3 static functions:
+
+@code{.js}
+// 1. Create a Mat which is full of zeros
+let mat = cv.Mat.zeros(rows, cols, type);
+// 2. Create a Mat which is full of ones
+let mat = cv.Mat.ones(rows, cols, type);
+// 3. Create a Mat which is an identity matrix
+let mat = cv.Mat.eye(rows, cols, type);
+@endcode
+
+There are 2 factory functions:
+@code{.js}
+// 1. Use JS array to construct a mat.
+// For example: let mat = cv.matFromArray(2, 2, cv.CV_8UC1, [1, 2, 3, 4]);
+let mat = cv.matFromArray(rows, cols, type, array);
+// 2. Use imgData to construct a mat
+let ctx = canvas.getContext("2d");
+let imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+let mat = cv.matFromImageData(imgData);
+@endcode
+
+@note Don't forget to delete cv.Mat when you don't want to use it any more.
+
+How to copy Mat
+---------------
+
+There are 2 ways to copy a Mat:
+
+@code{.js}
+// 1. Clone
+let dst = src.clone();
+// 2. copyTo (only the entries indicated in the mask are copied)
+src.copyTo(dst, mask);
+@endcode
+
+How to convert the type of Mat
+------------------------------
+
+We use the function: **convertTo(m, rtype, alpha = 1, beta = 0)**
+@param m output matrix; if it does not have a proper size or type before the operation, it is reallocated.
+@param rtype desired output matrix type or, rather, the depth since the number of channels are the same as the input has; if rtype is negative, the output matrix will have the same type as the input.
+@param alpha optional scale factor.
+@param beta optional delta added to the scaled values.
+
+@code{.js}
+src.convertTo(dst, rtype);
+@endcode
+
+How to use MatVector
+-----------------
+
+@code{.js}
+let mat = new cv.Mat();
+// Initialise a MatVector
+let matVec = new cv.MatVector();
+// Push a Mat back into MatVector
+matVec.push_back(mat);
+// Get a Mat from MatVector
+let cnt = matVec.get(0);
+mat.delete(); matVec.delete(); cnt.delete();
+@endcode
+
+@note Don't forget to delete cv.Mat, cv.MatVector and cnt (the Mat you get from MatVector) when you don't want to use them any more.
+
+Accessing and Modifying pixel values
+------------------------------------
+
+Firstly, you should know the following type relationship:
+
+Data Properties | C++ Type | JavaScript Typed Array | Mat Type
+--------------- | -------- | ---------------------- | --------
+data | uchar | Uint8Array | CV_8U
+data8S | char | Int8Array | CV_8S
+data16U | ushort | Uint16Array | CV_16U
+data16S | short | Int16Array | CV_16S
+data32S | int | Int32Array | CV_32S
+data32F | float | Float32Array | CV_32F
+data64F | double | Float64Array | CV_64F
+
+**1. data**
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+if (src.isContinuous()) {
+ let R = src.data[row * src.cols * src.channels() + col * src.channels()];
+ let G = src.data[row * src.cols * src.channels() + col * src.channels() + 1];
+ let B = src.data[row * src.cols * src.channels() + col * src.channels() + 2];
+ let A = src.data[row * src.cols * src.channels() + col * src.channels() + 3];
+}
+@endcode
+
+@note Data manipulation is only valid for continuous Mat. You should use isContinuous() to check first.
+
+**2. at**
+
+Mat Type | At Manipulation
+--------- | ---------------
+CV_8U | ucharAt
+CV_8S | charAt
+CV_16U | ushortAt
+CV_16S | shortAt
+CV_32S | intAt
+CV_32F | floatAt
+CV_64F | doubleAt
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+let R = src.ucharAt(row, col * src.channels());
+let G = src.ucharAt(row, col * src.channels() + 1);
+let B = src.ucharAt(row, col * src.channels() + 2);
+let A = src.ucharAt(row, col * src.channels() + 3);
+@endcode
+
+@note At manipulation is only for single channel access and the value can't be modified.
+
+**3. ptr**
+
+Mat Type | Ptr Manipulation | JavaScript Typed Array
+-------- | --------------- | ----------------------
+CV_8U | ucharPtr | Uint8Array
+CV_8S | charPtr | Int8Array
+CV_16U | ushortPtr | Uint16Array
+CV_16S | shortPtr | Int16Array
+CV_32S | intPtr | Int32Array
+CV_32F | floatPtr | Float32Array
+CV_64F | doublePtr | Float64Array
+
+@code{.js}
+let row = 3, col = 4;
+let src = cv.imread("canvasInput");
+let pixel = src.ucharPtr(row, col);
+let R = pixel[0];
+let G = pixel[1];
+let B = pixel[2];
+let A = pixel[3];
+@endcode
+
+mat.ucharPtr(k) gets the k-th row of the mat, while mat.ucharPtr(i, j) gets the element at the i-th row and the j-th column of the mat.
+
+Image ROI
+---------
+
+Sometimes you will have to play with a certain region of an image. For eye detection in images, face
+detection is done first over the whole image, and when a face is found, we select the face region alone
+and search for eyes inside it instead of searching the whole image. It improves accuracy (because eyes
+are always on faces) and performance (because we search in a small area).
+
+We use the function: **roi (rect)**
+@param rect rectangle Region of Interest.
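+
+For example, a minimal sketch, assuming the input canvas holds an image of at least 300×300 pixels
+(the canvas ids are illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+// extract a 200x200 region whose top-left corner is at (100, 100)
+let rect = new cv.Rect(100, 100, 200, 200);
+let dst = src.roi(rect);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode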
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+
+Splitting and Merging Image Channels
+------------------------------------
+
+Sometimes you will need to work separately on the R, G and B channels of an image. Then you need to
+split the RGB image into single planes. At other times, you may need to join these individual channels
+back into an RGB image.
+
+@code{.js}
+let src = cv.imread("canvasInput");
+let rgbaPlanes = new cv.MatVector();
+// Split the Mat
+cv.split(src, rgbaPlanes);
+// Get R channel
+let R = rgbaPlanes.get(0);
+// Merge all channels
+cv.merge(rgbaPlanes, src);
+src.delete(); rgbaPlanes.delete(); R.delete();
+@endcode
+
+@note Don't forget to delete cv.Mat, cv.MatVector and R (the Mat you get from MatVector) when you don't want to use them any more.
+
+Making Borders for Images (Padding)
+-----------------------------------
+
+If you want to create a border around the image, something like a photo frame, you can use the
+**cv.copyMakeBorder()** function. But it has more applications, such as convolution operations, zero
+padding, etc. This function takes the following arguments:
+
+- **src** - input image
+- **top**, **bottom**, **left**, **right** - border width in number of pixels in corresponding
+ directions
+
+- **borderType** - Flag defining what kind of border to be added. It can be one of the following types:
+ - **cv.BORDER_CONSTANT** - Adds a constant colored border. The value should be given
+ as next argument.
+ - **cv.BORDER_REFLECT** - Border will be mirror reflection of the border elements,
+ like this : *fedcba|abcdefgh|hgfedcb*
+ - **cv.BORDER_REFLECT_101** or **cv.BORDER_DEFAULT** - Same as above, but with a
+ slight change, like this : *gfedcb|abcdefgh|gfedcba*
+ - **cv.BORDER_REPLICATE** - Last element is replicated throughout, like this:
+ *aaaaaa|abcdefgh|hhhhhhh*
+ - **cv.BORDER_WRAP** - Can't explain, it will look like this :
+ *cdefgh|abcdefgh|abcdefg*
+
+- **value** - Color of border if border type is cv.BORDER_CONSTANT
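+
+A minimal sketch is shown below. Note that the binding also takes an explicit output Mat, which the
+argument list above leaves implicit; the canvas ids and border width are illustrative.
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// border color, used because the border type is cv.BORDER_CONSTANT
+let s = new cv.Scalar(255, 0, 0, 255);
+cv.copyMakeBorder(src, dst, 10, 10, 10, 10, cv.BORDER_CONSTANT, s);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode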
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_core/js_image_arithmetics/js_image_arithmetics.markdown b/doc/js_tutorials/js_core/js_image_arithmetics/js_image_arithmetics.markdown
new file mode 100644
index 0000000000..73e483943d
--- /dev/null
+++ b/doc/js_tutorials/js_core/js_image_arithmetics/js_image_arithmetics.markdown
@@ -0,0 +1,62 @@
+Arithmetic Operations on Images {#tutorial_js_image_arithmetics}
+===============================
+
+Goal
+----
+
+- Learn several arithmetic operations on images like addition, subtraction, bitwise operations
+ etc.
+- You will learn these functions : **cv.add()**, **cv.subtract()** etc.
+
+Image Addition
+--------------
+
+You can add two images with the OpenCV function cv.add(): res = img1 + img2. Both images should be of the same depth and type.
+
+For example, consider the sample below:
+@code{.js}
+let src1 = cv.imread("canvasInput1");
+let src2 = cv.imread("canvasInput2");
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+let dtype = -1;
+cv.add(src1, src2, dst, mask, dtype);
+src1.delete(); src2.delete(); dst.delete(); mask.delete();
+@endcode
+
+Image Subtraction
+-----------------
+
+You can subtract one image from another with the OpenCV function cv.subtract(): res = img1 - img2. Both images should be of the same depth and type.
+
+For example, consider the sample below:
+@code{.js}
+let src1 = cv.imread("canvasInput1");
+let src2 = cv.imread("canvasInput2");
+let dst = new cv.Mat();
+let mask = new cv.Mat();
+let dtype = -1;
+cv.subtract(src1, src2, dst, mask, dtype);
+src1.delete(); src2.delete(); dst.delete(); mask.delete();
+@endcode
+
+Bitwise Operations
+------------------
+
+This includes the bitwise AND, OR, NOT and XOR operations. They are highly useful while extracting
+any part of an image, defining and working with non-rectangular
+ROI, etc. Below we will see an example of how to change a particular region of an image.
+
+I want to put the OpenCV logo above an image. If I add two images, the color will change. If I blend them,
+I get a transparent effect. But I want it to be opaque. If it were a rectangular region, I could use
+ROI as we did in the last chapter. But the OpenCV logo is not a rectangular shape, so you can do it with
+bitwise operations, as sketched below.
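+
+The following is a rough sketch of that idea, assuming 'canvasInput1' holds the background image,
+'canvasInput2' holds a logo on a white background, and the logo fits inside the image:
+@code{.js}
+let src = cv.imread('canvasInput1');
+let logo = cv.imread('canvasInput2');
+let gray = new cv.Mat();
+let mask = new cv.Mat();
+let maskInv = new cv.Mat();
+// build a mask of the logo: white background -> 255, logo pixels -> 0
+cv.cvtColor(logo, gray, cv.COLOR_RGBA2GRAY);
+cv.threshold(gray, mask, 100, 255, cv.THRESH_BINARY);
+cv.bitwise_not(mask, maskInv);
+// take the ROI of the background where the logo will go
+let roi = src.roi(new cv.Rect(0, 0, logo.cols, logo.rows));
+let bg = new cv.Mat();
+let fg = new cv.Mat();
+cv.bitwise_and(roi, roi, bg, mask);      // background pixels around the logo
+cv.bitwise_and(logo, logo, fg, maskInv); // logo pixels only
+cv.add(bg, fg, roi);                     // combine and write back into the ROI
+cv.imshow('canvasOutput', src);
+src.delete(); logo.delete(); gray.delete(); mask.delete(); maskInv.delete();
+roi.delete(); bg.delete(); fg.delete();
+@endcode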
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_core/js_some_data_structures/js_image_arithmetics.markdown b/doc/js_tutorials/js_core/js_some_data_structures/js_image_arithmetics.markdown
new file mode 100644
index 0000000000..990819e089
--- /dev/null
+++ b/doc/js_tutorials/js_core/js_some_data_structures/js_image_arithmetics.markdown
@@ -0,0 +1,120 @@
+Some Data Structures {#tutorial_js_some_data_structures}
+===============================
+
+Goal
+----
+
+- You will learn some data structures: **Point**, **Scalar**, **Size**, **Circle**, **Rect**, **RotatedRect** etc.
+
+Scalar is an array type in JavaScript. Point, Size, Circle, Rect and RotatedRect are object types in JavaScript.
+
+Point
+--------------
+
+There are 2 ways to construct a Point and they are equivalent:
+@code{.js}
+// The first way
+let point = new cv.Point(x, y);
+// The second way
+let point = {x: x, y: y};
+@endcode
+
+@param x x coordinate of the point (the origin is the top-left corner of the image).
+@param y y coordinate of the point.
+
+Scalar
+--------------
+
+There are 2 ways to construct a Scalar and they are equivalent:
+@code{.js}
+// The first way
+let scalar = new cv.Scalar(R, G, B, Alpha);
+// The second way
+let scalar = [R, G, B, Alpha];
+@endcode
+
+@param R pixel value of red channel.
+@param G pixel value of green channel.
+@param B pixel value of blue channel.
+@param Alpha pixel value of alpha channel.
+
+Size
+------------------
+
+There are 2 ways to construct a Size and they are equivalent:
+@code{.js}
+// The first way
+let size = new cv.Size(width, height);
+// The second way
+let size = {width : width, height : height};
+@endcode
+
+@param width the width of the size.
+@param height the height of the size.
+
+Circle
+------------------
+
+There are 2 ways to construct a Circle and they are equivalent:
+@code{.js}
+// The first way
+let circle = new cv.Circle(center, radius);
+// The second way
+let circle = {center : center, radius : radius};
+@endcode
+
+@param center the center of the circle.
+@param radius the radius of the circle.
+
+Rect
+------------------
+
+There are 2 ways to construct a Rect and they are equivalent:
+@code{.js}
+// The first way
+let rect = new cv.Rect(x, y, width, height);
+// The second way
+let rect = {x : x, y : y, width : width, height : height};
+@endcode
+
+@param x x coordinate of the vertex which is the top left corner of the rectangle.
+@param y y coordinate of the vertex which is the top left corner of the rectangle.
+@param width the width of the rectangle.
+@param height the height of the rectangle.
+
+RotatedRect
+------------------
+
+There are 2 ways to construct a RotatedRect and they are equivalent:
+@code{.js}
+// The first way
+let rotatedRect = new cv.RotatedRect(center, size, angle);
+// The second way
+let rotatedRect = {center : center, size : size, angle : angle};
+@endcode
+
+@param center the rectangle mass center.
+@param size width and height of the rectangle.
+@param angle the rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+
+Learn how to get the vertices from rotatedRect:
+
+We use the function: **cv.RotatedRect.points(rotatedRect)**
+@param rotatedRect rotated rectangle
+
+@code{.js}
+let vertices = cv.RotatedRect.points(rotatedRect);
+let point1 = vertices[0];
+let point2 = vertices[1];
+let point3 = vertices[2];
+let point4 = vertices[3];
+@endcode
+
+Learn how to get the bounding rectangle from rotatedRect:
+
+We use the function: **cv.RotatedRect.boundingRect(rotatedRect)**
+@param rotatedRect rotated rectangle
+
+@code{.js}
+let boundingRect = cv.RotatedRect.boundingRect(rotatedRect);
+@endcode
\ No newline at end of file
diff --git a/doc/js_tutorials/js_core/js_table_of_contents_core.markdown b/doc/js_tutorials/js_core/js_table_of_contents_core.markdown
new file mode 100644
index 0000000000..afb68ed5c7
--- /dev/null
+++ b/doc/js_tutorials/js_core/js_table_of_contents_core.markdown
@@ -0,0 +1,16 @@
+Core Operations {#tutorial_js_table_of_contents_core}
+===============
+
+- @subpage tutorial_js_basic_ops
+
+    Learn how to read and edit pixel values, work with image ROI and perform
+    other basic operations.
+
+- @subpage tutorial_js_image_arithmetics
+
+ Perform arithmetic
+ operations on images
+
+- @subpage tutorial_js_some_data_structures
+
+ Learn some data structures
diff --git a/doc/js_tutorials/js_gui/js_image_display/images/Imread_Imshow_Tutorial_Result.png b/doc/js_tutorials/js_gui/js_image_display/images/Imread_Imshow_Tutorial_Result.png
new file mode 100644
index 0000000000..4de5c184e6
Binary files /dev/null and b/doc/js_tutorials/js_gui/js_image_display/images/Imread_Imshow_Tutorial_Result.png differ
diff --git a/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown b/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown
new file mode 100644
index 0000000000..efe65e320e
--- /dev/null
+++ b/doc/js_tutorials/js_gui/js_image_display/js_image_display.markdown
@@ -0,0 +1,95 @@
+Getting Started with Images {#tutorial_js_image_display}
+===========================
+
+Goals
+-----
+
+- Learn how to read an image and how to display it on a web page.
+
+Read an image
+-------------
+
+OpenCV.js stores images as cv.Mat. We use the HTML canvas element to transfer a cv.Mat to the web
+page or back again. The ImageData interface can represent or set the underlying pixel data of an area of
+a canvas element.
+
+@sa Please refer to canvas docs for more details.
+
+First, create an ImageData object from the canvas:
+@code{.js}
+let canvas = document.getElementById(canvasInputId);
+let ctx = canvas.getContext('2d');
+let imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+@endcode
+
+Then, use cv.matFromImageData to construct a cv.Mat:
+@code{.js}
+let src = cv.matFromImageData(imgData);
+@endcode
+
+@note Because the canvas only supports 8-bit RGBA images with continuous storage, the cv.Mat type is cv.CV_8UC4.
+It is different from native OpenCV because images returned and shown by the native **imread** and
+**imshow** have the channels stored in BGR order.
+
+Display an image
+----------------
+
+First, convert the type of src to cv.CV_8UC4:
+@code{.js}
+let dst = new cv.Mat();
+// scale and shift are used to map the data to [0, 255].
+src.convertTo(dst, cv.CV_8U, scale, shift);
+// *** is GRAY, RGB, or RGBA, depending on whether src.channels() is 1, 3 or 4.
+cv.cvtColor(dst, dst, cv.COLOR_***2RGBA);
+@endcode
+
+Then, construct a new ImageData object from dst:
+@code{.js}
+let imgData = new ImageData(new Uint8ClampedArray(dst.data), dst.cols, dst.rows);
+@endcode
+
+Finally, display it:
+@code{.js}
+let canvas = document.getElementById(canvasOutputId);
+let ctx = canvas.getContext('2d');
+ctx.clearRect(0, 0, canvas.width, canvas.height);
+canvas.width = imgData.width;
+canvas.height = imgData.height;
+ctx.putImageData(imgData, 0, 0);
+@endcode
+
+In OpenCV.js
+------------
+
+OpenCV.js implements image reading and showing using the above method.
+
+We use **cv.imread (imageSource)** to read an image from an HTML canvas or img element.
+@param imageSource canvas element or id, or img element or id.
+@return mat with channels stored in RGBA order.
+
+We use **cv.imshow (canvasSource, mat)** to display it. The function may scale the mat,
+depending on its depth:
+- If the mat is 8-bit unsigned, it is displayed as is.
+- If the mat is 16-bit unsigned or 32-bit integer, the pixels are divided by 256. That
+is, the value range [0,255*256] is mapped to [0,255].
+- If the mat is 32-bit floating-point, the pixel values are multiplied by 255. That is,
+the value range [0,1] is mapped to [0,255].
+
+@param canvasSource canvas element or id.
+@param mat mat to be shown.
+
+The image reading and displaying code above can be simplified as below.
+@code{.js}
+let img = cv.imread(imageSource);
+cv.imshow(canvasOutput, img);
+img.delete();
+@endcode
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
diff --git a/doc/js_tutorials/js_gui/js_table_of_contents_gui.markdown b/doc/js_tutorials/js_gui/js_table_of_contents_gui.markdown
new file mode 100644
index 0000000000..3a968bfb38
--- /dev/null
+++ b/doc/js_tutorials/js_gui/js_table_of_contents_gui.markdown
@@ -0,0 +1,14 @@
+GUI Features {#tutorial_js_table_of_contents_gui}
+============
+
+- @subpage tutorial_js_image_display
+
+    Learn to load an image and display it on a web page
+
+- @subpage tutorial_js_video_display
+
+    Learn to capture video from a camera and play it
+
+- @subpage tutorial_js_trackbar
+
+    Create a trackbar to control certain parameters
diff --git a/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Range.png b/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Range.png
new file mode 100644
index 0000000000..d897cba3e3
Binary files /dev/null and b/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Range.png differ
diff --git a/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Result.png b/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Result.png
new file mode 100644
index 0000000000..8f3d4116b7
Binary files /dev/null and b/doc/js_tutorials/js_gui/js_trackbar/images/Trackbar_Tutorial_Result.png differ
diff --git a/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown b/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown
new file mode 100644
index 0000000000..ed1c021f62
--- /dev/null
+++ b/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown
@@ -0,0 +1,73 @@
+Add a Trackbar to Your Application {#tutorial_js_trackbar}
+==================================
+
+Goal
+----
+
+- Use HTML DOM Input Range Object to add a trackbar to your application.
+
+Code Demo
+---------
+
+Here, we will create a simple application that blends two images. We will let the user enter the
+weight by using the trackbar.
+
+First, we need to create three canvas elements: two for input and one for output. Please refer to
+the tutorial @ref tutorial_js_image_display.
+@code{.js}
+let src1 = cv.imread('canvasInput1');
+let src2 = cv.imread('canvasInput2');
+@endcode
+
+Then, we use the HTML DOM Input Range Object to implement the trackbar, which is shown below.
+![](images/Trackbar_Tutorial_Range.png)
+
+@note <input> elements with type="range" are not supported in Internet Explorer 9 and earlier versions.
+
+You can create an <input> element with type="range" with the document.createElement() method:
+@code{.js}
+let x = document.createElement('INPUT');
+x.setAttribute('type', 'range');
+@endcode
+
+You can access an <input> element with type="range" with getElementById():
+@code{.js}
+let x = document.getElementById('myRange');
+@endcode
+
+As a trackbar, the range element needs a trackbar name, a default value, a minimum value, a maximum value,
+a step and the callback function which is executed every time the trackbar value changes. The callback
+function always has a default argument, which is the trackbar position. Additionally, a text element can
+be used to display the trackbar value. In our case, we can create the trackbar as below:
+@code{.html}
+Weight: <input type="range" id="trackbar" value="50" min="0" max="100" step="1" oninput="callback()">
+<input type="text" id="weightValue" size="3" value="50"/>
+@endcode
+
+Finally, we can use the trackbar value in the callback function, blend the two images, and display the result.
+@code{.js}
+let weightValue = document.getElementById('weightValue');
+let trackbar = document.getElementById('trackbar');
+weightValue.setAttribute('value', trackbar.value);
+let alpha = trackbar.value/trackbar.max;
+let beta = 1.0 - alpha;
+let src1 = cv.imread('canvasInput1');
+let src2 = cv.imread('canvasInput2');
+let dst = new cv.Mat();
+cv.addWeighted(src1, alpha, src2, beta, 0.0, dst, -1);
+cv.imshow('canvasOutput', dst);
+dst.delete();
+src1.delete();
+src2.delete();
+@endcode
+
+@sa cv.addWeighted
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
diff --git a/doc/js_tutorials/js_gui/js_video_display/js_video_display.markdown b/doc/js_tutorials/js_gui/js_video_display/js_video_display.markdown
new file mode 100644
index 0000000000..613c593a08
--- /dev/null
+++ b/doc/js_tutorials/js_gui/js_video_display/js_video_display.markdown
@@ -0,0 +1,105 @@
+Getting Started with Videos {#tutorial_js_video_display}
+===========================
+
+Goal
+----
+
+- Learn to capture video from a camera and display it.
+
+Capture video from camera
+-------------------------
+
+Often, we have to capture a live stream with a camera. In OpenCV.js, we use [WebRTC](https://webrtc.org/)
+and the HTML canvas element to implement this. Let's capture a video from the camera (built-in
+or USB), convert it into a grayscale video and display it.
+
+To capture a video, you need to add some HTML elements to the web page:
+- a <video> to display video from camera directly
+- a <canvas> to transfer video to canvas ImageData frame-by-frame
+- another <canvas> to display the video OpenCV.js gets
+
+First, we use WebRTC navigator.mediaDevices.getUserMedia to get the media stream.
+@code{.js}
+let video = document.getElementById("videoInput"); // video is the id of video tag
+navigator.mediaDevices.getUserMedia({ video: true, audio: false })
+ .then(function(stream) {
+ video.srcObject = stream;
+ video.play();
+ })
+ .catch(function(err) {
+        console.log("An error occurred! " + err);
+ });
+@endcode
+
+@note This function is unnecessary when you capture video from a video file. But notice that the
+HTML video element only supports the Ogg (Theora), WebM (VP8/VP9) and MP4 (H.264) video formats.
+
+Playing video
+-------------
+Now the browser has the camera stream. Then, we use the CanvasRenderingContext2D.drawImage() method
+of the Canvas 2D API to draw the video onto a canvas. Finally, we can use the method described in
+@ref tutorial_js_image_display to read and display the image in the canvas. To play video, cv.imshow()
+should be executed every delay milliseconds. We recommend the setTimeout() method. If the video is
+30fps, the delay should be (1000/30 - processing_time) milliseconds.
+@code{.js}
+let canvasFrame = document.getElementById("canvasFrame"); // canvasFrame is the id of the hidden <canvas>
+let context = canvasFrame.getContext("2d");
+let src = new cv.Mat(height, width, cv.CV_8UC4);
+let dst = new cv.Mat(height, width, cv.CV_8UC1);
+
+const FPS = 30;
+function processVideo() {
+ let begin = Date.now();
+ context.drawImage(video, 0, 0, width, height);
+ src.data.set(context.getImageData(0, 0, width, height).data);
+ cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+    cv.imshow("canvasOutput", dst); // canvasOutput is the id of another <canvas>
+ // schedule next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+}
+
+// schedule first one.
+setTimeout(processVideo, 0);
+@endcode
+
+OpenCV.js implements **cv.VideoCapture (videoSource)** using the above method. You do not need to
+add the hidden canvas element manually.
+@param videoSource the video id or element.
+@return cv.VideoCapture instance
+
+We use **read (image)** to get one frame of the video. For performance reasons, the image should be
+constructed with the cv.CV_8UC4 type and the same size as the video.
+@param image image with cv.CV_8UC4 type and same size as the video.
+
+The video-playing code above can be simplified as below.
+@code{.js}
+let src = new cv.Mat(height, width, cv.CV_8UC4);
+let dst = new cv.Mat(height, width, cv.CV_8UC1);
+let cap = new cv.VideoCapture(videoSource);
+
+const FPS = 30;
+function processVideo() {
+ let begin = Date.now();
+ cap.read(src);
+ cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
+ cv.imshow("canvasOutput", dst);
+ // schedule next one.
+ let delay = 1000/FPS - (Date.now() - begin);
+ setTimeout(processVideo, delay);
+}
+
+// schedule first one.
+setTimeout(processVideo, 0);
+@endcode
+
+@note Remember to delete src and dst after you stop.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_canny/js_canny.markdown b/doc/js_tutorials/js_imgproc/js_canny/js_canny.markdown
new file mode 100644
index 0000000000..3f52d90897
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_canny/js_canny.markdown
@@ -0,0 +1,88 @@
+Canny Edge Detection {#tutorial_js_canny}
+====================
+
+Goal
+----
+
+- Concept of Canny edge detection
+- OpenCV functions for that : **cv.Canny()**
+
+Theory
+------
+
+Canny Edge Detection is a popular edge detection algorithm. It was developed by John F. Canny in 1986. It is a multi-stage algorithm and we will go through each stage.
+
+-# **Noise Reduction**
+
+    Since edge detection is susceptible to noise in the image, the first step is to remove the noise in
+    the image with a 5x5 Gaussian filter. We have already seen this in previous chapters.
+
+-# **Finding Intensity Gradient of the Image**
+
+    The smoothed image is then filtered with a Sobel kernel in both the horizontal and vertical directions
+    to get the first derivatives in the horizontal direction (\f$G_x\f$) and the vertical direction
+    (\f$G_y\f$). From these two images, we can find the edge gradient and direction for each pixel as follows:
+
+ \f[
+ Edge\_Gradient \; (G) = \sqrt{G_x^2 + G_y^2} \\
+ Angle \; (\theta) = \tan^{-1} \bigg(\frac{G_y}{G_x}\bigg)
+ \f]
+
+ Gradient direction is always perpendicular to edges. It is rounded to one of four angles
+ representing vertical, horizontal and two diagonal directions.
+
+-# **Non-maximum Suppression**
+
+    After getting the gradient magnitude and direction, a full scan of the image is done to remove any
+    unwanted pixels which may not constitute an edge. For this, every pixel is checked to see if it is a
+    local maximum in its neighborhood in the direction of the gradient. Check the image below:
+
+ ![image](images/nms.jpg)
+
+    Point A is on the edge (in the vertical direction). The gradient direction is normal to the edge.
+    Points B and C are in the gradient direction. So point A is checked against points B and C to see if
+    it forms a local maximum. If so, it is considered for the next stage; otherwise, it is suppressed
+    (put to zero).
+
+ In short, the result you get is a binary image with "thin edges".
+
+-# **Hysteresis Thresholding**
+
+    This stage decides which of the detected edges are really edges and which are not. For this, we need
+    two threshold values, minVal and maxVal. Any edges with an intensity gradient above maxVal are sure to
+    be edges, and those below minVal are sure to be non-edges, so they are discarded. Those lying between
+    these two thresholds are classified as edges or non-edges based on their connectivity. If they are
+    connected to "sure-edge" pixels, they are considered part of the edges. Otherwise, they are also
+    discarded. See the image below:
+
+ ![image](images/hysteresis.jpg)
+
+    Edge A is above maxVal, so it is considered a "sure-edge". Although edge C is below maxVal, it is
+    connected to edge A, so it is also considered a valid edge and we get the full curve. But edge B,
+    although it is above minVal and in the same region as edge C, is not connected to any
+    "sure-edge", so it is discarded. It is therefore very important to select minVal and maxVal
+    accordingly to get the correct result.
+
+    This stage also removes small pixel noise on the assumption that edges are long lines.
+
+So what we finally get is strong edges in the image.
+
+Canny Edge Detection in OpenCV
+------------------------------
+
+We use the function: **cv.Canny(image, edges, threshold1, threshold2, apertureSize = 3, L2gradient = false)**
+@param image 8-bit input image.
+@param edges output edge map; a single-channel 8-bit image, which has the same size as image.
+@param threshold1 first threshold for the hysteresis procedure.
+@param threshold2 second threshold for the hysteresis procedure.
+@param apertureSize aperture size for the Sobel operator.
+@param L2gradient specifies the equation for finding gradient
+magnitude. If it is true, it uses the equation mentioned above, which is more accurate; otherwise it uses this function: \f$Edge\_Gradient \; (G) = |G_x| + |G_y|\f$.
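+
+For example, a minimal sketch (the canvas ids and the two thresholds are illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+// Canny expects a single-channel input
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(src, dst, 50, 100, 3, false);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode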
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_colorspaces/js_colorspaces.markdown b/doc/js_tutorials/js_imgproc/js_colorspaces/js_colorspaces.markdown
new file mode 100644
index 0000000000..9dbfe96131
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_colorspaces/js_colorspaces.markdown
@@ -0,0 +1,52 @@
+Changing Colorspaces {#tutorial_js_colorspaces}
+====================
+
+Goal
+----
+
+- In this tutorial, you will learn how to convert images from one color-space to another, like
+ RGB \f$\leftrightarrow\f$ Gray, RGB \f$\leftrightarrow\f$ HSV etc.
+- You will learn following functions : **cv.cvtColor()**, **cv.inRange()** etc.
+
+cvtColor
+--------------------
+
+There are more than 150 color-space conversion methods available in OpenCV. But we will look into
+the most widely used one: RGB \f$\leftrightarrow\f$ Gray.
+
+We use the function: **cv.cvtColor (src, dst, code, dstCn = 0)**
+@param src input image.
+@param dst output image of the same size and depth as src.
+@param code color space conversion code (see **cv.ColorConversionCodes**).
+@param dstCn number of channels in the destination image; if the parameter is 0, the number of channels is derived automatically from src and code.
+
+For RGB \f$\rightarrow\f$ Gray conversion we use the code cv.COLOR_RGBA2GRAY.
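+
+For example, a minimal sketch (the canvas ids are illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY, 0);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode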
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+inRange
+---------------
+
+Checks if array elements lie between the elements of two other arrays.
+
+We use the function: **cv.inRange (src, lowerb, upperb, dst)**
+@param src first input image.
+@param lowerb inclusive lower boundary Mat of the same size as src.
+@param upperb inclusive upper boundary Mat of the same size as src.
+@param dst output image of the same size as src and cv.CV_8U type.
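+
+For example, a minimal sketch that keeps only the darker pixels of an RGBA image (the bounds are
+illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
+let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
+cv.inRange(src, low, high, dst);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); low.delete(); high.delete();
+@endcode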
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contour_features/js_contour_features.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contour_features/js_contour_features.markdown
new file mode 100644
index 0000000000..22544b280c
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_contour_features/js_contour_features.markdown
@@ -0,0 +1,252 @@
+Contour Features {#tutorial_js_contour_features}
+================
+
+Goal
+----
+
+- To find the different features of contours, like area, perimeter, centroid, bounding box etc
+- You will learn plenty of functions related to contours.
+
+1. Moments
+----------
+
+Image moments help you to calculate some features like center of mass of the object, area of the
+object etc. Check out the wikipedia page on [Image
+Moments](http://en.wikipedia.org/wiki/Image_moment)
+
+We use the function: **cv.moments (array, binaryImage = false)**
+@param array raster image (single-channel, 8-bit or floating-point 2D array) or an array ( 1×N or N×1 ) of 2D points.
+@param binaryImage if it is true, all non-zero image pixels are treated as 1's. The parameter is used for images only.
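+
+For example, a sketch that finds the contours of a thresholded image and computes the moments of the
+first one (the canvas id and the threshold values are illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 120, 255, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+let cnt = contours.get(0);
+let M = cv.moments(cnt, false);
+src.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
+@endcode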
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+From these moments, you can extract useful data like area, centroid, etc. The centroid is given by the
+relations \f$C_x = \frac{M_{10}}{M_{00}}\f$ and \f$C_y = \frac{M_{01}}{M_{00}}\f$. This can be done as
+follows:
+@code{.js}
+let cx = M.m10 / M.m00;
+let cy = M.m01 / M.m00;
+@endcode
+
+2. Contour Area
+---------------
+
+Contour area is given by the function **cv.contourArea()** or from moments, **M['m00']**.
+
+We use the function: **cv.contourArea (contour, oriented = false)**
+@param contour input vector of 2D points (contour vertices)
+@param oriented oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine orientation of a contour by taking the sign of an area. By default, the parameter is false, which means that the absolute value is returned.
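+
+For example, assuming cnt is a contour obtained as in the Moments section above:
+@code{.js}
+let area = cv.contourArea(cnt, false);
+@endcode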
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+3. Contour Perimeter
+--------------------
+
+It is also called arc length. It can be found out using the **cv.arcLength()** function.
+
+We use the function: **cv.arcLength (curve, closed)**
+@param curve input vector of 2D points.
+@param closed flag indicating whether the curve is closed or not.
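+
+For example, assuming cnt is a closed contour obtained as in the Moments section:
+@code{.js}
+let perimeter = cv.arcLength(cnt, true);
+@endcode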
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+4. Contour Approximation
+------------------------
+
+It approximates a contour shape to another shape with a smaller number of vertices, depending upon the
+precision we specify. It is an implementation of the [Douglas-Peucker
+algorithm](http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm). Check the wikipedia page
+for the algorithm and a demonstration.
+
+We use the function: **cv.approxPolyDP (curve, approxCurve, epsilon, closed)**
+@param curve input vector of 2D points stored in cv.Mat.
+@param approxCurve result of the approximation. The type should match the type of the input curve.
+@param epsilon parameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation.
+@param closed If true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed.
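+
+For example, a sketch that approximates a contour with a tolerance of 1% of its arc length (the
+fraction is illustrative; a smaller epsilon follows the original curve more closely):
+@code{.js}
+let approx = new cv.Mat();
+let epsilon = 0.01 * cv.arcLength(cnt, true);
+cv.approxPolyDP(cnt, approx, epsilon, true);
+// ... use approx, e.g. draw it ...
+approx.delete();
+@endcode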
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+5. Convex Hull
+--------------
+
+Convex Hull will look similar to contour approximation, but it is not (both may provide the same results
+in some cases). Here, the **cv.convexHull()** function checks a curve for convexity defects and
+corrects it. Generally speaking, convex curves are curves which are always bulged out, or
+at least flat. If a curve is bulged inside, it is called a convexity defect. For example, check the
+image of a hand below. The red line shows the convex hull of the hand. The double-sided arrow marks show
+the convexity defects, which are the local maximum deviations of the hull from the contour.
+
+![image](images/convexitydefects.jpg)
+
+We use the function: **cv.convexHull (points, hull, clockwise = false, returnPoints = true)**
+@param points input 2D point set.
+@param hull output convex hull.
+@param clockwise orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing to the right, and its Y axis pointing upwards.
+@param returnPoints operation flag. In case of a matrix, when the flag is true, the function returns convex hull points. Otherwise, it returns indices of the convex hull points.
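+
+For example, assuming cnt is a contour obtained as in the Moments section:
+@code{.js}
+let hull = new cv.Mat();
+cv.convexHull(cnt, hull, false, true);
+// ... use hull, e.g. draw it with cv.drawContours ...
+hull.delete();
+@endcode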
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+6. Checking Convexity
+---------------------
+
+There is a function to check if a curve is convex or not, **cv.isContourConvex()**. It just returns
+true or false. Not a big deal.
+
+@code{.js}
+cv.isContourConvex(cnt);
+@endcode
+
+7. Bounding Rectangle
+---------------------
+
+There are two types of bounding rectangles.
+
+### 7.a. Straight Bounding Rectangle
+
+It is a straight rectangle; it doesn't consider the rotation of the object, so the area of the bounding
+rectangle won't be minimal.
+
+We use the function: **cv.boundingRect (points)**
+@param points input 2D point set.
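+
+For example, a sketch assuming cnt is a contour and dst is the image to draw on; cv.rectangle and
+the color are used only for illustration:
+@code{.js}
+let rect = cv.boundingRect(cnt);
+let color = new cv.Scalar(255, 0, 0, 255);
+cv.rectangle(dst, new cv.Point(rect.x, rect.y),
+             new cv.Point(rect.x + rect.width, rect.y + rect.height), color, 2);
+@endcode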
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 7.b. Rotated Rectangle
+
+Here, the bounding rectangle is drawn with minimum area, so it also considers the rotation.
+
+We use the function: **cv.minAreaRect (points)**
+@param points input 2D point set.
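+
+For example, a sketch that gets the rotated rectangle and its four vertices (see the RotatedRect
+section of the data structures tutorial):
+@code{.js}
+let rotatedRect = cv.minAreaRect(cnt);
+let vertices = cv.RotatedRect.points(rotatedRect);
+// vertices[0] .. vertices[3] can be connected with cv.line to draw the box
+@endcode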
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+8. Minimum Enclosing Circle
+---------------------------
+
+Next, we find the circumcircle of an object using the function **cv.minEnclosingCircle()**. It is a
+circle which completely covers the object with minimum area.
+
+We use the functions: **cv.minEnclosingCircle (points)**
+@param points input 2D point set.
+
+**cv.circle (img, center, radius, color, thickness = 1, lineType = cv.LINE_8, shift = 0)**
+@param img image where the circle is drawn.
+@param center center of the circle.
+@param radius radius of the circle.
+@param color circle color.
+@param thickness thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
+@param lineType type of the circle boundary.
+@param shift number of fractional bits in the coordinates of the center and in the radius value.
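+
+For example, a sketch assuming cnt is a contour and dst is the image to draw on; the return value
+is assumed to carry center and radius properties:
+@code{.js}
+let circle = cv.minEnclosingCircle(cnt);
+let color = new cv.Scalar(255, 0, 0, 255);
+cv.circle(dst, circle.center, circle.radius, color, 2);
+@endcode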
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+9. Fitting an Ellipse
+---------------------
+
+Next, we fit an ellipse to an object. The function returns the rotated rectangle in which the ellipse is
+inscribed.
+We use the functions: **cv.fitEllipse (points)**
+@param points input 2D point set.
+
+**cv.ellipse1 (img, box, color, thickness = 1, lineType = cv.LINE_8)**
+@param img image.
+@param box alternative ellipse representation via RotatedRect. This means that the function draws an ellipse inscribed in the rotated rectangle.
+@param color ellipse color.
+@param thickness thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
+@param lineType type of the ellipse boundary.
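+
+For example, a sketch assuming cnt is a contour with at least five points and dst is the image to
+draw on:
+@code{.js}
+let rotatedRect = cv.fitEllipse(cnt);
+let color = new cv.Scalar(255, 0, 0, 255);
+cv.ellipse1(dst, rotatedRect, color, 1, cv.LINE_8);
+@endcode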
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+10. Fitting a Line
+------------------
+
+Similarly, we can fit a line to a set of points, i.e. approximate the points with a straight line.
+
+We use the functions: **cv.fitLine (points, line, distType, param, reps, aeps)**
+@param points input 2D point set.
+@param line output line parameters. It should be a Mat of 4 elements [vx, vy, x0, y0], where [vx, vy] is a normalized vector collinear to the line and [x0, y0] is a point on the line.
+@param distType distance used by the M-estimator (see cv.DistanceTypes).
+@param param numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is chosen.
+@param reps sufficient accuracy for the radius (distance between the coordinate origin and the line).
+@param aeps sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
+
+**cv.line (img, pt1, pt2, color, thickness = 1, lineType = cv.LINE_8, shift = 0)**
+@param img image.
+@param pt1 first point of the line segment.
+@param pt2 second point of the line segment.
+@param color line color.
+@param thickness line thickness.
+@param lineType type of the line.
+@param shift number of fractional bits in the point coordinates.
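+
+For example, a sketch that fits a line to a contour and draws it across an image of width cols
+(cnt, dst and cols are assumptions used for illustration):
+@code{.js}
+let line = new cv.Mat();
+cv.fitLine(cnt, line, cv.DIST_L2, 0, 0.01, 0.01);
+let vx = line.data32F[0];
+let vy = line.data32F[1];
+let x0 = line.data32F[2];
+let y0 = line.data32F[3];
+// intersect the fitted line with the left and right image borders
+let lefty = Math.round((-x0 * vy / vx) + y0);
+let righty = Math.round(((cols - x0) * vy / vx) + y0);
+let color = new cv.Scalar(255, 0, 0, 255);
+cv.line(dst, new cv.Point(cols - 1, righty), new cv.Point(0, lefty), color, 2);
+line.delete();
+@endcode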
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contour_properties/js_contour_properties.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contour_properties/js_contour_properties.markdown
new file mode 100644
index 0000000000..1dbb15c4cf
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_contour_properties/js_contour_properties.markdown
@@ -0,0 +1,110 @@
+Contour Properties {#tutorial_js_contour_properties}
+==================
+
+Goal
+----
+
+- Here we will learn to extract some frequently used properties of objects like Solidity, Equivalent
+Diameter, Mask image, Mean Intensity etc.
+
+1. Aspect Ratio
+---------------
+
+It is the ratio of the width to the height of the bounding rect of the object.
+
+\f[Aspect \; Ratio = \frac{Width}{Height}\f]
+@code{.js}
+let rect = cv.boundingRect(cnt);
+let aspectRatio = rect.width / rect.height;
+@endcode
+
+2. Extent
+---------
+
+Extent is the ratio of contour area to bounding rectangle area.
+
+\f[Extent = \frac{Object \; Area}{Bounding \; Rectangle \; Area}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let rect = cv.boundingRect(cnt);
+let rectArea = rect.width * rect.height;
+let extent = area / rectArea;
+@endcode
+
+3. Solidity
+-----------
+
+Solidity is the ratio of contour area to its convex hull area.
+
+\f[Solidity = \frac{Contour \; Area}{Convex \; Hull \; Area}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let hull = new cv.Mat();
+cv.convexHull(cnt, hull, false, true);
+let hullArea = cv.contourArea(hull, false);
+let solidity = area / hullArea;
+@endcode
+
+4. Equivalent Diameter
+----------------------
+
+Equivalent Diameter is the diameter of the circle whose area is same as the contour area.
+
+\f[Equivalent \; Diameter = \sqrt{\frac{4 \times Contour \; Area}{\pi}}\f]
+@code{.js}
+let area = cv.contourArea(cnt, false);
+let equiDiameter = Math.sqrt(4 * area / Math.PI);
+@endcode
+
+5. Orientation
+--------------
+
+Orientation is the angle at which the object is directed. The following method also gives the Major Axis
+and Minor Axis lengths.
+@code{.js}
+let rotatedRect = cv.fitEllipse(cnt);
+let angle = rotatedRect.angle;
+@endcode
+
+6. Mask and Pixel Points
+------------------------
+
+In some cases, we may need all the points which comprise that object.
+
+We use the function: **cv.transpose (src, dst)**
+@param src input array.
+@param dst output array of the same type as src.
+
+\htmlonly
+
+\endhtmlonly
+
+7. Maximum Value, Minimum Value and their locations
+---------------------------------------------------
+
+We use the function: **cv.minMaxLoc(src, mask)**
+@param src input single-channel array.
+@param mask optional mask used to select a sub-array.
+
+@code{.js}
+let result = cv.minMaxLoc(src, mask);
+let minVal = result.minVal;
+let maxVal = result.maxVal;
+let minLoc = result.minLoc;
+let maxLoc = result.maxLoc;
+@endcode
+
+8. Mean Color or Mean Intensity
+-------------------------------
+
+Here, we can find the average color of an object, or the average intensity of the object in
+grayscale mode. We again use the same mask to do it.
+
+We use the function: **cv.mean (src, mask)**
+@param src input array that should have from 1 to 4 channels so that the result can be stored in Scalar.
+@param mask optional operation mask.
+
+@code{.js}
+let average = cv.mean(src, mask);
+@endcode
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown
new file mode 100644
index 0000000000..48eb92b7dd
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown
@@ -0,0 +1,72 @@
+Contours : Getting Started {#tutorial_js_contours_begin}
+==========================
+
+Goal
+----
+
+- Understand what contours are.
+- Learn to find contours, draw contours etc
+- You will learn these functions : **cv.findContours()**, **cv.drawContours()**
+
+What are contours?
+------------------
+
+Contours can be explained simply as a curve joining all the continuous points (along the boundary)
+having the same color or intensity. Contours are a useful tool for shape analysis and object
+detection and recognition.
+
+- For better accuracy, use binary images. So before finding contours, apply threshold or Canny
+  edge detection.
+- Since OpenCV 3.2, the source image is not modified by this function.
+- In OpenCV, finding contours is like finding a white object on a black background. So remember:
+  the object to be found should be white and the background should be black.
+
+How to draw the contours?
+-------------------------
+
+To draw the contours, the cv.drawContours function is used. It can also be used to draw any shape
+provided you have its boundary points.
+
+We use the functions: **cv.findContours (image, contours, hierarchy, mode, method, offset = new cv.Point(0, 0))**
+@param image source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as binary.
+@param contours detected contours.
+@param hierarchy containing information about the image topology. It has as many elements as the number of contours.
+@param mode contour retrieval mode (see cv.RetrievalModes).
+@param method contour approximation method (see cv.ContourApproximationModes).
+@param offset optional offset by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.
+
+**cv.drawContours (image, contours, contourIdx, color, thickness = 1, lineType = cv.LINE_8, hierarchy = new cv.Mat(), maxLevel = INT_MAX, offset = new cv.Point(0, 0))**
+@param image destination image.
+@param contours all the input contours.
+@param contourIdx parameter indicating a contour to draw. If it is negative, all the contours are drawn.
+@param color color of the contours.
+@param thickness thickness of lines the contours are drawn with. If it is negative, the contour interiors are drawn.
+@param lineType line connectivity (see cv.LineTypes).
+@param hierarchy optional information about hierarchy. It is only needed if you want to draw only some of the contours (see maxLevel).
+@param maxLevel maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is hierarchy available.
+@param offset optional contour shift parameter.
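+
+For example, a minimal sketch putting the two functions together (the canvas ids and the threshold
+are illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(src, src, 120, 255, cv.THRESH_BINARY);
+let contours = new cv.MatVector();
+let hierarchy = new cv.Mat();
+cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
+// draw all contours (contourIdx = -1) in white
+cv.drawContours(dst, contours, -1, new cv.Scalar(255, 255, 255), 1, cv.LINE_8, hierarchy, 100);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete(); contours.delete(); hierarchy.delete();
+@endcode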
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Contour Approximation Method
+============================
+
+This is the fifth argument of the cv.findContours function. What does it actually denote?
+
+Above, we said that contours are the boundaries of a shape with the same intensity. It stores the (x,y)
+coordinates of the boundary of a shape. But does it store all the coordinates? That is specified by
+this contour approximation method.
+
+If you pass cv.CHAIN_APPROX_NONE, all the boundary points are stored. But do we actually need all
+the points? For example, say you found the contour of a straight line. Do you need all the points on the
+line to represent that line? No, we need just the two end points of that line. This is what
+cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby
+saving memory.
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown
new file mode 100644
index 0000000000..5dc807742b
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_contours_hierarchy/js_contours_hierarchy.markdown
@@ -0,0 +1,158 @@
+Contours Hierarchy {#tutorial_js_contours_hierarchy}
+==================
+
+Goal
+----
+
+- This time, we learn about the hierarchy of contours, i.e. the parent-child relationship in Contours.
+
+Theory
+------
+
+In the last few articles on contours, we have worked with several functions related to contours
+provided by OpenCV. But when we found the contours in an image using the **cv.findContours()** function,
+we passed an argument, the **Contour Retrieval Mode**. We usually passed **cv.RETR_LIST** or
+**cv.RETR_TREE** and it worked well. But what does it actually mean?
+
+Also, in the output, we got three arrays: the first is the image, the second is our contours, and one
+more output which we named **hierarchy** (please check out the code in the previous articles). But we
+never used this hierarchy anywhere. So what is this hierarchy and what is it for? What is its
+relationship with the previously mentioned function argument?
+
+That is what we are going to deal with in this article.
+
+### What is Hierarchy?
+
+Normally we use the **cv.findContours()** function to detect objects in an image, right? Sometimes
+objects are in different locations. But in some cases, some shapes are inside other shapes, just
+like nested figures. In this case, we call the outer one the **parent** and the inner one the **child**.
+This way, contours in an image have some relationship to each other. And we can specify how contours are
+connected to each other: is one a child of some other contour, or is it a parent, etc.
+The representation of this relationship is called the **Hierarchy**.
+
+Consider an example image below :
+
+![image](images/hierarchy.png)
+
+In this image, there are a few shapes which I have numbered from **0-5**. *2 and 2a* denote the
+external and internal contours of the outermost box.
+
+Here, contours 0, 1 and 2 are **external or outermost**. We can say they are in **hierarchy-0**, or
+simply that they are at the **same hierarchy level**.
+
+Next comes **contour-2a**. It can be considered a **child of contour-2** (or, the other way around,
+contour-2 is the parent of contour-2a). So let it be in **hierarchy-1**. Similarly, contour-3 is a child of
+contour-2a and it comes in the next hierarchy. Finally, contours 4 and 5 are the children of contour-3a,
+and they come in the last hierarchy level. From the way I numbered the boxes, I would say contour-4 is
+the first child of contour-3a (it could be contour-5 as well).
+
+I mentioned these things so that you understand terms like **same hierarchy level**, **external contour**,
+**child contour**, **parent contour**, **first child**, etc. Now let's get into OpenCV.
+
+### Hierarchy Representation in OpenCV
+
+So each contour has its own information regarding the hierarchy it is in, who its child is, who its
+parent is, etc. OpenCV represents it as an array of four values: **[Next, Previous, First_Child,
+Parent]**
+
+*"Next denotes next contour at the same hierarchical level."*
+
+For example, take contour-0 in our picture. Which contour is next to it at the same level? It is contour-1.
+So simply put Next = 1. Similarly for contour-1, the next is contour-2, so Next = 2.
+
+What about contour-2? There is no next contour at the same level, so simply put Next = -1. What
+about contour-4? It is at the same level as contour-5, so its next contour is contour-5 and Next = 5.
+
+*"Previous denotes previous contour at the same hierarchical level."*
+
+It is the same as above. The previous contour of contour-1 is contour-0 at the same level. Similarly for
+contour-2, it is contour-1. And for contour-0, there is no previous, so put it as -1.
+
+*"First_Child denotes its first child contour."*
+
+There is no need for any explanation. For contour-2, the child is contour-2a, so it gets the
+corresponding index value of contour-2a. What about contour-3a? It has two children, but we take
+only the first child, which is contour-4. So First_Child = 4 for contour-3a.
+
+*"Parent denotes index of its parent contour."*
+
+It is just the opposite of **First_Child**. For both contour-4 and contour-5, the parent contour is
+contour-3a. For contour-3a, it is contour-3, and so on.
+
+@note If there is no child or parent, that field is taken as -1
+
+Now that we know the hierarchy style used in OpenCV, we can check into the Contour Retrieval Modes
+in OpenCV with the help of the same image given above, i.e. what do flags like cv.RETR_LIST,
+cv.RETR_TREE, cv.RETR_CCOMP, cv.RETR_EXTERNAL etc. mean?
+
+Contour Retrieval Mode
+----------------------
+
+### 1. RETR_LIST
+
+This is the simplest of the four flags (from the explanation point of view). It simply retrieves all the
+contours, but doesn't create any parent-child relationships. **Parents and kids are equal under this
+rule, and they are just contours**, i.e. they all belong to the same hierarchy level.
+
+So here, the 3rd and 4th terms in the hierarchy array are always -1. But obviously, the Next and Previous
+terms will have their corresponding values.
+
+### 2. RETR_EXTERNAL
+
+If you use this flag, it returns only the extreme outer contours. All child contours are left behind. **We
+can say, under this law, only the eldest in every family is taken care of. It doesn't care about
+the other members of the family.**
+
+
+### 3. RETR_CCOMP
+
+This flag retrieves all the contours and arranges them into a 2-level hierarchy, i.e. the external contours
+of the object (its boundary) are placed in hierarchy-1, and the contours of the holes inside the object
+(if any) are placed in hierarchy-2. If there is any object inside a hole, its contour is placed again in
+hierarchy-1 only, and its hole in hierarchy-2, and so on.
+
+Just consider the image of a "big white zero" on a black background. The outer circle of the zero belongs
+to the first hierarchy, and the inner circle belongs to the second hierarchy.
+
+We can explain it with a simple image. Here I have labelled the order of the contours in red and the
+hierarchy they belong to in green (either 1 or 2). The order is the same as the order in which OpenCV
+detects contours.
+
+![image](images/ccomp_hierarchy.png)
+
+So consider the first contour, i.e. contour-0. It is in hierarchy-1. It has two holes, contours 1 and 2,
+and they belong to hierarchy-2. So for contour-0, the next contour at the same hierarchy level is contour-3,
+and there is no previous one. Its first child is contour-1 in hierarchy-2. It has no parent,
+because it is in hierarchy-1. So its hierarchy array is [3,-1,1,-1].
+
+Now take contour-1. It is in hierarchy-2. The next one in the same hierarchy (under the parenthood of
+contour-0) is contour-2. There is no previous one, and no child, but the parent is contour-0. So the array
+is [2,-1,-1,0].
+
+Similarly contour-2: it is in hierarchy-2. There is no next contour in the same hierarchy under
+contour-0, so Next is -1. The previous one is contour-1. There is no child, and the parent is contour-0.
+So the array is [-1,1,-1,0].
+
+Contour-3: the next contour in hierarchy-1 is contour-5. The previous one is contour-0. The child is
+contour-4 and there is no parent. So the array is [5,0,4,-1].
+
+Contour-4: it is in hierarchy-2 under contour-3 and it has no sibling. So there is no next, no previous,
+no child, and the parent is contour-3. So the array is [-1,-1,-1,3].
+
+
+### 4. RETR_TREE
+
+And this is the final guy, Mr. Perfect. It retrieves all the contours and creates a full family
+hierarchy list. **It even tells who is the grandpa, father, son, grandson and even beyond... :)**.
+
+For example, I took the above image, rewrote the code for cv.RETR_TREE, reordered the contours as per the
+result given by OpenCV and analyzed it. Again, red letters give the contour number and green letters
+give the hierarchy order.
+
+![image](images/tree_hierarchy.png)
+
+Take contour-0: it is in hierarchy-0. The next contour in the same hierarchy is contour-7. There are no
+previous contours. The child is contour-1, and there is no parent. So the array is [7,-1,1,-1].
+
+Take contour-1: it is in hierarchy-1. There is no contour at the same level and no previous one. Its child
+is contour-2 and its parent is contour-0. So the array is [-1,-1,2,0].
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contours_more_functions/js_contours_more_functions.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contours_more_functions/js_contours_more_functions.markdown
new file mode 100644
index 0000000000..b753116662
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_contours_more_functions/js_contours_more_functions.markdown
@@ -0,0 +1,72 @@
+Contours : More Functions {#tutorial_js_contours_more_functions}
+=========================
+
+Goal
+----
+
+- Convexity defects and how to find them.
+- Finding shortest distance from a point to a polygon
+- Matching different shapes
+
+Theory and Code
+---------------
+
+### 1. Convexity Defects
+
+We saw what a convex hull is in the second chapter about contours. Any deviation of the object from
+this hull can be considered a convexity defect. We can visualize it using an image: we draw a line
+joining the start point and end point, then draw a circle at the farthest point.
+
+@note Remember we have to pass returnPoints = false while finding the convex hull, in order to find
+convexity defects.
+
+We use the function: **cv.convexityDefects (contour, convexhull, convexityDefect)**
+@param contour input contour.
+@param convexhull convex hull obtained using convexHull that should contain indices of the contour points that make the hull
+@param convexityDefect the output vector of convexity defects. Each convexity defect is represented as a 4-element integer vector (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices in the original contour of the convexity defect's beginning, end and farthest point, and fixpt_depth is a fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour point and the hull. That is, the floating-point depth value is fixpt_depth/256.0.
+
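+As a minimal sketch of how these pieces fit together (assuming `cnt` is a contour already obtained
+from cv.findContours; the variable name is our assumption):
+
+@code{.js}
+let hull = new cv.Mat();
+let defects = new cv.Mat();
+cv.convexHull(cnt, hull, false, false); // returnPoints = false, so hull holds indices
+cv.convexityDefects(cnt, hull, defects);
+// each row of defects is [start_index, end_index, farthest_pt_index, fixpt_depth]
+for (let i = 0; i < defects.rows; ++i) {
+    let d = defects.intPtr(i, 0);
+    console.log('defect ' + i + ' depth: ' + d[3] / 256.0); // floating-point depth
+}
+hull.delete(); defects.delete();
+@endcode
+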
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 2. Point Polygon Test
+
+This function finds the shortest distance between a point in the image and a contour. It returns
+the distance, which is negative when the point is outside the contour, positive when the point is
+inside, and zero if the point is on the contour.
+
+We use the function: **cv.pointPolygonTest (contour, pt, measureDist)**
+@param contour input contour.
+@param pt point tested against the contour.
+@param measureDist if true, the function estimates the signed distance from the point to the nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
+
+@code{.js}
+let dist = cv.pointPolygonTest(cnt, new cv.Point(50, 50), true);
+@endcode
+
+### 3. Match Shapes
+
+OpenCV comes with a function **cv.matchShapes()** which enables us to compare two shapes, or two
+contours, and returns a metric showing the similarity. The lower the result, the better the match.
+It is calculated based on the Hu-moment values. Different measurement methods are explained in the
+docs.
+
+We use the function: **cv.matchShapes (contour1, contour2, method, parameter)**
+@param contour1 first contour or grayscale image.
+@param contour2 second contour or grayscale image.
+@param method comparison method, see cv::ShapeMatchModes
+@param parameter method-specific parameter(not supported now).
+
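+A minimal sketch (assuming `cnt1` and `cnt2` are contours from cv.findContours; method 1 is the
+first Hu-moment based mode of cv::ShapeMatchModes):
+
+@code{.js}
+// the smaller the returned metric, the more similar the two shapes
+let similarity = cv.matchShapes(cnt1, cnt2, 1, 0);
+console.log('similarity: ' + similarity);
+@endcode
+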
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_table_of_contents_contours.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_table_of_contents_contours.markdown
new file mode 100644
index 0000000000..73f6f074f1
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_contours/js_table_of_contents_contours.markdown
@@ -0,0 +1,26 @@
+Contours in OpenCV.js {#tutorial_js_table_of_contents_contours}
+==================
+
+- @subpage tutorial_js_contours_begin
+
+ Learn to find and draw Contours.
+
+- @subpage tutorial_js_contour_features
+
+ Learn
+ to find different features of contours like area, perimeter, bounding rectangle etc.
+
+- @subpage tutorial_js_contour_properties
+
+ Learn
+ to find different properties of contours like Solidity, Mean Intensity etc.
+
+- @subpage tutorial_js_contours_more_functions
+
+ Learn
+ to find convexity defects, pointPolygonTest, match different shapes etc.
+
+- @subpage tutorial_js_contours_hierarchy
+
+ Learn
+ about Contour Hierarchy
diff --git a/doc/js_tutorials/js_imgproc/js_filtering/js_filtering.markdown b/doc/js_tutorials/js_imgproc/js_filtering/js_filtering.markdown
new file mode 100644
index 0000000000..8ac7eca4cf
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_filtering/js_filtering.markdown
@@ -0,0 +1,163 @@
+Smoothing Images {#tutorial_js_filtering}
+================
+
+Goals
+-----
+
+- Blur the images with various low pass filters
+- Apply custom-made filters to images (2D convolution)
+
+2D Convolution ( Image Filtering )
+----------------------------------
+
+As with one-dimensional signals, images can also be filtered with various low-pass filters (LPF),
+high-pass filters (HPF), etc. LPF helps in removing noise, blurring images, etc. HPF helps in
+finding edges in images.
+
+OpenCV provides a function **cv.filter2D()** to convolve a kernel with an image. As an example, we
+will try an averaging filter on an image. A 5x5 averaging filter kernel will look like below:
+
+\f[K = \frac{1}{25} \begin{bmatrix} 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \end{bmatrix}\f]
+
+We use the functions: **cv.filter2D (src, dst, ddepth, kernel, anchor = new cv.Point(-1, -1), delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth desired depth of the destination image.
+@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point matrix; if you want to apply different kernels to different channels, split the image into separate color planes using split and process them individually.
+@param anchor anchor of the kernel that indicates the relative position of a filtered point within the kernel; the anchor should lie within the kernel; default value new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param delta optional value added to the filtered pixels before storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
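+A minimal sketch (assuming `src` is an image read with cv.imread from a canvas; the canvas names
+are our assumption):
+
+@code{.js}
+let dst = new cv.Mat();
+// 5x5 averaging kernel: every element is 1/25
+let kernel = new cv.Mat(5, 5, cv.CV_32FC1, new cv.Scalar(1 / 25));
+let anchor = new cv.Point(-1, -1);
+cv.filter2D(src, dst, cv.CV_8U, kernel, anchor, 0, cv.BORDER_DEFAULT);
+cv.imshow('canvasOutput', dst);
+dst.delete(); kernel.delete();
+@endcode
+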
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Image Blurring (Image Smoothing)
+--------------------------------
+
+Image blurring is achieved by convolving the image with a low-pass filter kernel. It is useful for
+removing noise. It actually removes high frequency content (eg: noise, edges) from the image, so
+edges are blurred a little bit in this operation. (Well, there are blurring techniques which don't
+blur the edges too). OpenCV provides mainly four types of blurring techniques.
+
+### 1. Averaging
+
+This is done by convolving the image with a normalized box filter. It simply takes the average of
+all the pixels under the kernel area and replaces the central element. This is done by the function
+**cv.blur()** or **cv.boxFilter()**. Check the docs for more details about the kernel. We should
+specify the width and height of the kernel. A 3x3 normalized box filter would look like below:
+
+\f[K = \frac{1}{9} \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{bmatrix}\f]
+
+We use the functions: **cv.blur (src, dst, ksize, anchor = new cv.Point(-1, -1), borderType = cv.BORDER_DEFAULT)**
+@param src input image; it can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
+@param dst output image of the same size and type as src.
+@param ksize blurring kernel size.
+@param anchor anchor point; anchor = new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+**cv.boxFilter (src, dst, ddepth, ksize, anchor = new cv.Point(-1, -1), normalize = true, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and type as src.
+@param ddepth the output image depth (-1 to use src.depth()).
+@param ksize blurring kernel size.
+@param anchor anchor point; anchor = new cv.Point(-1, -1) means that the anchor is at the kernel center.
+@param normalize flag, specifying whether the kernel is normalized by its area or not.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+@note If you don't want to use normalized box filter, use **cv.boxFilter()**. Pass an argument
+normalize = false to the function.
+
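+A minimal sketch (as before, assuming `src` holds the input image):
+
+@code{.js}
+let dst = new cv.Mat();
+let ksize = new cv.Size(3, 3);
+// each pixel becomes the mean of its 3x3 neighbourhood
+cv.blur(src, dst, ksize, new cv.Point(-1, -1), cv.BORDER_DEFAULT);
+@endcode
+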
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 2. Gaussian Blurring
+
+Here, a Gaussian kernel is used instead of a box filter.
+
+We use the function: **cv.GaussianBlur (src, dst, ksize, sigmaX, sigmaY = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image; the image can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
+@param dst output image of the same size and type as src.
+@param ksize blurring kernel size.
+@param sigmaX Gaussian kernel standard deviation in X direction.
+@param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, to fully control the result regardless of possible future modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and sigmaY.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
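+A minimal sketch (assuming `src` and `dst` are cv.Mats as above):
+
+@code{.js}
+// 5x5 Gaussian kernel; with sigmaX = sigmaY = 0 the sigmas are computed from the kernel size
+cv.GaussianBlur(src, dst, new cv.Size(5, 5), 0, 0, cv.BORDER_DEFAULT);
+@endcode
+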
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 3. Median Blurring
+
+Here, the function **cv.medianBlur()** takes the median of all the pixels under the kernel area and
+the central element is replaced with this median value. This is highly effective against
+salt-and-pepper noise in the images. Interestingly, in the above filters, the central element is a
+newly calculated value which may be a pixel value in the image or a new value. But in median
+blurring, the central element is always replaced by some pixel value in the image, so it reduces
+the noise effectively. Its kernel size should be a positive odd integer.
+
+We use the function: **cv.medianBlur (src, dst, ksize)**
+@param src input 1, 3, or 4 channel image; when ksize is 3 or 5, the image depth should be cv.CV_8U, cv.CV_16U, or cv.CV_32F, for larger aperture sizes, it can only be cv.CV_8U.
+@param dst destination array of the same size and type as src.
+@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
+
+@note The median filter uses cv.BORDER_REPLICATE internally to cope with border pixels.
+
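+A minimal sketch (with `src` and `dst` as above):
+
+@code{.js}
+// aperture size must be odd and greater than 1
+cv.medianBlur(src, dst, 5);
+@endcode
+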
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 4. Bilateral Filtering
+
+**cv.bilateralFilter()** is highly effective in noise removal while keeping edges sharp. But the
+operation is slower compared to other filters. We already saw that a Gaussian filter takes a
+neighbourhood around the pixel and finds its Gaussian weighted average. This Gaussian filter is a
+function of space alone, that is, nearby pixels are considered while filtering. It doesn't consider
+whether pixels have almost the same intensity, or whether a pixel is an edge pixel or not. So it
+blurs the edges also, which we don't want to do.
+
+The bilateral filter also takes a Gaussian filter in space, but adds one more Gaussian filter which
+is a function of pixel intensity difference. The Gaussian function of space makes sure only nearby
+pixels are considered for blurring, while the Gaussian function of intensity difference makes sure
+only those pixels with similar intensity to the central pixel are considered. So it preserves the
+edges, since pixels at edges will have large intensity variation.
+
+We use the function: **cv.bilateralFilter (src, dst, d, sigmaColor, sigmaSpace, borderType = cv.BORDER_DEFAULT)**
+@param src source 8-bit or floating-point, 1-channel or 3-channel image.
+@param dst output image of the same size and type as src.
+@param d diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
+@param sigmaColor filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood will be mixed together, resulting in larger areas of semi-equal color.
+@param sigmaSpace filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough. When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
+@param borderType border mode used to extrapolate pixels outside of the image(see cv.BorderTypes).
+
+@note For simplicity, you can set the 2 sigma values to be the same. If they are small (< 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very strong effect, making the image look "cartoonish". Large filters (d > 5) are very slow, so it is recommended to use d=5 for real-time applications, and perhaps d=9 for offline applications that need heavy noise filtering.
+
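+A minimal sketch (the cvtColor step is needed because canvas images are RGBA, while
+cv.bilateralFilter expects a 1- or 3-channel image):
+
+@code{.js}
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0); // drop the alpha channel
+cv.bilateralFilter(src, dst, 9, 75, 75, cv.BORDER_DEFAULT);
+@endcode
+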
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_geometric_transformations/js_geometric_transformations.markdown b/doc/js_tutorials/js_imgproc/js_geometric_transformations/js_geometric_transformations.markdown
new file mode 100644
index 0000000000..d565e6bd2c
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_geometric_transformations/js_geometric_transformations.markdown
@@ -0,0 +1,145 @@
+Geometric Transformations of Images {#tutorial_js_geometric_transformations}
+===================================
+
+Goals
+-----
+
+- Learn how to apply different geometric transformations to images, like translation, rotation,
+ affine transformation etc.
+- You will learn these functions: **cv.resize**, **cv.warpAffine**, **cv.getAffineTransform** and **cv.warpPerspective**
+
+Transformations
+---------------
+
+
+### Scaling
+
+Scaling is just resizing of the image. OpenCV comes with a function **cv.resize()** for this
+purpose. The size of the image can be specified manually, or you can specify the scaling factor.
+Different interpolation methods are available. Preferable interpolation methods are **cv.INTER_AREA**
+for shrinking and **cv.INTER_CUBIC** (slow) & **cv.INTER_LINEAR** for zooming.
+
+We use the function: **cv.resize (src, dst, dsize, fx = 0, fy = 0, interpolation = cv.INTER_LINEAR)**
+@param src input image
+@param dst output image; it has the size dsize (when it is non-zero) or the size computed from src.size(), fx, and fy; the type of dst is the same as of src.
+@param dsize output image size; if it equals zero, it is computed as:
+ \f[\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\f]
+ Either dsize or both fx and fy must be non-zero.
+@param fx scale factor along the horizontal axis; when it equals 0, it is computed as \f$\texttt{(double)dsize.width/src.cols}\f$
+@param fy scale factor along the vertical axis; when it equals 0, it is computed as \f$\texttt{(double)dsize.height/src.rows}\f$
+@param interpolation interpolation method(see **cv.InterpolationFlags**)
+
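+A minimal sketch (assuming `src` is a cv.Mat read from a canvas):
+
+@code{.js}
+let dst = new cv.Mat();
+// shrink to half size; with a non-zero dsize, fx and fy are derived from it
+let dsize = new cv.Size(Math.round(src.cols / 2), Math.round(src.rows / 2));
+cv.resize(src, dst, dsize, 0, 0, cv.INTER_AREA);
+@endcode
+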
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### Translation
+
+Translation is the shifting of an object's location. If you know the shift in the (x,y) direction,
+let it be \f$(t_x,t_y)\f$, you can create the transformation matrix \f$\textbf{M}\f$ as follows:
+
+\f[M = \begin{bmatrix} 1 & 0 & t_x \\ 0 & 1 & t_y \end{bmatrix}\f]
+
+We use the function: **cv.warpAffine (src, dst, M, dsize, flags = cv.INTER_LINEAR, borderMode = cv.BORDER_CONSTANT, borderValue = new cv.Scalar())**
+@param src input image.
+@param dst output image that has the size dsize and the same type as src.
+@param M 2 × 3 transformation matrix(cv.CV_64FC1 type).
+@param dsize size of the output image.
+@param flags combination of interpolation methods (see cv.InterpolationFlags) and the optional flag WARP_INVERSE_MAP, meaning that M is the inverse transformation (dst→src).
+@param borderMode pixel extrapolation method (see cv.BorderTypes); when borderMode = BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to the "outliers" in the source image are not modified by the function.
+@param borderValue value used in case of a constant border; by default, it is 0.
+
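+A minimal sketch (the shift values 50 and 100 are arbitrary, for illustration only):
+
+@code{.js}
+let dst = new cv.Mat();
+// M = [1 0 tx; 0 1 ty]: shift 50 px right and 100 px down
+let M = cv.matFromArray(2, 3, cv.CV_64FC1, [1, 0, 50, 0, 1, 100]);
+let dsize = new cv.Size(src.cols, src.rows);
+cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+M.delete();
+@endcode
+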
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### Rotation
+
+Rotation of an image for an angle \f$\theta\f$ is achieved by the transformation matrix of the form
+
+\f[M = \begin{bmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{bmatrix}\f]
+
+But OpenCV provides scaled rotation with an adjustable center of rotation, so that you can rotate
+at any location you prefer. The modified transformation matrix is given by
+
+\f[\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot center.x - \beta \cdot center.y \\ - \beta & \alpha & \beta \cdot center.x + (1- \alpha ) \cdot center.y \end{bmatrix}\f]
+
+where:
+
+\f[\begin{array}{l} \alpha = scale \cdot \cos \theta , \\ \beta = scale \cdot \sin \theta \end{array}\f]
+
+We use the function: **cv.getRotationMatrix2D (center, angle, scale)**
+@param center center of the rotation in the source image.
+@param angle rotation angle in degrees. Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).
+@param scale isotropic scale factor.
+
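+A minimal sketch, rotating 45 degrees about the image center at scale 1:
+
+@code{.js}
+let dst = new cv.Mat();
+let center = new cv.Point(src.cols / 2, src.rows / 2);
+let M = cv.getRotationMatrix2D(center, 45, 1);
+cv.warpAffine(src, dst, M, new cv.Size(src.cols, src.rows), cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
+M.delete();
+@endcode
+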
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### Affine Transformation
+
+In affine transformation, all parallel lines in the original image will still be parallel in the
+output image. To find the transformation matrix, we need three points from input image and their
+corresponding locations in output image. Then **cv.getAffineTransform** will create a 2x3 matrix
+which is to be passed to **cv.warpAffine**.
+
+We use the function: **cv.getAffineTransform (src, dst)**
+
+@param src three points([3, 1] size and cv.CV_32FC2 type) from the input image.
+@param dst three corresponding points([3, 1] size and cv.CV_32FC2 type) in output image.
+
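+A minimal sketch (the three point pairs are hypothetical coordinates, chosen only for
+illustration):
+
+@code{.js}
+let dst = new cv.Mat();
+let srcTri = cv.matFromArray(3, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387]);
+let dstTri = cv.matFromArray(3, 1, cv.CV_32FC2, [10, 100, 300, 50, 100, 250]);
+let M = cv.getAffineTransform(srcTri, dstTri);
+cv.warpAffine(src, dst, M, new cv.Size(src.cols, src.rows));
+srcTri.delete(); dstTri.delete(); M.delete();
+@endcode
+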
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### Perspective Transformation
+
+For perspective transformation, you need a 3x3 transformation matrix. Straight lines will remain straight even after the transformation. To find this transformation matrix, you need 4 points on the input image and the corresponding points on the output image; among these 4 points, no 3 should be collinear. Then the transformation matrix can be found by the function **cv.getPerspectiveTransform**. Then apply **cv.warpPerspective** with this 3x3 transformation matrix.
+
+We use the functions: **cv.warpPerspective (src, dst, M, dsize, flags = cv.INTER_LINEAR, borderMode = cv.BORDER_CONSTANT, borderValue = new cv.Scalar())**
+
+@param src input image.
+@param dst output image that has the size dsize and the same type as src.
+@param M 3 × 3 transformation matrix(cv.CV_64FC1 type).
+@param dsize size of the output image.
+@param flags combination of interpolation methods (cv.INTER_LINEAR or cv.INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, that sets M as the inverse transformation (dst→src).
+@param borderMode pixel extrapolation method (cv.BORDER_CONSTANT or cv.BORDER_REPLICATE).
+@param borderValue value used in case of a constant border; by default, it is 0.
+
+**cv.getPerspectiveTransform (src, dst)**
+
+@param src coordinates of quadrangle vertices in the source image.
+@param dst coordinates of the corresponding quadrangle vertices in the destination image.
+
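+A minimal sketch (the four correspondences are hypothetical, mapping a quadrangle onto a 300x300
+square):
+
+@code{.js}
+let dst = new cv.Mat();
+let srcPts = cv.matFromArray(4, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387, 389, 390]);
+let dstPts = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300, 300, 300]);
+let M = cv.getPerspectiveTransform(srcPts, dstPts);
+cv.warpPerspective(src, dst, M, new cv.Size(300, 300));
+srcPts.delete(); dstPts.delete(); M.delete();
+@endcode
+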
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_grabcut/js_grabcut.markdown b/doc/js_tutorials/js_imgproc/js_grabcut/js_grabcut.markdown
new file mode 100644
index 0000000000..570a490fea
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_grabcut/js_grabcut.markdown
@@ -0,0 +1,76 @@
+Foreground Extraction using GrabCut Algorithm {#tutorial_js_grabcut}
+=========================================================
+
+Goal
+----
+
+- We will learn GrabCut algorithm to extract foreground in images
+
+Theory
+------
+
+The GrabCut algorithm was designed by Carsten Rother, Vladimir Kolmogorov & Andrew Blake from
+Microsoft Research Cambridge, UK, in their paper ["GrabCut": interactive foreground extraction
+using iterated graph cuts](http://dl.acm.org/citation.cfm?id=1015720). An algorithm was needed for
+foreground extraction with minimal user interaction, and the result was GrabCut.
+
+How does it work from the user's point of view? Initially the user draws a rectangle around the
+foreground region (the foreground region should be completely inside the rectangle). Then the
+algorithm segments it iteratively to get the best result. Done. But in some cases, the segmentation
+won't be fine; for example, it may have marked some foreground region as background and vice versa.
+In that case, the user needs to do fine touch-ups: just give some strokes on the image where the
+faulty results are. A stroke basically says *"Hey, this region should be foreground, you marked it
+background, correct it in the next iteration"* or its opposite for background. Then in the next
+iteration, you get better results.
+
+What happens in the background?
+
+- The user inputs the rectangle. Everything outside this rectangle is taken as sure background
+ (that is the reason it was mentioned before that your rectangle should include all the
+ objects). Everything inside the rectangle is unknown. Similarly, any user input specifying
+ foreground or background is considered hard-labelling, which means it won't change in
+ the process.
+- The computer does an initial labelling depending on the data we gave. It labels the foreground
+ and background pixels (or it hard-labels).
+- Now a Gaussian Mixture Model(GMM) is used to model the foreground and background.
+- Depending on the data we gave, the GMM learns and creates a new pixel distribution. That is, the
+ unknown pixels are labelled either probable foreground or probable background depending on their
+ relation with the other hard-labelled pixels in terms of color statistics (it is just like
+ clustering).
+- A graph is built from this pixel distribution. Nodes in the graph are pixels. Two additional
+ nodes are added, the **Source node** and the **Sink node**. Every foreground pixel is connected
+ to the Source node and every background pixel is connected to the Sink node.
+- The weights of the edges connecting pixels to the source node/sink node are defined by the
+ probability of a pixel being foreground/background. The weights between the pixels are defined
+ by the edge information or pixel similarity. If there is a large difference in pixel color, the
+ edge between them will get a low weight.
+- Then a mincut algorithm is used to segment the graph. It cuts the graph into two, separating the
+ source node and the sink node, with a minimum cost function. The cost is the sum of all weights
+ of the edges that are cut. After the cut, all the pixels connected to the Source node become
+ foreground and those connected to the Sink node become background.
+- The process is continued until the classification converges.
+
+It is illustrated in the image below:
+
+![image](images/grabcut_scheme.jpg)
+
+Demo
+----
+
+We use the function: **cv.grabCut (image, mask, rect, bgdModel, fgdModel, iterCount, mode = cv.GC_EVAL)**
+
+@param image input 8-bit 3-channel image.
+@param mask input/output 8-bit single-channel mask. The mask is initialized by the function when mode is set to GC_INIT_WITH_RECT. Its elements may have one of the cv.GrabCutClasses.
+@param rect ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". The parameter is only used when mode==GC_INIT_WITH_RECT.
+@param bgdModel temporary array for the background model. Do not modify it while you are processing the same image.
+@param fgdModel temporary array for the foreground model. Do not modify it while you are processing the same image.
+@param iterCount number of iterations the algorithm should make before returning the result. Note that the result can be refined with further calls with mode==GC_INIT_WITH_MASK or mode==GC_EVAL .
+@param mode operation mode that could be one of the cv::GrabCutModes
+
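+A minimal sketch (the rectangle coordinates are a hypothetical ROI around the foreground):
+
+@code{.js}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0); // grabCut expects a 3-channel image
+let mask = new cv.Mat();
+let bgdModel = new cv.Mat();
+let fgdModel = new cv.Mat();
+let rect = new cv.Rect(50, 50, 260, 280);
+cv.grabCut(src, mask, rect, bgdModel, fgdModel, 1, cv.GC_INIT_WITH_RECT);
+// pixels labelled cv.GC_FGD or cv.GC_PR_FGD in mask belong to the foreground
+@endcode
+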
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_gradients/js_gradients.markdown b/doc/js_tutorials/js_imgproc/js_gradients/js_gradients.markdown
new file mode 100644
index 0000000000..21e36a0bd9
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_gradients/js_gradients.markdown
@@ -0,0 +1,100 @@
+Image Gradients {#tutorial_js_gradients}
+===============
+
+Goal
+----
+
+- Find image gradients, edges etc
+- We will learn the following functions: **cv.Sobel()**, **cv.Scharr()**, **cv.Laplacian()** etc
+
+Theory
+------
+
+OpenCV provides three types of gradient filters or High-pass filters, Sobel, Scharr and Laplacian.
+We will see each one of them.
+
+### 1. Sobel and Scharr Derivatives
+
+The Sobel operator is a joint Gaussian smoothing plus differentiation operation, so it is more
+resistant to noise. You can specify the direction of the derivatives to be taken, vertical or
+horizontal (by the arguments yorder and xorder respectively). You can also specify the size of the
+kernel by the argument ksize. If ksize = -1, a 3x3 Scharr filter is used, which gives better
+results than a 3x3 Sobel filter. Please see the docs for the kernels used.
+
+We use the functions: **cv.Sobel (src, dst, ddepth, dx, dy, ksize = 3, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth(see cv.combinations); in the case of 8-bit input images it will result in truncated derivatives.
+@param dx order of the derivative x.
+@param dy order of the derivative y.
+@param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
+@param scale optional scale factor for the computed derivative values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
+**cv.Scharr (src, dst, ddepth, dx, dy, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth(see cv.combinations).
+@param dx order of the derivative x.
+@param dy order of the derivative y.
+@param scale optional scale factor for the computed derivative values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
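+A minimal sketch computing first derivatives in x and y on a grayscale image:
+
+@code{.js}
+let dstx = new cv.Mat();
+let dsty = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT); // vertical edges
+cv.Sobel(src, dsty, cv.CV_8U, 0, 1, 3, 1, 0, cv.BORDER_DEFAULT); // horizontal edges
+@endcode
+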
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 2. Laplacian Derivatives
+
+It calculates the Laplacian of the image given by the relation,
+\f$\Delta src = \frac{\partial ^2{src}}{\partial x^2} + \frac{\partial ^2{src}}{\partial y^2}\f$ where
+each derivative is found using Sobel derivatives. If ksize = 1, then following kernel is used for
+filtering:
+
+\f[kernel = \begin{bmatrix} 0 & 1 & 0 \\ 1 & -4 & 1 \\ 0 & 1 & 0 \end{bmatrix}\f]
+
+We use the function: **cv.Laplacian (src, dst, ddepth, ksize = 1, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image of the same size and the same number of channels as src.
+@param ddepth output image depth.
+@param ksize aperture size used to compute the second-derivative filters.
+@param scale optional scale factor for the computed Laplacian values.
+@param delta optional delta value that is added to the results prior to storing them in dst.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+
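+A minimal sketch:
+
+@code{.js}
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Laplacian(src, dst, cv.CV_8U, 1, 1, 0, cv.BORDER_DEFAULT);
+@endcode
+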
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+One Important Matter!
+---------------------
+
+In our last example, the output datatype is cv.CV_8U. But there is a slight problem with that. A
+black-to-white transition is taken as a positive slope (it has a positive value) while a
+white-to-black transition is taken as a negative slope (it has a negative value). So when you
+convert data to cv.CV_8U, all negative slopes are made zero. In simple words, you miss that edge.
+
+If you want to detect both edges, the better option is to keep the output datatype in some higher
+form, like cv.CV_16S, cv.CV_64F etc, take its absolute value and then convert back to cv.CV_8U.
+The code below demonstrates this procedure for a horizontal Sobel filter and the difference in
+results.
+
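+A minimal sketch of that procedure (assuming `src` is already grayscale; cv.convertScaleAbs takes
+the absolute value and converts back to 8 bits):
+
+@code{.js}
+let sobel16 = new cv.Mat();
+let sobel8u = new cv.Mat();
+// keep the signed output so negative slopes survive, then take the absolute value
+cv.Sobel(src, sobel16, cv.CV_16S, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
+cv.convertScaleAbs(sobel16, sobel8u, 1, 0);
+sobel16.delete();
+@endcode
+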
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_backprojection/js_histogram_backprojection.markdown b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_backprojection/js_histogram_backprojection.markdown
new file mode 100644
index 0000000000..5ad9492c7d
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_backprojection/js_histogram_backprojection.markdown
@@ -0,0 +1,59 @@
+Histogram - 3 : Histogram Backprojection {#tutorial_js_histogram_backprojection}
+========================================
+
+Goal
+----
+
+- We will learn about histogram backprojection.
+
+Theory
+------
+
+It was proposed by **Michael J. Swain and Dana H. Ballard** in their paper **Indexing via color
+histograms**.
+
+**What is it actually, in simple words?** It is used for image segmentation or finding objects of
+interest in an image. In simple words, it creates an image of the same size (but single channel) as
+our input image, where each pixel corresponds to the probability of that pixel belonging to our
+object. In simpler words, the output image will have our object of interest in more white compared
+to the remaining part. Well, that is an intuitive explanation. (I can't make it simpler). Histogram
+backprojection is used with the camshift algorithm, etc.
+
+**How do we do it?** We create a histogram of an image containing our object of interest (in our
+case, the ground, leaving out the player and other things). The object should fill the image as far
+as possible for better results. And a color histogram is preferred over a grayscale histogram,
+because the color of the object is a better way to define the object than its grayscale intensity.
+We then "back-project" this histogram over our test image where we need to find the object, ie in
+other words, we calculate the probability of every pixel belonging to the ground and show it. The
+resulting output on proper thresholding gives us the ground alone.
+
+Backprojection in OpenCV
+------------------------
+
+We use the functions: **cv.calcBackProject (images, channels, hist, dst, ranges, scale)**
+
+@param images source arrays. They all should have the same depth, cv.CV_8U, cv.CV_16U or cv.CV_32F , and the same size. Each of them can have an arbitrary number of channels.
+@param channels the list of channels used to compute the back projection. The number of channels must match the histogram dimensionality.
+@param hist input histogram that can be dense or sparse.
+@param dst destination back projection array that is a single-channel array of the same size and depth as images[0].
+@param ranges array of arrays of the histogram bin boundaries in each dimension(see cv.calcHist).
+@param scale optional scale factor for the output back projection.
+
+**cv.normalize (src, dst, alpha = 1, beta = 0, norm_type = cv.NORM_L2, dtype = -1, mask = new cv.Mat())**
+
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range normalization.
+@param beta upper range boundary in case of the range normalization; it is not used for the norm normalization.
+@param norm_type normalization type (see cv.NormTypes).
+@param dtype when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth = CV_MAT_DEPTH(dtype).
+@param mask optional operation mask.
+
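+A minimal sketch (assuming `roi` is a cv.Mat crop containing only the object and `src` is the test
+image; both variable names are our assumption):
+
+@code{.js}
+// build a hue histogram of the object region
+let hsvRoi = new cv.Mat();
+cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB, 0);
+cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV, 0);
+let roiVec = new cv.MatVector();
+roiVec.push_back(hsvRoi);
+let roiHist = new cv.Mat();
+let mask = new cv.Mat();
+cv.calcHist(roiVec, [0], mask, roiHist, [180], [0, 180], false);
+cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
+// back-project the histogram onto the HSV version of the test image
+let hsv = new cv.Mat();
+cv.cvtColor(src, hsv, cv.COLOR_RGBA2RGB, 0);
+cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV, 0);
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv);
+let dst = new cv.Mat();
+cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
+@endcode
+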
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_begins/js_histogram_begins.markdown b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_begins/js_histogram_begins.markdown
new file mode 100644
index 0000000000..fdcadf409c
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_begins/js_histogram_begins.markdown
@@ -0,0 +1,51 @@
+Histograms - 1 : Find, Plot, Analyze !!! {#tutorial_js_histogram_begins}
+========================================
+
+Goal
+----
+
+- Find histograms
+- Plot histograms
+- You will learn the function: **cv.calcHist()**.
+
+Theory
+------
+
+So what is a histogram? You can consider a histogram as a graph or plot which gives you an overall
+idea about the intensity distribution of an image. It is a plot with pixel values (ranging from 0
+to 255, not always) on the X-axis and the corresponding number of pixels in the image on the
+Y-axis.
+
+It is just another way of understanding the image. By looking at the histogram of an image, you get
+intuition about the contrast, brightness, intensity distribution etc of that image. Almost all
+image processing tools today provide features on histograms. Below is an image from the [Cambridge
+in Color website](http://www.cambridgeincolour.com/tutorials/histograms1.htm), and I recommend you
+visit the site for more details.
+
+![image](histogram_sample.jpg)
+
+You can see the image and its histogram. (Remember, this histogram is drawn for a grayscale image,
+not a color image). The left region of the histogram shows the amount of darker pixels in the image
+and the right region shows the amount of brighter pixels. From the histogram, you can see that the
+dark region is larger than the brighter region, and the amount of midtones (pixel values in the
+mid-range, say around 127) is very small.
+
+Find Histogram
+--------------
+
+We use the function: **cv.calcHist (image, channels, mask, hist, histSize, ranges, accumulate = false)**
+
+@param image source arrays. They all should have the same depth, cv.CV_8U, cv.CV_16U or cv.CV_32F , and the same size. Each of them can have an arbitrary number of channels.
+@param channels list of the dims channels used to compute the histogram.
+@param mask optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.
+@param hist output histogram(cv.CV_32F type), which is a dense or sparse dims -dimensional array.
+@param histSize array of histogram sizes in each dimension.
+@param ranges array of the dims arrays of the histogram bin boundaries in each dimension.
+@param accumulate accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.
+
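+A minimal sketch computing a 256-bin grayscale histogram:
+
+@code{.js}
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+let srcVec = new cv.MatVector();
+srcVec.push_back(src);
+let hist = new cv.Mat();
+let mask = new cv.Mat();
+cv.calcHist(srcVec, [0], mask, hist, [256], [0, 255], false);
+// hist.data32F[i] now holds the pixel count of bin i
+@endcode
+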
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_equalization/js_histogram_equalization.markdown b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_equalization/js_histogram_equalization.markdown
new file mode 100644
index 0000000000..332d456cb9
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_histograms/js_histogram_equalization/js_histogram_equalization.markdown
@@ -0,0 +1,63 @@
+Histograms - 2: Histogram Equalization {#tutorial_js_histogram_equalization}
+======================================
+
+Goal
+----
+
+- We will learn the concepts of histogram equalization and use it to improve the contrast of our
+ images.
+
+Theory
+------
+
+Consider an image whose pixel values are confined to some specific range of values only. For
+example, a brighter image will have all pixels confined to high values. But a good image will have
+pixels from all regions of the image. So you need to stretch this histogram to either end (as given
+in the below image, from Wikipedia) and that is what Histogram Equalization does (in simple words).
+This normally improves the contrast of the image.
+
+![image](images/histogram_equalization.png)
+
+I would recommend you read the Wikipedia page on [Histogram
+Equalization](http://en.wikipedia.org/wiki/Histogram_equalization) for more details about it. It
+has a very good explanation with worked out examples, so that you will understand almost everything
+after reading it.
+
+Histograms Equalization in OpenCV
+---------------------------------
+
+We use the function: **cv.equalizeHist (src, dst)**
+
+@param src source 8-bit single channel image.
+@param dst destination image of the same size and type as src.
+
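+A minimal sketch (the cvtColor step is needed because equalizeHist expects an 8-bit single-channel
+image):
+
+@code{.js}
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.equalizeHist(src, dst);
+@endcode
+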
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+CLAHE (Contrast Limited Adaptive Histogram Equalization)
+--------------------------------------------------------
+
+In **adaptive histogram equalization**, the image is divided into small blocks called "tiles" (tileSize is 8x8 by default in OpenCV). Then each of these blocks is histogram equalized as usual. So in a small area, the histogram would confine to a small region
+(unless there is noise). If noise is there, it will be amplified. To avoid this, **contrast limiting** is applied. If any histogram bin is above the specified contrast limit (by default 40 in OpenCV), those pixels are clipped and distributed uniformly to other bins before applying histogram equalization. After equalization, to remove artifacts at tile borders, bilinear interpolation is applied.
+
+We use the class: **cv.CLAHE (clipLimit = 40, tileGridSize = new cv.Size(8, 8))**
+
+@param clipLimit threshold for contrast limiting.
+@param tileGridSize size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
+
+@note Don't forget to delete CLAHE!
+
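+A minimal sketch:
+
+@code{.js}
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+let clahe = new cv.CLAHE(40, new cv.Size(8, 8));
+clahe.apply(src, dst);
+clahe.delete(); // don't forget to delete CLAHE
+@endcode
+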
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_histograms/js_table_of_contents_histograms.markdown b/doc/js_tutorials/js_imgproc/js_histograms/js_table_of_contents_histograms.markdown
new file mode 100644
index 0000000000..75656e46e9
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_histograms/js_table_of_contents_histograms.markdown
@@ -0,0 +1,14 @@
+Histograms in OpenCV.js {#tutorial_js_table_of_contents_histograms}
+====================
+
+- @subpage tutorial_js_histogram_begins
+
+ Learn to find, plot and analyze histograms
+
+- @subpage tutorial_js_histogram_equalization
+
+ Learn to Equalize Histograms to get better contrast for images
+
+- @subpage tutorial_js_histogram_backprojection
+
+ Learn histogram backprojection to segment colored objects
diff --git a/doc/js_tutorials/js_imgproc/js_houghcircles/js_houghcircles.markdown b/doc/js_tutorials/js_imgproc/js_houghcircles/js_houghcircles.markdown
new file mode 100644
index 0000000000..a5f55c7424
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_houghcircles/js_houghcircles.markdown
@@ -0,0 +1,38 @@
+Hough Circle Transform {#tutorial_js_houghcircles}
+======================
+
+Goal
+----
+
+- We will learn to use Hough Transform to find circles in an image.
+- We will learn these functions: **cv.HoughCircles()**
+
+Theory
+------
+
+A circle is represented mathematically as \f$(x-x_{center})^2 + (y - y_{center})^2 = r^2\f$ where
+\f$(x_{center},y_{center})\f$ is the center of the circle, and \f$r\f$ is its radius. From the
+equation, we can see we have 3 parameters, so we need a 3D accumulator for the Hough transform,
+which would be highly ineffective. So OpenCV uses a trickier method, the **Hough Gradient Method**,
+which uses the gradient information of edges.
+
+We use the function: **cv.HoughCircles (image, circles, method, dp, minDist, param1 = 100, param2 = 100, minRadius = 0, maxRadius = 0)**
+
+@param image 8-bit, single-channel, grayscale input image.
+@param circles output vector of found circles(cv.CV_32FC3 type). Each vector is encoded as a 3-element floating-point vector (x,y,radius) .
+@param method detection method(see cv.HoughModes). Currently, the only implemented method is HOUGH_GRADIENT
+@param dp inverse ratio of the accumulator resolution to the image resolution. For example, if dp = 1 , the accumulator has the same resolution as the input image. If dp = 2 , the accumulator has half as big width and height.
+@param minDist minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
+@param param1 first method-specific parameter. In case of HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
+@param param2 second method-specific parameter. In case of HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first.
+@param minRadius minimum circle radius.
+@param maxRadius maximum circle radius.
+
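+A minimal sketch (the parameter values are illustrative only):
+
+@code{.js}
+let circles = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.HoughCircles(src, circles, cv.HOUGH_GRADIENT, 1, 45, 75, 40, 0, 0);
+// each detected circle is three floats: x, y, radius
+for (let i = 0; i < circles.cols; ++i) {
+    let x = circles.data32F[i * 3];
+    let y = circles.data32F[i * 3 + 1];
+    let radius = circles.data32F[i * 3 + 2];
+    console.log(x, y, radius);
+}
+@endcode
+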
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_houghlines/js_houghlines.markdown b/doc/js_tutorials/js_imgproc/js_houghlines/js_houghlines.markdown
new file mode 100644
index 0000000000..3f78775491
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_houghlines/js_houghlines.markdown
@@ -0,0 +1,119 @@
+Hough Line Transform {#tutorial_js_houghlines}
+====================
+
+Goal
+----
+
+- We will understand the concept of the Hough Transform.
+- We will learn how to use it to detect lines in an image.
+- We will learn the following functions: **cv.HoughLines()**, **cv.HoughLinesP()**
+
+Theory
+------
+
+The Hough Transform is a popular technique to detect any shape, if you can represent that shape in a
+mathematical form. It can detect the shape even if it is broken or distorted a little bit. We will
+see how it works for a line.
+
+A line can be represented as \f$y = mx+c\f$ or in a parametric form, as
+\f$\rho = x \cos \theta + y \sin \theta\f$ where \f$\rho\f$ is the perpendicular distance from the origin to the
+line, and \f$\theta\f$ is the angle formed by this perpendicular line and the horizontal axis, measured
+counter-clockwise (the direction varies with how you represent the coordinate system; this
+representation is used in OpenCV). Check the image below:
+
+![image](images/houghlines1.svg)
+
+So if the line is passing below the origin, it will have a positive rho and an angle less than 180. If it
+is going above the origin, instead of taking an angle greater than 180, the angle is taken less than 180,
+and rho is taken negative. Any vertical line will have 0 degrees and horizontal lines will have 90
+degrees.
+
+Now let's see how the Hough Transform works for lines. Any line can be represented in these two terms,
+\f$(\rho, \theta)\f$. So first it creates a 2D array or accumulator (to hold the values of the two parameters)
+and it is set to 0 initially. Let rows denote the \f$\rho\f$ and columns denote the \f$\theta\f$. Size of
+array depends on the accuracy you need. Suppose you want the accuracy of angles to be 1 degree, you will
+need 180 columns. For \f$\rho\f$, the maximum distance possible is the diagonal length of the image. So
+taking one pixel accuracy, the number of rows can be the diagonal length of the image.
+
+Consider a 100x100 image with a horizontal line at the middle. Take the first point of the line. You
+know its (x,y) values. Now in the line equation, put the values \f$\theta = 0,1,2,....,180\f$ and check
+the \f$\rho\f$ you get. For every \f$(\rho, \theta)\f$ pair, you increment value by one in our accumulator
+in its corresponding \f$(\rho, \theta)\f$ cells. So now in accumulator, the cell (50,90) = 1 along with
+some other cells.
+
+Now take the second point on the line. Do the same as above. Increment the values in the cells
+corresponding to \f$(\rho, \theta)\f$ you got. This time, the cell (50,90) = 2. What you actually
+do is voting the \f$(\rho, \theta)\f$ values. You continue this process for every point on the line. At
+each point, the cell (50,90) will be incremented or voted up, while other cells may or may not be
+voted up. This way, at the end, the cell (50,90) will have maximum votes. So if you search the
+accumulator for maximum votes, you get the value (50,90) which says, there is a line in this image
+at a distance 50 from the origin and at angle 90 degrees. It is well shown in the below animation (Image
+Courtesy: [Amos Storkey](http://homepages.inf.ed.ac.uk/amos/hough.html) )
+
+![](houghlinesdemo.gif)
+
+This is how hough transform works for lines. It is simple. Below is an image which shows the accumulator. Bright spots at some locations
+denote they are the parameters of possible lines in the image. (Image courtesy: [Wikipedia](http://en.wikipedia.org/wiki/Hough_transform) )
+
+![](houghlines2.jpg)
+
+Hough Transform in OpenCV
+=========================
+
+Everything explained above is encapsulated in the OpenCV function **cv.HoughLines()**. It simply returns an array of \f$(\rho, \theta)\f$ values, where \f$\rho\f$ is measured in pixels and \f$\theta\f$ in radians. The first parameter,
+the input image, should be a binary image, so apply a threshold or use Canny edge detection before
+applying the Hough transform.
+
+We use the function: **cv.HoughLines (image, lines, rho, theta, threshold, srn = 0, stn = 0, min_theta = 0, max_theta = Math.PI)**
+@param image 8-bit, single-channel binary source image. The image may be modified by the function.
+@param lines output vector of lines(cv.CV_32FC2 type). Each line is represented by a two-element vector (ρ,θ) . ρ is the distance from the coordinate origin (0,0). θ is the line rotation angle in radians.
+@param rho distance resolution of the accumulator in pixels.
+@param theta angle resolution of the accumulator in radians.
+@param threshold accumulator threshold parameter. Only those lines are returned that get enough votes
+@param srn for the multi-scale Hough transform, it is a divisor for the distance resolution rho . The coarse accumulator distance resolution is rho and the accurate accumulator resolution is rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these parameters should be positive.
+@param stn for the multi-scale Hough transform, it is a divisor for the distance resolution theta.
+@param min_theta for standard and multi-scale Hough transform, minimum angle to check for lines. Must fall between 0 and max_theta.
+@param max_theta for standard and multi-scale Hough transform, maximum angle to check for lines. Must fall between min_theta and CV_PI.
+
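+A minimal sketch (Canny is used here to produce the binary edge image first):
+
+@code{.js}
+let lines = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(src, src, 50, 200, 3);
+cv.HoughLines(src, lines, 1, Math.PI / 180, 30, 0, 0, 0, Math.PI);
+// line i is (rho, theta) at lines.data32F[i * 2] and lines.data32F[i * 2 + 1]
+@endcode
+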
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Probabilistic Hough Transform
+-----------------------------
+
+In the Hough transform, you can see that even for a line with two parameters, it takes a lot of
+computation. The Probabilistic Hough Transform is an optimization of the Hough Transform we saw. It
+doesn't take all the points into consideration. Instead, it takes only a random subset of points
+which is sufficient for line detection. We just have to decrease the threshold. See the image below,
+which compares the Hough Transform and the Probabilistic Hough Transform in Hough space. (Image
+Courtesy : [Franck Bettinger's home page](http://phdfb1.free.fr/robot/mscthesis/node14.html) )
+
+![image](images/houghlines4.png)
+
+OpenCV implementation is based on Robust Detection of Lines Using the Progressive Probabilistic
+Hough Transform by Matas, J. and Galambos, C. and Kittler, J.V. @cite Matas00.
+
+We use the function: **cv.HoughLinesP (image, lines, rho, theta, threshold, minLineLength = 0, maxLineGap = 0)**
+
+@param image 8-bit, single-channel binary source image. The image may be modified by the function.
+@param lines output vector of lines(cv.CV_32SC4 type). Each line is represented by a 4-element vector (x1,y1,x2,y2) ,where (x1,y1) and (x2,y2) are the ending points of each detected line segment.
+@param rho distance resolution of the accumulator in pixels.
+@param theta angle resolution of the accumulator in radians.
+@param threshold accumulator threshold parameter. Only those lines are returned that get enough votes
+@param minLineLength minimum line length. Line segments shorter than that are rejected.
+@param maxLineGap maximum allowed gap between points on the same line to link them.
+
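+A minimal sketch (threshold and gap values are illustrative):
+
+@code{.js}
+let lines = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+cv.Canny(src, src, 50, 200, 3);
+cv.HoughLinesP(src, lines, 1, Math.PI / 180, 2, 0, 0);
+// segment i is (x1, y1, x2, y2) starting at lines.data32S[i * 4]
+@endcode
+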
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_imgproc_camera/js_imgproc_camera.markdown b/doc/js_tutorials/js_imgproc/js_imgproc_camera/js_imgproc_camera.markdown
new file mode 100644
index 0000000000..cbda5b0a59
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_imgproc_camera/js_imgproc_camera.markdown
@@ -0,0 +1,14 @@
+Image Processing for Video Capture {#tutorial_js_imgproc_camera}
+==================================
+
+Goal
+----
+
+- Learn image processing for video capture.
+
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_morphological_ops/js_morphological_ops.markdown b/doc/js_tutorials/js_imgproc/js_morphological_ops/js_morphological_ops.markdown
new file mode 100644
index 0000000000..b5e10e0e67
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_morphological_ops/js_morphological_ops.markdown
@@ -0,0 +1,177 @@
+Morphological Transformations {#tutorial_js_morphological_ops}
+=============================
+
+Goal
+----
+
+- We will learn different morphological operations like Erosion, Dilation, Opening, Closing
+ etc.
+- We will learn different functions like : **cv.erode()**, **cv.dilate()**,
+ **cv.morphologyEx()** etc.
+
+Theory
+------
+
+Morphological transformations are some simple operations based on the image shape. They are
+normally performed on binary images. They need two inputs: one is our original image, the second is
+called the **structuring element** or **kernel**, which decides the nature of the operation. Two
+basic morphological operators are Erosion and Dilation. Then variant forms like Opening, Closing,
+Gradient etc also come into play. We will see them one-by-one with the help of the following image:
+
+![image](shape.jpg)
+
+### 1. Erosion
+
+The basic idea of erosion is just like soil erosion: it erodes away the boundaries of the
+foreground object (always try to keep the foreground in white). So what does it do? The kernel
+slides through the image (as in 2D convolution). A pixel in the original image (either 1 or 0) will
+be considered 1 only if all the pixels under the kernel are 1; otherwise it is eroded (made zero).
+
+So what happens is that all the pixels near the boundary will be discarded, depending upon the size
+of the kernel. So the thickness or size of the foreground object decreases, or simply the white
+region decreases in the image. It is useful for removing small white noise (as we have seen in the
+colorspace chapter), detaching two connected objects, etc.
+
+We use the function: **cv.erode (src, dst, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src input image; the number of channels can be arbitrary, but the depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F.
+@param dst output image of the same size and type as src.
+@param kernel structuring element used for erosion.
+@param anchor position of the anchor within the element; default value new cv.Point(-1, -1) means that the anchor is at the element center.
+@param iterations number of times erosion is applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border
+
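+A minimal sketch with a 5x5 rectangular kernel:
+
+@code{.js}
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+let anchor = new cv.Point(-1, -1);
+cv.erode(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+M.delete();
+@endcode
+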
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 2. Dilation
+
+It is just the opposite of erosion. Here, a pixel element is '1' if at least one pixel under the
+kernel is '1'. So it increases the white region in the image, or the size of the foreground object
+increases. Normally, in cases like noise removal, erosion is followed by dilation. Because erosion
+removes white noise, but it also shrinks our object, we dilate it afterwards. Since the noise is
+gone, it won't come back, but our object area increases. Dilation is also useful in joining broken
+parts of an object.
+
+We use the function: **cv.dilate (src, dst, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src input image; the number of channels can be arbitrary, but the depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F.
+@param dst output image of the same size and type as src.
+@param kernel structuring element used for dilation.
+@param anchor position of the anchor within the element; default value new cv.Point(-1, -1) means that the anchor is at the element center.
+@param iterations number of times dilation is applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border
+
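+A minimal sketch, mirroring the erosion example above:
+
+@code{.js}
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+cv.dilate(src, dst, M, new cv.Point(-1, -1), 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+M.delete();
+@endcode
+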
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 3. Opening
+
+Opening is just another name of **erosion followed by dilation**. It is useful in removing noise.
+
+We use the function: **cv.morphologyEx (src, dst, op, kernel, anchor = new cv.Point(-1, -1), iterations = 1, borderType = cv.BORDER_CONSTANT, borderValue = cv.morphologyDefaultBorderValue())**
+@param src source image. The number of channels can be arbitrary. The depth should be one of cv.CV_8U, cv.CV_16U, cv.CV_16S, cv.CV_32F or cv.CV_64F
+@param dst destination image of the same size and type as source image.
+@param op type of a morphological operation, (see cv.MorphTypes).
+@param kernel structuring element. It can be created using cv.getStructuringElement.
+@param anchor anchor position within the kernel. Negative values mean that the anchor is at the kernel center.
+@param iterations number of times erosion and dilation are applied.
+@param borderType pixel extrapolation method(see cv.BorderTypes).
+@param borderValue border value in case of a constant border. The default value has a special meaning.
+
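+A minimal sketch; the other operations below (Closing, Gradient, Top Hat, Black Hat) only differ in
+the op constant (cv.MORPH_CLOSE, cv.MORPH_GRADIENT, cv.MORPH_TOPHAT, cv.MORPH_BLACKHAT):
+
+@code{.js}
+let dst = new cv.Mat();
+let M = cv.Mat.ones(5, 5, cv.CV_8U);
+cv.morphologyEx(src, dst, cv.MORPH_OPEN, M, new cv.Point(-1, -1), 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
+M.delete();
+@endcode
+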
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 4. Closing
+
+Closing is reverse of Opening, **Dilation followed by Erosion**. It is useful in closing small holes
+inside the foreground objects, or small black points on the object.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 5. Morphological Gradient
+
+It is the difference between dilation and erosion of an image.
+
+The result will look like the outline of the object.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 6. Top Hat
+
+It is the difference between input image and Opening of the image.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+### 7. Black Hat
+
+It is the difference between the closing of the input image and input image.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Structuring Element
+-------------------
+
+We manually created structuring elements in the previous examples with the help of cv.Mat.ones.
+They are rectangular. But in some cases, you may need elliptical/circular shaped kernels. So for
+this purpose, OpenCV has a function, **cv.getStructuringElement()**. You just pass the shape and
+size of the kernel, and you get the desired kernel.
+
+We use the function: **cv.getStructuringElement (shape, ksize, anchor = new cv.Point(-1, -1))**
+@param shape element shape that could be one of cv.MorphShapes
+@param ksize size of the structuring element.
+@param anchor anchor position within the element. The default value [−1,−1] means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted.
+
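+A minimal sketch building a 5x5 elliptical kernel:
+
+@code{.js}
+let M = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(5, 5), new cv.Point(-1, -1));
+@endcode
+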
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_pyramids/js_pyramids.markdown b/doc/js_tutorials/js_imgproc/js_pyramids/js_pyramids.markdown
new file mode 100644
index 0000000000..979fae5d12
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_pyramids/js_pyramids.markdown
@@ -0,0 +1,70 @@
+Image Pyramids {#tutorial_js_pyramids}
+==============
+
+Goal
+----
+
+- We will learn about Image Pyramids
+- We will learn these functions: **cv.pyrUp()**, **cv.pyrDown()**
+
+Theory
+------
+
+Normally, we work with an image of constant size. But on some occasions, we need to work with
+images of different resolutions of the same image. For example, while searching for something in an
+image, like a face, we are not sure at what size the object will be present in the image. In that
+case, we will need to create a set of images with different resolutions and search for the object
+in all of them. These sets of images with different resolutions are called Image Pyramids (because
+when they are kept in a stack, with the biggest image at the bottom and the smallest image at the
+top, they look like a pyramid).
+
+There are two kinds of Image Pyramids: 1) Gaussian Pyramids and 2) Laplacian Pyramids.
+
+A higher level (low resolution) in a Gaussian Pyramid is formed by removing consecutive rows and
+columns in the lower level (higher resolution) image. Each pixel in the higher level is formed by a
+contribution from 5 pixels in the underlying level with Gaussian weights. By doing so, an \f$M \times N\f$
+image becomes an \f$M/2 \times N/2\f$ image, so the area reduces to one-fourth of the original area. This
+is called an Octave. The same pattern continues as we go higher in the pyramid (ie, resolution
+decreases). Similarly, while expanding, the area becomes 4 times larger at each level. We can find
+Gaussian pyramids using the **cv.pyrDown()** and **cv.pyrUp()** functions.
+
+Laplacian Pyramids are formed from the Gaussian Pyramids; there is no dedicated function for them.
+Laplacian pyramid images are like edge images: most of their elements are zeros. They are used in
+image compression. A level in a Laplacian Pyramid is formed by the difference between that level in
+the Gaussian Pyramid and the expanded version of its upper level in the Gaussian Pyramid.
+
+Downsample
+------
+
+We use the function: **cv.pyrDown (src, dst, dstsize = new cv.Size(0, 0), borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image; it has the specified size and the same type as src.
+@param dstsize size of the output image.
+@param borderType pixel extrapolation method (see cv.BorderTypes; cv.BORDER_CONSTANT isn't supported).
+
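+A minimal sketch (assuming tutorial-style `canvasInput`/`canvasOutput` canvas ids):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.pyrDown(src, dst); // width and height are halved
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode
+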
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Upsample
+------
+
+We use the function: **cv.pyrUp (src, dst, dstsize = new cv.Size(0, 0), borderType = cv.BORDER_DEFAULT)**
+@param src input image.
+@param dst output image; it has the specified size and the same type as src.
+@param dstsize size of the output image.
+@param borderType pixel extrapolation method (see cv.BorderTypes; only cv.BORDER_DEFAULT is supported).
+
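+The corresponding sketch for upsampling:
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.pyrUp(src, dst); // width and height are doubled
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode
+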
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_table_of_contents_imgproc.markdown b/doc/js_tutorials/js_imgproc/js_table_of_contents_imgproc.markdown
new file mode 100644
index 0000000000..3bb809be71
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_table_of_contents_imgproc.markdown
@@ -0,0 +1,79 @@
+Image Processing {#tutorial_js_table_of_contents_imgproc}
+==========================
+
+- @subpage tutorial_js_colorspaces
+
+ Learn how to change images between different color spaces.
+
+- @subpage tutorial_js_geometric_transformations
+
+ Learn how to apply different geometric transformations to images like rotation, translation etc.
+
+- @subpage tutorial_js_thresholding
+
+    Learn how to convert images to binary images using global thresholding, Adaptive thresholding,
+    Otsu's binarization etc.
+
+- @subpage tutorial_js_filtering
+
+    Learn how to blur the images, filter the images with custom kernels etc.
+
+- @subpage tutorial_js_morphological_ops
+
+ Learn about morphological transformations like Erosion, Dilation, Opening, Closing etc.
+
+- @subpage tutorial_js_gradients
+
+    Learn how to find image gradients, edges etc.
+
+- @subpage tutorial_js_canny
+
+    Learn how to find edges with Canny Edge Detection.
+
+- @subpage tutorial_js_pyramids
+
+ Learn about image pyramids and how to use them for image blending.
+
+- @subpage tutorial_js_table_of_contents_contours
+
+    Learn about Contours in OpenCV.js.
+
+- @subpage tutorial_js_table_of_contents_histograms
+
+    Learn about histograms in OpenCV.js.
+
+- @subpage tutorial_js_table_of_contents_transforms
+
+    Learn different Image Transforms in OpenCV.js like Fourier Transform, Cosine Transform etc.
+
+- @subpage tutorial_js_template_matching
+
+    Learn how to search for an object in an image using Template Matching.
+
+- @subpage tutorial_js_houghlines
+
+ Learn how to detect lines in an image.
+
+- @subpage tutorial_js_houghcircles
+
+ Learn how to detect circles in an image.
+
+- @subpage tutorial_js_watershed
+
+ Learn how to segment images with watershed segmentation.
+
+- @subpage tutorial_js_grabcut
+
+ Learn how to extract foreground with GrabCut algorithm.
+
+- @subpage tutorial_js_imgproc_camera
+
+ Learn image processing for video capture.
diff --git a/doc/js_tutorials/js_imgproc/js_template_matching/js_template_matching.markdown b/doc/js_tutorials/js_imgproc/js_template_matching/js_template_matching.markdown
new file mode 100644
index 0000000000..21c8a78ba6
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_template_matching/js_template_matching.markdown
@@ -0,0 +1,45 @@
+Template Matching {#tutorial_js_template_matching}
+=================
+
+Goals
+-----
+
+- To find objects in an image using Template Matching
+- You will learn these functions : **cv.matchTemplate()**, **cv.minMaxLoc()**
+
+Theory
+------
+
+Template Matching is a method for searching and finding the location of a template image in a larger
+image. OpenCV comes with the function **cv.matchTemplate()** for this purpose. It simply slides the
+template image over the input image (as in 2D convolution) and compares the template against the patch
+of the input image under it. Several comparison methods are implemented in OpenCV (you can
+check the docs for more details). It returns a grayscale image, where each pixel denotes how well
+the neighbourhood of that pixel matches the template.
+
+If the input image is of size (WxH) and the template image is of size (wxh), the output image will have
+a size of (W-w+1, H-h+1). Once you have the result, you can use the **cv.minMaxLoc()** function to find
+the location of the maximum/minimum value. Take it as the top-left corner of a rectangle and take (w,h)
+as the width and height of the rectangle. That rectangle is the matched region of the template.
+
+@note If you are using cv.TM_SQDIFF as the comparison method, the minimum value gives the best match.
+
+Template Matching in OpenCV
+---------------------------
+
+We use the function: **cv.matchTemplate (image, templ, result, method, mask = new cv.Mat())**
+
+@param image image where the search is running. It must be 8-bit or 32-bit floating-point.
+@param templ searched template. It must not be greater than the source image and must have the same data type.
+@param result map of comparison results. It must be single-channel 32-bit floating-point.
+@param method parameter specifying the comparison method (see cv.TemplateMatchModes).
+@param mask mask of searched template. It must have the same datatype and size as templ. It is not set by default.
+
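+A minimal sketch (tutorial-style canvas ids assumed, with the template on a separate,
+hypothetically named `templateCanvas`):
+@code{.js}
+let src = cv.imread('canvasInput');
+let templ = cv.imread('templateCanvas');
+let result = new cv.Mat();
+let mask = new cv.Mat();
+cv.matchTemplate(src, templ, result, cv.TM_CCOEFF, mask);
+// for cv.TM_CCOEFF the best match is at the maximum
+let maxPoint = cv.minMaxLoc(result, mask).maxLoc;
+let color = new cv.Scalar(255, 0, 0, 255);
+let point = new cv.Point(maxPoint.x + templ.cols, maxPoint.y + templ.rows);
+cv.rectangle(src, maxPoint, point, color, 2, cv.LINE_8, 0);
+cv.imshow('canvasOutput', src);
+src.delete(); templ.delete(); result.delete(); mask.delete();
+@endcode
+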
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_thresholding/js_thresholding.markdown b/doc/js_tutorials/js_imgproc/js_thresholding/js_thresholding.markdown
new file mode 100644
index 0000000000..0ee6aba7d2
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_thresholding/js_thresholding.markdown
@@ -0,0 +1,74 @@
+Image Thresholding {#tutorial_js_thresholding}
+==================
+
+Goal
+----
+
+- In this tutorial, you will learn Simple thresholding, Adaptive thresholding, Otsu's thresholding
+ etc.
+- You will learn these functions : **cv.threshold**, **cv.adaptiveThreshold** etc.
+
+Simple Thresholding
+-------------------
+
+Here, the matter is straightforward. If the pixel value is greater than a threshold value, it is
+assigned one value (maybe white), else it is assigned another value (maybe black).
+
+We use the function: **cv.threshold (src, dst, thresh, maxval, type)**
+@param src input array.
+@param dst output array of the same size and type and the same number of channels as src.
+@param thresh threshold value.
+@param maxval maximum value to use with the cv.THRESH_BINARY and cv.THRESH_BINARY_INV thresholding types.
+@param type thresholding type (see cv.ThresholdTypes).
+
+**thresholding type** - OpenCV provides different styles of thresholding, decided by
+the `type` parameter of the function. Different types are:
+
+- cv.THRESH_BINARY
+- cv.THRESH_BINARY_INV
+- cv.THRESH_TRUNC
+- cv.THRESH_TOZERO
+- cv.THRESH_OTSU
+- cv.THRESH_TRIANGLE
+
+@note The input image should be single-channel when the cv.THRESH_OTSU or cv.THRESH_TRIANGLE flag is used.
+
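+A minimal sketch (tutorial-style canvas ids assumed):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0); // threshold a grayscale image
+cv.threshold(src, dst, 127, 255, cv.THRESH_BINARY);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode
+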
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Adaptive Thresholding
+---------------------
+
+In the previous section, we used a global value as the threshold value. But this may not be good
+for images that have different lighting conditions in different areas. In that case, we go
+for adaptive thresholding. Here, the algorithm calculates the threshold for small regions of the
+image. So we get different thresholds for different regions of the same image, which gives us better
+results for images with varying illumination.
+
+We use the function: **cv.adaptiveThreshold (src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C)**
+@param src source 8-bit single-channel image.
+@param dst destination image of the same size and the same type as src.
+@param maxValue non-zero value assigned to the pixels for which the condition is satisfied.
+@param adaptiveMethod adaptive thresholding algorithm to use.
+@param thresholdType thresholding type that must be either cv.THRESH_BINARY or cv.THRESH_BINARY_INV.
+@param blockSize size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
+@param C constant subtracted from the mean or weighted mean (see the details below). Normally, it is positive but may be zero or negative as well.
+
+**adaptiveMethod** - It decides how thresholding value is calculated:
+ - cv.ADAPTIVE_THRESH_MEAN_C
+ - cv.ADAPTIVE_THRESH_GAUSSIAN_C
+
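+A minimal sketch (tutorial-style canvas ids assumed):
+@code{.js}
+let src = cv.imread('canvasInput');
+let dst = new cv.Mat();
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0); // adaptiveThreshold expects an 8-bit single-channel image
+cv.adaptiveThreshold(src, dst, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2);
+cv.imshow('canvasOutput', dst);
+src.delete(); dst.delete();
+@endcode
+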
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_transforms/js_fourier_transform/js_fourier_transform.markdown b/doc/js_tutorials/js_imgproc/js_transforms/js_fourier_transform/js_fourier_transform.markdown
new file mode 100644
index 0000000000..9b773c90d5
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_transforms/js_fourier_transform/js_fourier_transform.markdown
@@ -0,0 +1,89 @@
+Fourier Transform {#tutorial_js_fourier_transform}
+=================
+
+Goal
+----
+
+- To find the Fourier Transform of images using OpenCV
+- Some applications of Fourier Transform
+- We will learn the following functions: **cv.dft()** etc.
+
+Theory
+------
+
+Fourier Transform is used to analyze the frequency characteristics of various filters. For images,
+**2D Discrete Fourier Transform (DFT)** is used to find the frequency domain. A fast algorithm
+called **Fast Fourier Transform (FFT)** is used for calculation of DFT. Details about these can be
+found in any image processing or signal processing textbook.
+
+For a sinusoidal signal \f$x(t) = A \sin(2 \pi f t)\f$, we can say \f$f\f$ is the frequency of the signal, and
+if its frequency domain is taken, we can see a spike at \f$f\f$. If the signal is sampled to form a discrete
+signal, we get the same frequency domain, but it is periodic in the range \f$[- \pi, \pi]\f$ or \f$[0,2\pi]\f$
+(or \f$[0,N]\f$ for an N-point DFT). You can consider an image as a signal which is sampled in two
+directions. So taking the Fourier transform in both the X and Y directions gives you the frequency
+representation of the image.
+
+More intuitively, for the sinusoidal signal, if the amplitude varies very fast in a short time, you can
+say it is a high frequency signal. If it varies slowly, it is a low frequency signal. You can extend
+the same idea to images. Where does the amplitude vary drastically in images? At the edge points,
+or noise. So we can say edges and noise are high frequency contents in an image. If there are no
+significant changes in amplitude, it is a low frequency component.
+
+The performance of the DFT calculation is better for some array sizes. It is fastest when the array
+size is a power of two. Arrays whose size is a product of 2's, 3's, and 5's are also processed quite
+efficiently. So if you are worried about the performance of your code, you can pad the array with
+zeros to an optimal size before finding the DFT. OpenCV provides the function **cv.getOptimalDFTSize()** for this.
+
+Now we will see how to find the Fourier Transform.
+
+Fourier Transform in OpenCV
+---------------------------
+
+We use the functions: **cv.dft (src, dst, flags = 0, nonzeroRows = 0)**
+
+@param src input array that could be real or complex.
+@param dst output array whose size and type depend on the flags.
+@param flags transformation flags, representing a combination of cv.DftFlags.
+@param nonzeroRows when the parameter is not zero, the function assumes that only the first nonzeroRows rows of the input array (DFT_INVERSE is not set) or only the first nonzeroRows rows of the output array (DFT_INVERSE is set) contain non-zeros; thus, the function can handle the rest of the rows more efficiently and save some time. This technique is very useful for calculating array cross-correlation or convolution using DFT.
+
+**cv.getOptimalDFTSize (vecsize)**
+
+@param vecsize vector size.
+
+**cv.copyMakeBorder (src, dst, top, bottom, left, right, borderType, value = new cv.Scalar())**
+
+@param src input array that could be real or complex.
+@param dst output image of the same type as src and of size Size(src.cols+left+right, src.rows+top+bottom).
+@param top number of pixels to extrapolate above the source image.
+@param bottom number of pixels to extrapolate below the source image.
+@param left number of pixels to extrapolate to the left of the source image.
+@param right number of pixels to extrapolate to the right of the source image.
+@param borderType border type.
+@param value border value if borderType == cv.BORDER_CONSTANT.
+
+**cv.magnitude (x, y, magnitude)**
+
+@param x floating-point array of x-coordinates of the vectors.
+@param y floating-point array of y-coordinates of the vectors; it must have the same size as x.
+@param magnitude output array of the same size and type as x.
+
+**cv.split (m, mv)**
+
+@param m input multi-channel array.
+@param mv output vector of arrays; the arrays themselves are reallocated, if needed.
+
+**cv.merge (mv, dst)**
+
+@param mv input vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.
+@param dst output array of the same size and the same depth as mv[0]; the number of channels will be the total number of channels in the matrix array.
+
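+A condensed sketch of computing and displaying the log-magnitude spectrum (quadrant rearrangement
+to center the spectrum is omitted; tutorial-style canvas ids assumed):
+@code{.js}
+let src = cv.imread('canvasInput');
+cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
+
+// pad the image to an optimal size for a faster DFT
+let rows = cv.getOptimalDFTSize(src.rows);
+let cols = cv.getOptimalDFTSize(src.cols);
+let padded = new cv.Mat();
+cv.copyMakeBorder(src, padded, 0, rows - src.rows, 0, cols - src.cols,
+                  cv.BORDER_CONSTANT, new cv.Scalar(0, 0, 0, 0));
+
+// build a two-channel (real + imaginary) input and take the DFT in place
+let plane0 = new cv.Mat();
+padded.convertTo(plane0, cv.CV_32F);
+let plane1 = cv.Mat.zeros(padded.rows, padded.cols, cv.CV_32F);
+let planes = new cv.MatVector();
+planes.push_back(plane0);
+planes.push_back(plane1);
+let complexI = new cv.Mat();
+cv.merge(planes, complexI);
+cv.dft(complexI, complexI);
+
+// magnitude spectrum, log-scaled and normalized for display
+cv.split(complexI, planes);
+let mag = new cv.Mat();
+cv.magnitude(planes.get(0), planes.get(1), mag);
+let ones = cv.Mat.ones(mag.rows, mag.cols, cv.CV_32F);
+cv.add(mag, ones, mag); // avoid log(0)
+cv.log(mag, mag);
+cv.normalize(mag, mag, 0, 1, cv.NORM_MINMAX);
+cv.imshow('canvasOutput', mag);
+
+src.delete(); padded.delete(); plane0.delete(); plane1.delete();
+planes.delete(); complexI.delete(); mag.delete(); ones.delete();
+@endcode
+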
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_imgproc/js_transforms/js_table_of_contents_transforms.markdown b/doc/js_tutorials/js_imgproc/js_transforms/js_table_of_contents_transforms.markdown
new file mode 100644
index 0000000000..2ed14241a8
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_transforms/js_table_of_contents_transforms.markdown
@@ -0,0 +1,5 @@
+Image Transforms in OpenCV.js {#tutorial_js_table_of_contents_transforms}
+==========================
+
+- @subpage tutorial_js_fourier_transform
+
+    Learn how to find the Fourier Transform of images.
diff --git a/doc/js_tutorials/js_imgproc/js_watershed/js_watershed.markdown b/doc/js_tutorials/js_imgproc/js_watershed/js_watershed.markdown
new file mode 100644
index 0000000000..1554744052
--- /dev/null
+++ b/doc/js_tutorials/js_imgproc/js_watershed/js_watershed.markdown
@@ -0,0 +1,144 @@
+Image Segmentation with Watershed Algorithm {#tutorial_js_watershed}
+===========================================
+
+Goal
+----
+
+- We will learn how to use marker-based image segmentation with the watershed algorithm
+- We will learn: **cv.watershed()**
+
+Theory
+------
+
+Any grayscale image can be viewed as a topographic surface where high intensity denotes peaks and
+hills while low intensity denotes valleys. You start filling every isolated valley (local minimum)
+with differently colored water (labels). As the water rises, depending on the peaks (gradients)
+nearby, water from different valleys, obviously with different colors, will start to merge. To avoid
+that, you build barriers in the locations where water merges. You continue the work of filling water
+and building barriers until all the peaks are under water. The barriers you created then give you
+the segmentation result. This is the "philosophy" behind the watershed. You can visit the [CMM
+webpage on watershed](http://cmm.ensmp.fr/~beucher/wtshed.html) to understand it with the help of
+some animations.
+
+But this approach gives you an oversegmented result due to noise or other irregularities in the
+image. So OpenCV implements a marker-based watershed algorithm where you specify which valley
+points are to be merged and which are not. It is an interactive image segmentation. What we
+do is give different labels to the objects we know. Label the region which we are sure is
+the foreground or object with one color (or intensity), label the region which we are sure is
+background or non-object with another color, and finally label the region which we are not sure of
+with 0. That is our marker. Then apply the watershed algorithm. Our marker will then
+be updated with the labels we gave, and the boundaries of objects will have a value of -1.
+
+Code
+----
+
+Below we will see an example of how to use the Distance Transform along with watershed to segment
+mutually touching objects.
+
+Consider the coins image below, in which the coins are touching each other. Even if you threshold it,
+they will still be touching each other.
+
+We start by finding an approximate estimate of the coins. For that, we can use Otsu's
+binarization.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Now we need to remove any small white noise in the image. For that we can use morphological
+opening. To remove any small holes in the objects, we can use morphological closing. So, now we know
+for sure that the region near the center of the objects is foreground and the region far away from
+the objects is background. The only region we are not sure of is the boundary region of the coins.
+
+So we need to extract the area which we are sure consists of coins. Erosion removes the boundary
+pixels, so whatever remains, we can be sure is coin. That would work if the objects were not
+touching each other. But since they are touching each other, another good option is to find
+the distance transform and apply a proper threshold. Next we need to find the area which we are sure
+does not consist of coins. For that, we dilate the result. Dilation expands the object boundary into
+the background. This way, we can make sure that whatever region is background in the result is really
+background, since the boundary region has been removed. See the image below.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+The remaining regions are those for which we have no idea whether they are coins or background.
+The watershed algorithm should find them. These areas are normally around the boundaries of coins,
+where foreground and background meet (or even where two different coins meet). We call it the border.
+It can be obtained by subtracting the sure_fg area from the sure_bg area.
+
+We use the function: **cv.distanceTransform (src, dst, distanceType, maskSize, labelType = cv.CV_32F)**
+
+@param src 8-bit, single-channel (binary) source image.
+@param dst output image with calculated distances. It is an 8-bit or 32-bit floating-point, single-channel image of the same size as src.
+@param distanceType type of distance (see cv.DistanceTypes).
+@param maskSize size of the distance transform mask (see cv.DistanceTransformMasks).
+@param labelType type of output image. It can be cv.CV_8U or cv.CV_32F. Type cv.CV_8U can be used only for the first variant of the function and distanceType == DIST_L1.
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+In the thresholded image, we get some regions that we are sure are coins,
+and they are now detached. (In some cases, you may be interested only in foreground segmentation,
+not in separating the mutually touching objects. In that case, you need not use the distance transform;
+erosion is sufficient. Erosion is just another method to extract the sure foreground area, that's
+all.)
+
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
+
+Now we know for sure which regions are coins, which are background, and so on. So we create a marker
+(it is an array of the same size as the original image, but with int32 datatype) and label the
+regions inside it. The regions we know for sure (whether foreground or background) are labelled with
+positive integers (different ones), and the areas we don't know for sure are just left as
+zero. For this we use **cv.connectedComponents()**. It labels the background of the image with 0, and
+the other objects are labelled with integers starting from 1.
+
+But we know that if the background is marked with 0, watershed will consider it an unknown area. So we
+want to mark it with a different integer. Instead, we will mark the unknown region, defined by unknown,
+with 0.
+
+Now our marker is ready. It is time for the final step: apply watershed. The marker image will then be
+modified. The boundary region will be marked with -1.
+
+We use the function: **cv.connectedComponents (image, labels, connectivity = 8, ltype = cv.CV_32S)**
+@param image the 8-bit single-channel image to be labeled.
+@param labels destination labeled image (cv.CV_32SC1 type).
+@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively.
+@param ltype output image label type. Currently cv.CV_32S and cv.CV_16U are supported.
+
+We use the function: **cv.watershed (image, markers)**
+
+@param image input 8-bit 3-channel image.
+@param markers input/output 32-bit single-channel image (map) of markers. It should have the same size as image.
+
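+A condensed sketch of the whole pipeline (tutorial-style canvas ids assumed; thresholds such as the
+0.7 factor are just illustrative):
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+cv.threshold(gray, gray, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU);
+
+// remove noise with opening, then get the sure background by dilating
+let opening = new cv.Mat();
+let sureBg = new cv.Mat();
+let M = cv.Mat.ones(3, 3, cv.CV_8U);
+cv.morphologyEx(gray, opening, cv.MORPH_OPEN, M);
+cv.dilate(opening, sureBg, M, new cv.Point(-1, -1), 3);
+
+// sure foreground from the thresholded distance transform
+let dist = new cv.Mat();
+let sureFg = new cv.Mat();
+cv.distanceTransform(opening, dist, cv.DIST_L2, 5);
+cv.normalize(dist, dist, 1, 0, cv.NORM_INF);
+cv.threshold(dist, sureFg, 0.7, 255, cv.THRESH_BINARY);
+sureFg.convertTo(sureFg, cv.CV_8U, 1, 0);
+
+// unknown region = sure background minus sure foreground
+let unknown = new cv.Mat();
+cv.subtract(sureBg, sureFg, unknown);
+
+// markers: shift labels by 1 so background != 0, set unknown to 0
+let markers = new cv.Mat();
+cv.connectedComponents(sureFg, markers);
+for (let i = 0; i < markers.rows; i++) {
+    for (let j = 0; j < markers.cols; j++) {
+        markers.intPtr(i, j)[0] += 1;
+        if (unknown.ucharPtr(i, j)[0] === 255) {
+            markers.intPtr(i, j)[0] = 0;
+        }
+    }
+}
+cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0); // watershed needs an 8-bit 3-channel image
+cv.watershed(src, markers);
+// boundaries are now marked with -1 in markers
+src.delete(); gray.delete(); opening.delete(); sureBg.delete();
+dist.delete(); sureFg.delete(); unknown.delete(); markers.delete(); M.delete();
+@endcode
+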
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection.markdown b/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection.markdown
new file mode 100644
index 0000000000..a2787e4172
--- /dev/null
+++ b/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection.markdown
@@ -0,0 +1,107 @@
+Face Detection using Haar Cascades {#tutorial_js_face_detection}
+==================================
+
+Goal
+----
+
+- learn the basics of face detection using Haar Feature-based Cascade Classifiers
+- extend the same for eye detection etc.
+
+Basics
+------
+
+Object Detection using Haar feature-based cascade classifiers is an effective method proposed by Paul Viola and Michael Jones in the 2001 paper, "Rapid Object Detection using a
+Boosted Cascade of Simple Features". It is a machine learning based approach in which a cascade
+function is trained from a lot of positive and negative images. It is then used to detect objects in
+other images.
+
+Here we will work with face detection. Initially, the algorithm needs a lot of positive images
+(images of faces) and negative images (images without faces) to train the classifier. Then we need
+to extract features from them. For this, the Haar features shown in the image below are used. They are
+just like our convolutional kernels. Each feature is a single value obtained by subtracting the sum of
+pixels under the white rectangle from the sum of pixels under the black rectangle.
+
+![image](images/haar_features.jpg)
+
+Now all possible sizes and locations of each kernel are used to calculate plenty of features. For each
+feature calculation, we need to find the sum of the pixels under the white and black rectangles. To
+solve this, they introduced the integral image. It simplifies the calculation of the sum of the pixels,
+however large their number may be, to an operation involving just four pixels.
+
+But among all these features we calculated, most of them are irrelevant. For example, consider the
+image below. Top row shows two good features. The first feature selected seems to focus on the
+property that the region of the eyes is often darker than the region of the nose and cheeks. The
+second feature selected relies on the property that the eyes are darker than the bridge of the nose.
+But the same windows applied to the cheeks or any other place are irrelevant. So how do we select the
+best features out of 160000+ features? This is achieved by **Adaboost**.
+
+![image](images/haar.png)
+
+For this, we apply each and every feature on all the training images. For each feature, it finds the
+best threshold which will classify the faces as positive and negative. Obviously, there will be
+errors or misclassifications. We select the features with the minimum error rate, which means they are
+the features that best classify the face and non-face images. (The process is not as simple as
+this. Each image is given an equal weight in the beginning. After each classification, the weights of
+misclassified images are increased. Then the same process is done again. New error rates are calculated,
+along with new weights. The process continues until the required accuracy or error rate is achieved,
+or the required number of features is found.)
+
+The final classifier is a weighted sum of these weak classifiers. They are called weak because each
+alone can't classify the image, but together with the others they form a strong classifier. The paper
+says even 200 features provide detection with 95% accuracy. Their final setup had around 6000 features.
+(Imagine a reduction from 160000+ features to 6000 features. That is a big gain.)
+
+So now you take an image. Take each 24x24 window. Apply 6000 features to it. Check if it is a face or
+not. Isn't that a little inefficient and time consuming? Yes, it is. The authors have a good
+solution for that.
+
+In an image, most of the image region is non-face region. So it is a better idea to have a simple
+method to check whether a window is a face region. If it is not, discard it in a single shot and don't
+process it again. Instead, focus on regions where there can be a face. This way, we spend more time
+checking possible face regions.
+
+For this they introduced the concept of a **Cascade of Classifiers**. Instead of applying all 6000
+features on a window, the features are grouped into different stages of classifiers and applied one
+by one. (Normally the first few stages contain very few features.) If a window fails the first
+stage, discard it. We don't consider the remaining features on it. If it passes, apply the second
+stage of features and continue the process. A window which passes all stages is a face region. Quite
+a plan, isn't it?
+
+The authors' detector had 6000+ features in 38 stages, with 1, 10, 25, 25, and 50 features in the
+first five stages. (The two features in the above image are actually obtained as the best two features
+from Adaboost.) According to the authors, on average 10 features out of 6000+ are evaluated per
+sub-window.
+
+So this is a simple, intuitive explanation of how Viola-Jones face detection works. Read the paper
+for more details.
+
+Haar-cascade Detection in OpenCV
+--------------------------------
+
+Here we will deal with detection. OpenCV already contains many pre-trained classifiers for faces,
+eyes, smiles etc. Those XML files are stored in the opencv/data/haarcascades/ folder. Let's create a
+face and eye detector with OpenCV.
+
+We use the function: **detectMultiScale (image, objects, scaleFactor = 1.1, minNeighbors = 3, flags = 0, minSize = new cv.Size(0, 0), maxSize = new cv.Size(0, 0))**
+
+@param image matrix of the type CV_8U containing an image where objects are detected.
+@param objects vector of rectangles where each rectangle contains the detected object. The rectangles may be partially outside the original image.
+@param scaleFactor parameter specifying how much the image size is reduced at each image scale.
+@param minNeighbors parameter specifying how many neighbors each candidate rectangle should have to retain it.
+@param flags parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. It is not used for a new cascade.
+@param minSize minimum possible object size. Objects smaller than this are ignored.
+@param maxSize maximum possible object size. Objects larger than this are ignored. If maxSize == minSize, the model is evaluated on a single scale.
+
+@note Don't forget to delete CascadeClassifier and RectVector!
+
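+A minimal sketch (assuming the cascade XML file has already been placed in Emscripten's virtual file
+system, and tutorial-style canvas ids):
+@code{.js}
+let src = cv.imread('canvasInput');
+let gray = new cv.Mat();
+cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
+let faces = new cv.RectVector();
+let classifier = new cv.CascadeClassifier();
+classifier.load('haarcascade_frontalface_default.xml');
+classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
+for (let i = 0; i < faces.size(); ++i) {
+    let face = faces.get(i);
+    let p1 = new cv.Point(face.x, face.y);
+    let p2 = new cv.Point(face.x + face.width, face.y + face.height);
+    cv.rectangle(src, p1, p2, [255, 0, 0, 255], 2, cv.LINE_8, 0);
+}
+cv.imshow('canvasOutput', src);
+src.delete(); gray.delete(); faces.delete(); classifier.delete();
+@endcode
+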
+Try it
+------
+
+Try this demo using the code above. Canvas elements named haarCascadeDetectionCanvasInput and haarCascadeDetectionCanvasOutput have been prepared. Choose an image and
+click `Try it` to see the result. You can change the code in the textbox to investigate further.
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection_camera.markdown b/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection_camera.markdown
new file mode 100644
index 0000000000..58759eff2a
--- /dev/null
+++ b/doc/js_tutorials/js_objdetect/js_face_detection/js_face_detection_camera.markdown
@@ -0,0 +1,15 @@
+Face Detection in Video Capture {#tutorial_js_face_detection_camera}
+==================================
+
+Goal
+----
+
+- learn how to detect faces in video capture.
+
+@note If you don't know how to capture video from the camera, please review @ref tutorial_js_video_display.
+
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_objdetect/js_table_of_contents_objdetect.markdown b/doc/js_tutorials/js_objdetect/js_table_of_contents_objdetect.markdown
new file mode 100644
index 0000000000..3a6975f634
--- /dev/null
+++ b/doc/js_tutorials/js_objdetect/js_table_of_contents_objdetect.markdown
@@ -0,0 +1,11 @@
+Object Detection {#tutorial_js_table_of_contents_objdetect}
+================
+
+- @subpage tutorial_js_face_detection
+
+    Face detection using haar-cascades
+
+- @subpage tutorial_js_face_detection_camera
+
+ Face Detection in Video Capture
diff --git a/doc/js_tutorials/js_setup/js_intro/js_intro.markdown b/doc/js_tutorials/js_setup/js_intro/js_intro.markdown
new file mode 100644
index 0000000000..416aa3ded5
--- /dev/null
+++ b/doc/js_tutorials/js_setup/js_intro/js_intro.markdown
@@ -0,0 +1,45 @@
+Introduction to OpenCV.js and Tutorials {#tutorial_js_intro}
+=======================================
+
+OpenCV
+------
+
+OpenCV was created at Intel in 1999 by **Gary Bradski**. The first release came out in 2000. **Vadim Pisarevsky** joined Gary Bradski to manage Intel's Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle that won the 2005 DARPA Grand Challenge. Later, its active development continued under the support of Willow Garage, with Gary Bradski and Vadim Pisarevsky leading the project. OpenCV now supports a multitude of algorithms related to Computer Vision and Machine Learning and is expanding day by day.
+
+OpenCV supports a wide variety of programming languages such as C++, Python, and Java, and is available on different platforms including Windows, Linux, OS X, Android, and iOS. Interfaces for high-speed GPU operations based on CUDA and OpenCL are also under active development. OpenCV.js brings OpenCV to the open web platform and makes it available to the JavaScript programmer.
+
+OpenCV.js: OpenCV for the JavaScript programmer
+-------------
+
+The Web is the most ubiquitous open computing platform. With HTML5 standards implemented in every browser, web applications are able to render online video with HTML5 video tags, capture webcam video via the WebRTC API, and access each pixel of a video frame via the canvas API. With an abundance of available multimedia content, web developers need a wide array of image and vision processing algorithms in JavaScript to build innovative applications. This requirement is even more essential for emerging applications on the web, such as Web Virtual Reality (WebVR) and Augmented Reality (WebAR). All of these use cases demand efficient implementations of computation-intensive vision kernels on the web.
+
+[Emscripten](http://kripken.github.io/emscripten-site) is an LLVM-to-JavaScript compiler. It takes LLVM bitcode, which can be generated from C/C++ using clang, and compiles it into asm.js or WebAssembly that can execute directly inside web browsers. Asm.js is a highly optimizable, low-level subset of JavaScript that enables ahead-of-time compilation and optimization in the JavaScript engine, providing near-native execution speed. WebAssembly is a new portable, size- and load-time-efficient binary format suitable for compilation to the web that aims to execute at native speed. WebAssembly is currently being designed as an open standard by the W3C.
+
+OpenCV.js is a JavaScript binding for a selected subset of OpenCV functions for the web platform. It allows emerging web applications with multimedia processing to benefit from the wide variety of vision functions available in OpenCV. OpenCV.js leverages Emscripten to compile OpenCV functions into asm.js or WebAssembly targets, and provides a JavaScript API for web applications to access them. Future versions of the library will take advantage of acceleration APIs that are available on the Web such as SIMD and multi-threaded execution.
+
+OpenCV.js was initially created in the Parallel Architectures and Systems Group at the University of California, Irvine (UCI) as a research project funded by Intel Corporation. OpenCV.js was further improved and integrated into the OpenCV project as part of the Google Summer of Code 2017 program.
+
+OpenCV.js Tutorials
+-----------------------
+
+OpenCV introduces a new set of tutorials that will guide you through various functions available in OpenCV.js. **This guide is mainly focused on the OpenCV 3.x version**.
+
+The purpose of OpenCV.js tutorials is to:
+-# Help with the adoption of OpenCV in web development
+-# Help the web community, developers, and computer vision researchers to interactively access a variety of web-based OpenCV examples to help them understand specific vision algorithms.
+
+Because OpenCV.js is able to run directly inside the browser, the OpenCV.js tutorial web pages are intuitive and interactive. For example, using the WebRTC API and evaluating JavaScript code allows developers to change the parameters of CV functions and do live CV coding on web pages to see the results in real time.
+
+Prior knowledge of JavaScript and web application development is recommended to understand this guide.
+
+Contributors
+------------
+
+Below is the list of contributors of OpenCV.js bindings and tutorials.
+
+- Sajjad Taheri (Architect of the initial version and GSoC mentor, University of California, Irvine)
+- Congxiang Pan (GSoC student, Shanghai Jiao Tong University)
+- Gang Song (GSoC student, Shanghai Jiao Tong University)
+- Wenyao Gan (Student intern, Shanghai Jiao Tong University)
+- Mohammad Reza Haghighat (Project initiator & sponsor, Intel Corporation)
+- Ningxin Hu (Students' supervisor, Intel Corporation)
\ No newline at end of file
diff --git a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
new file mode 100644
index 0000000000..5f6b43b167
--- /dev/null
+++ b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
@@ -0,0 +1,105 @@
+Build OpenCV.js {#tutorial_js_setup}
+===============================
+
+
+Installing Emscripten
+-----------------------------
+
+[Emscripten](https://github.com/kripken/emscripten) is an LLVM-to-JavaScript compiler. We will use Emscripten to build OpenCV.js.
+
+To install Emscripten, follow the instructions of the [Emscripten SDK](https://kripken.github.io/emscripten-site/docs/getting_started/downloads.html).
+
+For example:
+@code{.bash}
+./emsdk update
+./emsdk install latest
+./emsdk activate latest
+@endcode
+
+@note
+To compile to [WebAssembly](http://webassembly.org), you need to install and activate [Binaryen](https://github.com/WebAssembly/binaryen) with the `emsdk` command. Please refer to [Developer's Guide](http://webassembly.org/getting-started/developers-guide/) for more details.
+
+After installation, ensure that the `EMSCRIPTEN` environment variable is set up correctly.
+
+For example:
+@code{.bash}
+source ./emsdk_env.sh
+echo ${EMSCRIPTEN}
+@endcode
+
+Obtaining OpenCV Source Code
+--------------------------
+
+You can use the latest stable OpenCV version or you can grab the latest snapshot from our [Git
+repository](https://github.com/opencv/opencv.git).
+
+### Obtaining the Latest Stable OpenCV Version
+
+- Go to our [releases page](http://opencv.org/releases.html).
+- Download the source archive and unpack it.
+
+### Obtaining the Cutting-edge OpenCV from the Git Repository
+
+Launch your Git client and clone the [OpenCV repository](http://github.com/opencv/opencv).
+
+For example:
+@code{.bash}
+git clone https://github.com/opencv/opencv.git
+@endcode
+
+@note
+It requires `git` to be installed in your development environment.
+
+Building OpenCV.js from Source
+---------------------------------------
+
+-# To build `opencv.js`, execute the Python script `platforms/js/build_js.py`.
+
+    For example, to build in the `build_js` directory:
+ @code{.bash}
+ cd opencv
+ python ./platforms/js/build_js.py build_js
+ @endcode
+
+ @note
+    It requires `python` and `cmake` to be installed in your development environment.
+
+-# The build script builds the asm.js version by default. To build the WebAssembly version, append the `--build_wasm` switch.
+
+    For example, to build the wasm version in the `build_wasm` directory:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_wasm --build_wasm
+ @endcode
+
+-# [optional] To build the documentation, append the `--build_doc` option.
+
+ For example:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_js --build_doc
+ @endcode
+
+ @note
+    It requires `doxygen` to be installed in your development environment.
+
+-# [optional] To build tests, append the `--build_test` option.
+
+ For example:
+ @code{.bash}
+ python ./platforms/js/build_js.py build_js --build_test
+ @endcode
+
+    To run tests, launch a local web server in the `<build_dir>/bin` folder. For example, node http-server, which serves on `localhost:8080`.
+
+    Navigate your web browser to `http://localhost:8080/tests.html`, which runs the unit tests automatically.
+
+ You can also run tests using Node.js.
+
+ For example:
+ @code{.sh}
+ cd bin
+ npm install
+ node tests.js
+ @endcode
+
+ @note
+    It requires `node` to be installed in your development environment.
diff --git a/doc/js_tutorials/js_setup/js_table_of_contents_setup.markdown b/doc/js_tutorials/js_setup/js_table_of_contents_setup.markdown
new file mode 100644
index 0000000000..4570e287d1
--- /dev/null
+++ b/doc/js_tutorials/js_setup/js_table_of_contents_setup.markdown
@@ -0,0 +1,14 @@
+Introduction to OpenCV.js {#tutorial_js_table_of_contents_setup}
+======================
+
+- @subpage tutorial_js_intro
+
+    Introduction to OpenCV.js and Tutorials
+
+- @subpage tutorial_js_usage
+
+ Get started with OpenCV.js
+
+- @subpage tutorial_js_setup
+
+ Build OpenCV.js from source
diff --git a/doc/js_tutorials/js_setup/js_usage/js_usage.markdown b/doc/js_tutorials/js_setup/js_usage/js_usage.markdown
new file mode 100644
index 0000000000..72f481df7a
--- /dev/null
+++ b/doc/js_tutorials/js_setup/js_usage/js_usage.markdown
@@ -0,0 +1,140 @@
+Using OpenCV.js {#tutorial_js_usage}
+===============================
+
+Steps
+-----
+
+In this tutorial, you will learn how to include and start using `opencv.js` inside a web page.
+
+### Create a web page
+
+First, let's create a simple web page that is able to upload an image (the element ids below are illustrative).
+
+@code{.js}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hello OpenCV.js</title>
+</head>
+<body>
+<h2>Hello OpenCV.js</h2>
+<div>
+  <img id="imageSrc" alt="No Image" />
+  <input type="file" id="fileInput" name="file" />
+</div>
+<script type="text/javascript">
+let imgElement = document.getElementById('imageSrc');
+let inputElement = document.getElementById('fileInput');
+inputElement.addEventListener('change', (e) => {
+  imgElement.src = URL.createObjectURL(e.target.files[0]);
+}, false);
+</script>
+</body>
+</html>
+@endcode
+
+To run this web page, copy the content above and save it to a local index.html file, then open it in your web browser.
+
+@note It is better practice to use a local web server to host the index.html.
+
+### Include OpenCV.js
+
+Set the URL of `opencv.js` to the `src` attribute of a \<script\> tag. For example:
+@code{.js}
+<script src="opencv.js" type="text/javascript"></script>
+@endcode
+
+You may want to load `opencv.js` asynchronously by adding the `async` attribute to the \<script\> tag:
+@code{.js}
+<script async src="opencv.js" type="text/javascript"></script>
+@endcode
+
+### Use OpenCV.js
+
+Once `opencv.js` is ready, you can access OpenCV objects and functions through the `cv` object.
+
+For example, you can create a cv.Mat from an image with cv.imread.
+
+@note Because image loading is asynchronous, you need to put cv.Mat creation inside the `onload` callback.
+
+@code{.js}
+imgElement.onload = function() {
+ let mat = cv.imread(imgElement);
+}
+@endcode
+
+Many OpenCV functions can be used to process cv.Mat. You can refer to other tutorials, such as @ref tutorial_js_table_of_contents_imgproc, for details.
+
+In this tutorial, we just show a cv.Mat on screen. To show a cv.Mat, you need a canvas element.
+
+@code{.js}
+<canvas id="outputCanvas"></canvas>
+@endcode
+
+You can use cv.imshow to show a cv.Mat on the canvas.
+@code{.js}
+cv.imshow('outputCanvas', mat); // the canvas id comes first, then the Mat
+@endcode
+
+Putting all of the steps together, a complete index.html looks like the following (element ids again illustrative).
+
+@code{.js}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>Hello OpenCV.js</title>
+</head>
+<body>
+<h2>Hello OpenCV.js</h2>
+<p id="status">OpenCV.js is loading...</p>
+<div>
+  <img id="imageSrc" alt="No Image" />
+  <input type="file" id="fileInput" name="file" />
+  <canvas id="outputCanvas"></canvas>
+</div>
+<script type="text/javascript">
+let imgElement = document.getElementById('imageSrc');
+let inputElement = document.getElementById('fileInput');
+inputElement.addEventListener('change', (e) => {
+  imgElement.src = URL.createObjectURL(e.target.files[0]);
+}, false);
+imgElement.onload = function() {
+  let mat = cv.imread(imgElement);
+  cv.imshow('outputCanvas', mat);
+  mat.delete();
+};
+</script>
+<script async src="opencv.js" onload="document.getElementById('status').innerHTML = 'OpenCV.js is ready.';" type="text/javascript"></script>
+</body>
+</html>
+@endcode
+
+@note You have to call the delete method of cv.Mat to free memory allocated in Emscripten's heap. Please refer to [Memory management of Emscripten](https://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/embind.html#memory-management) for details.
+
+Try it
+------
+\htmlonly
+
+\endhtmlonly
\ No newline at end of file
diff --git a/doc/js_tutorials/js_tutorials.markdown b/doc/js_tutorials/js_tutorials.markdown
new file mode 100644
index 0000000000..c8a8f92a31
--- /dev/null
+++ b/doc/js_tutorials/js_tutorials.markdown
@@ -0,0 +1,28 @@
+OpenCV.js Tutorials {#tutorial_js_root}
+=======================
+- @subpage tutorial_js_table_of_contents_setup
+
+ Learn how to use OpenCV.js inside your web pages!
+
+- @subpage tutorial_js_table_of_contents_gui
+
+    Here you will learn how to read and display images and videos, and create trackbars.
+
+- @subpage tutorial_js_table_of_contents_core
+
+    In this section you will learn some basic operations on images, some mathematical tools and some data structures etc.
+
+- @subpage tutorial_js_table_of_contents_imgproc
+
+    In this section you will learn different image processing functions inside OpenCV.js.
+
+- @subpage tutorial_js_table_of_contents_video
+
+    In this section you will learn different techniques to work with videos, like object tracking etc.
+
+- @subpage tutorial_js_table_of_contents_objdetect
+
+    In this section you will learn object detection techniques, like face detection etc.
diff --git a/doc/js_tutorials/js_video/js_bg_subtraction/js_bg_subtraction.markdown b/doc/js_tutorials/js_video/js_bg_subtraction/js_bg_subtraction.markdown
new file mode 100644
index 0000000000..a072dccf0d
--- /dev/null
+++ b/doc/js_tutorials/js_video/js_bg_subtraction/js_bg_subtraction.markdown
@@ -0,0 +1,64 @@
+Background Subtraction {#tutorial_js_bg_subtraction}
+======================
+
+Goal
+----
+
+- We will familiarize ourselves with the background subtraction methods available in OpenCV.js.
+
+Basics
+------
+
+Background subtraction is a major preprocessing step in many vision-based applications. For
+example, consider cases like a visitor counter where a static camera counts the number of visitors
+entering or leaving a room, or a traffic camera extracting information about vehicles etc. In
+all these cases, you first need to extract the person or vehicles alone. Technically, you need to
+extract the moving foreground from the static background.
+
+If you have an image of the background alone, like an image of the room without visitors, an image
+of the road without vehicles etc, it is an easy job. Just subtract the new image from the background;
+you get the foreground objects alone. But in most cases, you may not have such an image, so we need
+to extract the background from whatever images we have. It becomes more complicated when there are
+shadows of the vehicles. Since shadows also move, simple subtraction will mark them as
+foreground too. This complicates things.
+
+OpenCV.js has implemented one algorithm for this purpose, which is very easy to use.
+
+BackgroundSubtractorMOG2
+------------------------
+
+It is a Gaussian Mixture-based Background/Foreground Segmentation Algorithm. It is based on two
+papers by Z. Zivkovic: "Improved adaptive Gaussian mixture model for background subtraction" (2004)
+and "Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
+(2006). One important feature of this algorithm is that it selects the appropriate number of
+Gaussian distributions for each pixel. It provides better adaptability to varying scenes due to
+illumination changes etc.
+
+While coding, we use the constructor: **cv.BackgroundSubtractorMOG2 (history = 500, varThreshold = 16,
+detectShadows = true)**
+@param history Length of the history.
+@param varThreshold Threshold on the squared distance between the pixel and the sample to decide
+whether a pixel is close to that sample. This parameter does not affect the background update.
+@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
+speed a bit, so if you do not need this feature, set the parameter to false.
+@return instance of cv.BackgroundSubtractorMOG2
+
+Use the **apply (image, fgmask, learningRate = -1)** method to get the foreground mask
+@param image Next video frame. Floating point frames will be used without scaling and should
+be in the range [0,255].
+@param fgmask The output foreground mask as an 8-bit binary image.
+@param learningRate The value between 0 and 1 that indicates how fast the background model is learnt.
+A negative parameter value makes the algorithm use some automatically chosen learning rate. 0 means
+that the background model is not updated at all, 1 means that the background model is completely
+reinitialized from the last frame.
+
+@note The instance of cv.BackgroundSubtractorMOG2 should be deleted manually.
+
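+A minimal per-frame sketch (assuming `frame` is a cv.Mat captured from a video element, as in the
+video-display tutorial):
+@code{.js}
+let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
+let fgmask = new cv.Mat();
+fgbg.apply(frame, fgmask); // foreground mask for this frame
+cv.imshow('canvasOutput', fgmask);
+// when done with the stream:
+fgmask.delete(); fgbg.delete();
+@endcode
+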
+Try it
+------
+
+\htmlonly
+
+\endhtmlonly
diff --git a/doc/js_tutorials/js_video/js_lucas_kanade/images/optical_flow_basic1.jpg b/doc/js_tutorials/js_video/js_lucas_kanade/images/optical_flow_basic1.jpg
new file mode 100644
index 0000000000..718d83c10b
Binary files /dev/null and b/doc/js_tutorials/js_video/js_lucas_kanade/images/optical_flow_basic1.jpg differ
diff --git a/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown
new file mode 100644
index 0000000000..1d8fa29ee8
--- /dev/null
+++ b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown
@@ -0,0 +1,171 @@
+Optical Flow {#tutorial_js_lucas_kanade}
+============
+
+Goal
+----
+
+- We will understand the concepts of optical flow and its estimation using the Lucas-Kanade
+  method.
+- We will use functions like **cv.calcOpticalFlowPyrLK()** to track feature points in a
+ video.
+
+Optical Flow
+------------
+
+Optical flow is the pattern of apparent motion of image objects between two consecutive frames
+caused by the movement of the object or camera. It is a 2D vector field where each vector is a
+displacement vector showing the movement of points from the first frame to the second. Consider the image
+below (Image Courtesy: [Wikipedia article on Optical
+Flow](http://en.wikipedia.org/wiki/Optical_flow)).
+
+![image](images/optical_flow_basic1.jpg)
+
+It shows a ball moving in 5 consecutive frames. The arrow shows its displacement vector. Optical
+flow has many applications in areas like:
+
+- Structure from Motion
+- Video Compression
+- Video Stabilization ...
+
+Optical flow works on several assumptions:
+
+-# The pixel intensities of an object do not change between consecutive frames.
+-# Neighbouring pixels have similar motion.
+
+Consider a pixel \f$I(x,y,t)\f$ in the first frame (note that a new dimension, time, is added here;
+earlier we were working with images only, so there was no need of time). It moves by a distance
+\f$(dx,dy)\f$ in the next frame, taken after time \f$dt\f$. Since those pixels are the same and the intensity does not change, we can say,
+
+\f[I(x,y,t) = I(x+dx, y+dy, t+dt)\f]
+
+Then take the Taylor series approximation of the right-hand side, remove common terms, and divide by
+\f$dt\f$ to get the following equation:
+
+\f[f_x u + f_y v + f_t = 0 \;\f]
+
+where:
+
+\f[f_x = \frac{\partial f}{\partial x} \; ; \; f_y = \frac{\partial f}{\partial y}\f]\f[u = \frac{dx}{dt} \; ; \; v = \frac{dy}{dt}\f]
+
+The above equation is called the Optical Flow equation. In it, we can find \f$f_x\f$ and \f$f_y\f$: they
+are the image gradients. Similarly, \f$f_t\f$ is the gradient along time. But \f$(u,v)\f$ is unknown. We
+cannot solve this one equation with two unknown variables. So several methods are provided to solve
+this problem, and one of them is Lucas-Kanade.
+
+### Lucas-Kanade method
+
+We have seen an assumption before: all the neighbouring pixels will have similar motion. The
+Lucas-Kanade method takes a 3x3 patch around the point, so all 9 points have the same motion. We
+can find \f$(f_x, f_y, f_t)\f$ for these 9 points. So now our problem becomes solving 9 equations with
+two unknown variables, which is over-determined. A better solution is obtained with the least squares
+fit method. Below is the final solution, a two-equation, two-unknown problem: solve it to get the
+solution.
+
+\f[\begin{bmatrix} u \\ v \end{bmatrix} =
+\begin{bmatrix}
+ \sum_{i}{f_{x_i}}^2 & \sum_{i}{f_{x_i} f_{y_i} } \\
+ \sum_{i}{f_{x_i} f_{y_i}} & \sum_{i}{f_{y_i}}^2
+\end{bmatrix}^{-1}
+\begin{bmatrix}
+ - \sum_{i}{f_{x_i} f_{t_i}} \\
+ - \sum_{i}{f_{y_i} f_{t_i}}
+\end{bmatrix}\f]
+
+(Check the similarity of the inverse matrix with the Harris corner detector. It indicates that corners
+are better points to track.)
+
+So from the user's point of view, the idea is simple: we give some points to track, and we receive the
+optical flow vectors of those points. But again there are some problems. Until now, we were dealing
+with small motions, so this fails when there is large motion. So again we go for pyramids. When we go
+up in the pyramid, small motions are removed and large motions become small motions. So applying
+Lucas-Kanade there, we get optical flow along with the scale.
+
+Lucas-Kanade Optical Flow in OpenCV.js
+-----------------------------------
+
+We use the function: **cv.calcOpticalFlowPyrLK (prevImg, nextImg, prevPts, nextPts, status, err, winSize =
+new cv.Size(21, 21), maxLevel = 3, criteria = new cv.TermCriteria(cv.TermCriteria_COUNT+
+cv.TermCriteria_EPS, 30, 0.01), flags = 0, minEigThreshold = 1e-4)**.
+@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
+@param nextImg second input image or pyramid of the same size and the same type as prevImg.
+@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must
+be single-precision floating-point numbers.
+@param nextPts output vector of 2D points (with single-precision floating-point coordinates)
+containing the calculated new positions of input features in the second image; when the
+cv.OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as the input.
+@param status output status vector (of unsigned chars); each element of the vector is set to 1
+if the flow for the corresponding features has been found, otherwise, it is set to 0.
+@param err output vector of errors; each element of the vector is set to an error for the
+corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
+found then the error is not defined (use the status parameter to find such cases).
+@param winSize size of the search window at each pyramid level.
+@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
+level), if set to 1, two levels are used, and so on; if pyramids are passed to input then algorithm
+will use as many levels as pyramids have but no more than maxLevel.
+@param criteria parameter specifying the termination criteria of the iterative search algorithm
+(after the specified maximum number of iterations criteria.maxCount or when the search window moves
+by less than criteria.epsilon).
+@param flags operation flags:
+- cv.OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in nextPts; if the flag is not set,
+then prevPts is copied to nextPts and is considered the initial estimate.
+- cv.OPTFLOW_LK_GET_MIN_EIGENVALS use minimum eigen values as an error measure (see minEigThreshold
+description); if the flag is not set, then the L1 distance between patches around the original and a
+moved point, divided by the number of pixels in a window, is used as an error measure.
+@param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
+optical flow equations, divided by the number of pixels in a window; if this value is less than
+minEigThreshold, then the corresponding feature is filtered out and its flow is not processed, which
+allows bad points to be removed and gives a performance boost.
+
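+A minimal two-frame sketch (assuming `prevGray` and `currGray` are 8-bit single-channel cv.Mats
+holding consecutive grayscale video frames):
+@code{.js}
+let prevPts = new cv.Mat();
+let none = new cv.Mat();
+// pick good corners to track in the first frame
+cv.goodFeaturesToTrack(prevGray, prevPts, 30, 0.3, 7, none, 7);
+let nextPts = new cv.Mat();
+let status = new cv.Mat();
+let err = new cv.Mat();
+cv.calcOpticalFlowPyrLK(prevGray, currGray, prevPts, nextPts, status, err);
+for (let i = 0; i < status.rows; i++) {
+    if (status.data[i] === 1) { // flow was found for this point
+        let x = nextPts.data32F[i * 2];
+        let y = nextPts.data32F[i * 2 + 1];
+        console.log('point ' + i + ' -> (' + x + ', ' + y + ')');
+    }
+}
+prevPts.delete(); none.delete(); nextPts.delete(); status.delete(); err.delete();
+@endcode
+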
+### Try it
+
+\htmlonly
+
+\endhtmlonly
+
+(This code doesn't check how correct the next keypoints are. So even if a feature point disappears
+in the image, there is a chance that optical flow finds the next point which may look close to it. So
+for robust tracking, corner points should be re-detected at regular intervals.)
+
+Dense Optical Flow in OpenCV.js
+-------------------------------
+
+The Lucas-Kanade method computes optical flow for a sparse feature set (in our example, corners
+detected using the Shi-Tomasi algorithm). OpenCV.js provides another algorithm to find the dense
+optical flow. It computes the optical flow for all the points in the frame. It is based on Gunnar
+Farneback's algorithm, which is explained in "Two-Frame Motion Estimation Based on Polynomial
+Expansion" by Gunnar Farneback in 2003.
+
+We use the function: **cv.calcOpticalFlowFarneback (prev, next, flow, pyrScale, levels, winsize,
+iterations, polyN, polySigma, flags)**
+@param prev first 8-bit single-channel input image.
+@param next second input image of the same size and the same type as prev.
+@param flow computed flow image that has the same size as prev and type CV_32FC2.
+@param pyrScale parameter specifying the image scale (<1) to build pyramids for each image;
+pyrScale=0.5 means a classical pyramid, where each layer is half the size of the previous one.
+@param levels number of pyramid layers including the initial image; levels=1 means that no extra
+layers are created and only the original images are used.
+@param winsize averaging window size; larger values increase the algorithm robustness to image noise
+and give more chances for fast motion detection, but yield more blurred motion field.
+@param iterations number of iterations the algorithm does at each pyramid level.
+@param polyN size of the pixel neighborhood used to find polynomial expansion in each pixel; larger
+values mean that the image will be approximated with smoother surfaces, yielding a more robust
+algorithm and a more blurred motion field; typically polyN = 5 or 7.
+@param polySigma standard deviation of the Gaussian that is used to smooth derivatives used as a
+basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1, for polyN=7, a good
+value would be polySigma=1.5.
+@param flags operation flags that can be a combination of the following:
+- cv.OPTFLOW_USE_INITIAL_FLOW uses the input flow as an initial flow approximation.
+- cv.OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$ filter
+instead of a box filter of the same size for optical flow estimation; usually, this option gives a
+more accurate flow than with a box filter, at the cost of lower speed; normally, winsize for a
+Gaussian window should be set to a larger value to achieve the same level of robustness.
+
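+A minimal sketch (with `prevGray` and `currGray` as above):
+@code{.js}
+let flow = new cv.Mat();
+cv.calcOpticalFlowFarneback(prevGray, currGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
+// flow is CV_32FC2: a (dx, dy) displacement for every pixel
+let dx = flow.data32F[0]; // flow at pixel (0, 0)
+let dy = flow.data32F[1];
+flow.delete();
+@endcode
+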
+### Try it
+
+\htmlonly
+
+\endhtmlonly
diff --git a/doc/js_tutorials/js_video/js_meanshift/images/camshift_face.gif b/doc/js_tutorials/js_video/js_meanshift/images/camshift_face.gif
new file mode 100644
index 0000000000..d46e1c7915
Binary files /dev/null and b/doc/js_tutorials/js_video/js_meanshift/images/camshift_face.gif differ
diff --git a/doc/js_tutorials/js_video/js_meanshift/images/meanshift_basics.jpg b/doc/js_tutorials/js_video/js_meanshift/images/meanshift_basics.jpg
new file mode 100644
index 0000000000..73e513fb14
Binary files /dev/null and b/doc/js_tutorials/js_video/js_meanshift/images/meanshift_basics.jpg differ
diff --git a/doc/js_tutorials/js_video/js_meanshift/images/meanshift_face.gif b/doc/js_tutorials/js_video/js_meanshift/images/meanshift_face.gif
new file mode 100644
index 0000000000..6f9733146e
Binary files /dev/null and b/doc/js_tutorials/js_video/js_meanshift/images/meanshift_face.gif differ
diff --git a/doc/js_tutorials/js_video/js_meanshift/js_meanshift.markdown b/doc/js_tutorials/js_video/js_meanshift/js_meanshift.markdown
new file mode 100644
index 0000000000..3c3526b8d1
--- /dev/null
+++ b/doc/js_tutorials/js_video/js_meanshift/js_meanshift.markdown
@@ -0,0 +1,98 @@
+Meanshift and Camshift {#tutorial_js_meanshift}
+======================
+
+Goal
+----
+
+- We will learn about Meanshift and Camshift algorithms to find and track objects in videos.
+
+Meanshift
+---------
+
+The intuition behind meanshift is simple. Consider a set of points (it can be a pixel distribution
+such as a histogram backprojection). You are given a small window (possibly a circle) and you have
+to move that window to the area of maximum pixel density (or the maximum number of points). This
+is illustrated in the simple image below:
+
+![image](images/meanshift_basics.jpg)
+
+The initial window is shown as the blue circle named "C1". Its original center is marked by the
+blue rectangle named "C1_o". If you find the centroid of the points inside that window, you get
+the point "C1_r" (marked by the small blue circle), which is the real centroid of the window.
+Surely they don't match. So move the window such that the circle of the new window matches the
+previous centroid. Again find the new centroid. Most probably, it won't match. So move it again,
+and continue the iterations until the center of the window and its centroid fall on the same
+location (or within a small desired error). What you finally obtain is a window of maximum pixel
+distribution, marked by the green circle named "C2". As you can see in the image, it contains the
+maximum number of points. The whole process is demonstrated on a static image below:
+
+![image](images/meanshift_face.gif)
+
+So we normally pass the histogram backprojected image and the initial target location. When the
+object moves, the movement is reflected in the histogram backprojected image. As a result, the
+meanshift algorithm moves our window to the new location with maximum density.
+
+### Meanshift in OpenCV.js
+
+To use meanshift in OpenCV.js, we first need to set up the target and find its histogram, so that
+we can backproject the target on each frame to compute the meanshift. We also need to provide the
+initial location of the window. For the histogram, only Hue is considered here. Also, to avoid
+false values due to low light, low-light values are discarded using the **cv.inRange()** function.
+
+We use the function: **cv.meanShift (probImage, window, criteria)**
+@param probImage Back projection of the object histogram. See cv.calcBackProject for details.
+@param window Initial search window.
+@param criteria Stop criteria for the iterative search algorithm.
+@return number of iterations meanShift took to converge and the new location
+
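+A minimal sketch of one tracking iteration is shown below. It assumes `frame` holds the current
+video frame, `trackWindow` is a cv.Rect around the target, and `roiHist` and `termCrit` were
+prepared once from the initial ROI (illustrative names, not part of the API):
+
+@code{.js}
+// Backproject the target's Hue histogram onto the current frame.
+let hsv = new cv.Mat();
+cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
+cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
+let hsvVec = new cv.MatVector();
+hsvVec.push_back(hsv);
+let dst = new cv.Mat();
+cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
+// cv.meanShift returns [iterations, updated window].
+let [numIters, newWindow] = cv.meanShift(dst, trackWindow, termCrit);
+trackWindow = newWindow;
+hsv.delete(); hsvVec.delete(); dst.delete();
+@endcode
+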
+### Try it
+
+\htmlonly
+
+\endhtmlonly
+
+Camshift
+--------
+
+Did you closely watch the last result? There is a problem: our window always has the same size,
+whether the object is far away or very close to the camera. That is not good. We need to adapt the
+window size to the size and rotation of the target. Once again, the solution came from "OpenCV
+Labs": CAMshift (Continuously Adaptive Meanshift), published by Gary Bradski in his paper
+"Computer Vision Face Tracking for Use in a Perceptual User Interface" in 1998.
+
+It applies meanshift first. Once meanshift converges, it updates the size of the window as
+\f$s = 2 \times \sqrt{\frac{M_{00}}{256}}\f$. It also calculates the orientation of the
+best-fitting ellipse. It then applies meanshift again with the newly scaled search window at the
+previous window location. The process continues until the required accuracy is met.
+
+![image](images/camshift_face.gif)
+
+### Camshift in OpenCV.js
+
+It is almost the same as meanshift, but it returns a rotated rectangle (our result) and box
+parameters (to be passed as the search window in the next iteration).
+
+We use the function: **cv.CamShift (probImage, window, criteria)**
+@param probImage Back projection of the object histogram. See cv.calcBackProject for details.
+@param window Initial search window.
+@param criteria Stop criteria for the iterative search algorithm.
+@return Rotated rectangle and the new search window
+
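+The call pattern mirrors the meanshift sketch above; only the return value differs (again assuming
+`dst`, `trackWindow`, and `termCrit` from the previous sketch):
+
+@code{.js}
+// cv.CamShift returns [rotated rectangle, updated window].
+let [rotatedRect, newWindow] = cv.CamShift(dst, trackWindow, termCrit);
+let vertices = cv.rotatedRectPoints(rotatedRect); // four corners, e.g. for drawing the box
+trackWindow = newWindow;
+@endcode
+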
+### Try it
+
+\htmlonly
+
+\endhtmlonly
+
+Additional Resources
+--------------------
+
+-# French Wikipedia page on [Camshift](http://fr.wikipedia.org/wiki/Camshift). (The two animations
+   are taken from there.)
+-# Bradski, G.R., "Real time face and object tracking as a component of a perceptual user
+   interface," Applications of Computer Vision, 1998. WACV '98. Proceedings, Fourth IEEE Workshop
+   on, pp. 214-219, 19-21 Oct 1998.
diff --git a/doc/js_tutorials/js_video/js_table_of_contents_video.markdown b/doc/js_tutorials/js_video/js_table_of_contents_video.markdown
new file mode 100644
index 0000000000..193d53e3e6
--- /dev/null
+++ b/doc/js_tutorials/js_video/js_table_of_contents_video.markdown
@@ -0,0 +1,17 @@
+Video Analysis {#tutorial_js_table_of_contents_video}
+==============
+
+- @subpage tutorial_js_meanshift
+
+  Here, we will learn about the tracking algorithms "Meanshift" and its upgraded version
+  "Camshift", used to find and track objects in videos.
+
+- @subpage tutorial_js_lucas_kanade
+
+ Now let's discuss an important concept, "Optical Flow", which is related to videos and has many
+ applications.
+
+- @subpage tutorial_js_bg_subtraction
+
+  In several applications, we need to extract the foreground for further operations like object
+  tracking. Background Subtraction is a well-known method for those cases.
diff --git a/doc/root.markdown.in b/doc/root.markdown.in
index 73be69c925..24414773b6 100644
--- a/doc/root.markdown.in
+++ b/doc/root.markdown.in
@@ -4,6 +4,7 @@ OpenCV modules {#mainpage}
- @ref intro
- @ref tutorial_root
- @ref tutorial_py_root
+@CMAKE_DOXYGEN_TUTORIAL_JS_ROOT@
@CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT@
- @ref faq
- @ref citelist
diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt
index 26fefcb8aa..a9ed1f157b 100644
--- a/modules/core/CMakeLists.txt
+++ b/modules/core/CMakeLists.txt
@@ -5,7 +5,7 @@ ocv_add_dispatched_file(stat SSE4_2 AVX2)
ocv_add_module(core
OPTIONAL opencv_cudev
- WRAP java python)
+ WRAP java python js)
set(extra_libs "")
diff --git a/modules/imgproc/CMakeLists.txt b/modules/imgproc/CMakeLists.txt
index 87d0711809..5cfb616503 100644
--- a/modules/imgproc/CMakeLists.txt
+++ b/modules/imgproc/CMakeLists.txt
@@ -1,3 +1,3 @@
set(the_description "Image Processing")
ocv_add_dispatched_file(accum SSE2 AVX NEON)
-ocv_define_module(imgproc opencv_core WRAP java python)
+ocv_define_module(imgproc opencv_core WRAP java python js)
diff --git a/modules/js/CMakeLists.txt b/modules/js/CMakeLists.txt
new file mode 100644
index 0000000000..28fee00a39
--- /dev/null
+++ b/modules/js/CMakeLists.txt
@@ -0,0 +1,125 @@
+# ----------------------------------------------------------------------------
+# CMake file for js support
+# ----------------------------------------------------------------------------
+
+# message(STATUS "---------------- Start of JavaScript module ----------------------")
+
+set(the_description "The js bindings")
+set(MODULE_NAME js)
+
+set(OPENCV_JS "opencv.js")
+
+ocv_add_module(${MODULE_NAME} BINDINGS)
+
+# TODO: add emscripten path
+ocv_module_include_directories()
+
+# get list of modules to wrap
+# message(STATUS "Wrapped in ${MODULE_NAME}:")
+set(OPENCV_JS_MODULES)
+foreach(m ${OPENCV_MODULES_BUILD})
+ if (";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";${MODULE_NAME};" AND HAVE_${m})
+ list(APPEND OPENCV_JS_MODULES ${m})
+ # message(STATUS "\t${m}")
+ endif()
+endforeach()
+
+set(opencv_hdrs "")
+foreach(m ${OPENCV_JS_MODULES})
+ list(APPEND opencv_hdrs ${OPENCV_MODULE_${m}_HEADERS})
+endforeach(m)
+
+# header blacklist
+ocv_list_filterout(opencv_hdrs "modules/.*.h$")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/cuda")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/opengl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/ocl.hpp")
+ocv_list_filterout(opencv_hdrs "modules/cuda.*")
+ocv_list_filterout(opencv_hdrs "modules/cudev")
+ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
+ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker.hpp") # Conditional compilation
+ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/trace.*.hpp")
+
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${opencv_hdrs}")
+
+set(bindings_cpp "${CMAKE_CURRENT_BINARY_DIR}/bindings.cpp")
+
+set(scripts_hdr_parser "${CMAKE_CURRENT_SOURCE_DIR}/../python/src2/hdr_parser.py")
+
+set(JS_HELPER "${CMAKE_CURRENT_SOURCE_DIR}/src/helpers.js")
+
+add_custom_command(
+ OUTPUT ${bindings_cpp}
+ COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py" ${scripts_hdr_parser} ${bindings_cpp} "${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp"
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/core_bindings.cpp
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/embindgen.py
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/templates.py
+ DEPENDS ${scripts_hdr_parser}
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/headers.txt
+ DEPENDS ${opencv_hdrs}
+ DEPENDS ${JS_HELPER})
+
+add_definitions("-std=c++11")
+
+link_libraries(${OPENCV_MODULE_${the_module}_DEPS})
+
+ocv_add_executable(${the_module} ${bindings_cpp})
+
+set_target_properties(${the_module} PROPERTIES COMPILE_FLAGS "-Wno-missing-prototypes")
+
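+# Emscripten link flags: --bind enables Embind, MODULARIZE=1 with EXPORT_NAME wraps the output in
+# a factory function named 'cv', TOTAL_MEMORY sets a 128 MB initial heap that ALLOW_MEMORY_GROWTH
+# can extend, and --post-js appends the helpers.js glue (JS_HELPER) set above.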
+set_target_properties(${the_module} PROPERTIES LINK_FLAGS "--memory-init-file 0 -s TOTAL_MEMORY=134217728 -s ALLOW_MEMORY_GROWTH=1 -s MODULARIZE=1 -s EXPORT_NAME=\"'cv'\" -s DEMANGLE_SUPPORT=1 -s FORCE_FILESYSTEM=1 --use-preload-plugins --bind --post-js ${JS_HELPER} -Wno-missing-prototypes")
+
+# add UMD wrapper
+set(MODULE_JS_PATH "${OpenCV_BINARY_DIR}/bin/${the_module}.js")
+set(OCV_JS_PATH "${OpenCV_BINARY_DIR}/bin/${OPENCV_JS}")
+
+add_custom_command(
+ OUTPUT ${OCV_JS_PATH}
+ COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/make_umd.py" ${MODULE_JS_PATH} "${OCV_JS_PATH}"
+ DEPENDS ${the_module}
+ DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/make_umd.py")
+
+add_custom_target(${OPENCV_JS} ALL
+ DEPENDS ${OCV_JS_PATH}
+ DEPENDS ${the_module})
+
+# test
+set(opencv_test_js_bin_dir "${EXECUTABLE_OUTPUT_PATH}")
+set(test_dir ${CMAKE_CURRENT_SOURCE_DIR}/test)
+
+set(opencv_test_js_file_deps "")
+
+# message(STATUS "${opencv_test_js_bin_dir}")
+
+# make sure the build directory exists
+file(MAKE_DIRECTORY "${opencv_test_js_bin_dir}")
+
+# gather and copy specific files for js test
+file(GLOB_RECURSE test_files RELATIVE "${test_dir}" "${test_dir}/*")
+foreach(f ${test_files})
+ # message(STATUS "copy ${test_dir}/${f} ${opencv_test_js_bin_dir}/${f}")
+ add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${f}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${test_dir}/${f}" "${opencv_test_js_bin_dir}/${f}"
+ DEPENDS "${test_dir}/${f}"
+ COMMENT "Copying ${f}"
+ )
+ list(APPEND opencv_test_js_file_deps "${test_dir}/${f}" "${opencv_test_js_bin_dir}/${f}")
+endforeach()
+
+# copy test data
+set(test_data "haarcascade_frontalface_default.xml")
+set(test_data_path "${PROJECT_SOURCE_DIR}/../../data/haarcascades/${test_data}")
+
+add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${test_data}"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}"
+ DEPENDS "${test_data_path}"
+ COMMENT "Copying ${test_data}"
+ )
+list(APPEND opencv_test_js_file_deps "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}")
+
+add_custom_target(${PROJECT_NAME}_test ALL
+ DEPENDS ${OCV_JS_PATH} ${opencv_test_js_file_deps})
+
+unset(MODULE_NAME)
+
+# message(STATUS "---------------- End of JavaScript module ----------------------")
diff --git a/modules/js/src/.eslintrc.json b/modules/js/src/.eslintrc.json
new file mode 100644
index 0000000000..4f97ebcf06
--- /dev/null
+++ b/modules/js/src/.eslintrc.json
@@ -0,0 +1,16 @@
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "CallExpression": {"arguments": "first"},
+ "SwitchCase": 1}],
+ "no-var": "off",
+ "prefer-rest-params": "off",
+ "require-jsdoc": "off"
+ }
+}
diff --git a/modules/js/src/core_bindings.cpp b/modules/js/src/core_bindings.cpp
new file mode 100644
index 0000000000..5cd5eb205e
--- /dev/null
+++ b/modules/js/src/core_bindings.cpp
@@ -0,0 +1,587 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*M///////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//M*/
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/video/background_segm.hpp"
+#include "opencv2/objdetect.hpp"
+
+#include <emscripten/bind.h>
+
+using namespace emscripten;
+using namespace cv;
+
+namespace binding_utils
+{
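+    // matData/matPtr return emscripten::memory_view wrappers: zero-copy typed-array views into
+    // the Emscripten heap backing the Mat, so JS sees live pixel data without copying.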
+    template<typename T>
+    emscripten::val matData(const cv::Mat& mat)
+    {
+        return emscripten::val(emscripten::memory_view<T>((mat.total()*mat.elemSize())/sizeof(T),
+                                                          (T*)mat.data));
+    }
+
+    template<typename T>
+    emscripten::val matPtr(const cv::Mat& mat, int i)
+    {
+        return emscripten::val(emscripten::memory_view<T>(mat.step1(0), mat.ptr<T>(i)));
+    }
+
+    template<typename T>
+    emscripten::val matPtr(const cv::Mat& mat, int i, int j)
+    {
+        return emscripten::val(emscripten::memory_view<T>(mat.step1(1), mat.ptr<T>(i,j)));
+    }
+
+ cv::Mat* createMat(int rows, int cols, int type, intptr_t data, size_t step)
+ {
+        return new cv::Mat(rows, cols, type, reinterpret_cast<void*>(data), step);
+ }
+
+ static emscripten::val getMatSize(const cv::Mat& mat)
+ {
+ emscripten::val size = emscripten::val::array();
+ for (int i = 0; i < mat.dims; i++) {
+            size.call<void>("push", mat.size[i]);
+ }
+ return size;
+ }
+
+ static emscripten::val getMatStep(const cv::Mat& mat)
+ {
+ emscripten::val step = emscripten::val::array();
+ for (int i = 0; i < mat.dims; i++) {
+            step.call<void>("push", mat.step[i]);
+ }
+ return step;
+ }
+
+ static Mat matEye(int rows, int cols, int type)
+ {
+ return Mat(cv::Mat::eye(rows, cols, type));
+ }
+
+ static Mat matEye(Size size, int type)
+ {
+ return Mat(cv::Mat::eye(size, type));
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype, double alpha, double beta)
+ {
+ obj.convertTo(m, rtype, alpha, beta);
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype)
+ {
+ obj.convertTo(m, rtype);
+ }
+
+ void convertTo(const Mat& obj, Mat& m, int rtype, double alpha)
+ {
+ obj.convertTo(m, rtype, alpha);
+ }
+
+ Size matSize(const cv::Mat& mat)
+ {
+ return mat.size();
+ }
+
+ cv::Mat matZeros(int arg0, int arg1, int arg2)
+ {
+ return cv::Mat::zeros(arg0, arg1, arg2);
+ }
+
+ cv::Mat matZeros(cv::Size arg0, int arg1)
+ {
+ return cv::Mat::zeros(arg0,arg1);
+ }
+
+ cv::Mat matOnes(int arg0, int arg1, int arg2)
+ {
+ return cv::Mat::ones(arg0, arg1, arg2);
+ }
+
+ cv::Mat matOnes(cv::Size arg0, int arg1)
+ {
+ return cv::Mat::ones(arg0, arg1);
+ }
+
+ double matDot(const cv::Mat& obj, const Mat& mat)
+ {
+ return obj.dot(mat);
+ }
+
+ Mat matMul(const cv::Mat& obj, const Mat& mat, double scale)
+ {
+ return Mat(obj.mul(mat, scale));
+ }
+
+ Mat matT(const cv::Mat& obj)
+ {
+ return Mat(obj.t());
+ }
+
+ Mat matInv(const cv::Mat& obj, int type)
+ {
+ return Mat(obj.inv(type));
+ }
+
+ void matCopyTo(const cv::Mat& obj, cv::Mat& mat)
+ {
+ return obj.copyTo(mat);
+ }
+
+ void matCopyTo(const cv::Mat& obj, cv::Mat& mat, const cv::Mat& mask)
+ {
+ return obj.copyTo(mat, mask);
+ }
+
+ Mat matDiag(const cv::Mat& obj, int d)
+ {
+ return obj.diag(d);
+ }
+
+ Mat matDiag(const cv::Mat& obj)
+ {
+ return obj.diag();
+ }
+
+ void matSetTo(cv::Mat& obj, const cv::Scalar& s)
+ {
+ obj.setTo(s);
+ }
+
+ void matSetTo(cv::Mat& obj, const cv::Scalar& s, const cv::Mat& mask)
+ {
+ obj.setTo(s, mask);
+ }
+
+ emscripten::val rotatedRectPoints(const cv::RotatedRect& obj)
+ {
+ cv::Point2f points[4];
+ obj.points(points);
+ emscripten::val pointsArray = emscripten::val::array();
+ for (int i = 0; i < 4; i++) {
+            pointsArray.call<void>("push", points[i]);
+ }
+ return pointsArray;
+ }
+
+ Rect rotatedRectBoundingRect(const cv::RotatedRect& obj)
+ {
+ return obj.boundingRect();
+ }
+
+ Rect2f rotatedRectBoundingRect2f(const cv::RotatedRect& obj)
+ {
+ return obj.boundingRect2f();
+ }
+
+ int cvMatDepth(int flags)
+ {
+ return CV_MAT_DEPTH(flags);
+ }
+
+ class MinMaxLoc
+ {
+ public:
+ double minVal;
+ double maxVal;
+ Point minLoc;
+ Point maxLoc;
+ };
+
+ MinMaxLoc minMaxLoc(const cv::Mat& src, const cv::Mat& mask)
+ {
+ MinMaxLoc result;
+ cv::minMaxLoc(src, &result.minVal, &result.maxVal, &result.minLoc, &result.maxLoc, mask);
+ return result;
+ }
+
+ MinMaxLoc minMaxLoc_1(const cv::Mat& src)
+ {
+ MinMaxLoc result;
+ cv::minMaxLoc(src, &result.minVal, &result.maxVal, &result.minLoc, &result.maxLoc);
+ return result;
+ }
+
+ class Circle
+ {
+ public:
+ Point2f center;
+ float radius;
+ };
+
+ Circle minEnclosingCircle(const cv::Mat& points)
+ {
+ Circle circle;
+ cv::minEnclosingCircle(points, circle.center, circle.radius);
+ return circle;
+ }
+
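+    // Embind cannot bind the Rect& output parameter of cv::CamShift/cv::meanShift directly
+    // (both are ignore-listed in embindgen.py), so these wrappers return a JS array of
+    // [return value, updated window] instead.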
+ emscripten::val CamShiftWrapper(const cv::Mat& arg1, Rect& arg2, TermCriteria arg3)
+ {
+ RotatedRect rotatedRect = cv::CamShift(arg1, arg2, arg3);
+ emscripten::val result = emscripten::val::array();
+        result.call<void>("push", rotatedRect);
+        result.call<void>("push", arg2);
+ return result;
+ }
+
+ emscripten::val meanShiftWrapper(const cv::Mat& arg1, Rect& arg2, TermCriteria arg3)
+ {
+ int n = cv::meanShift(arg1, arg2, arg3);
+ emscripten::val result = emscripten::val::array();
+        result.call<void>("push", n);
+        result.call<void>("push", arg2);
+ return result;
+ }
+
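+    // Uncaught C++ exceptions surface in JS as raw pointer values; exceptionFromPtr lets the
+    // JS side rebuild the cv::Exception (including its message) from that pointer.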
+ std::string getExceptionMsg(const cv::Exception& e) {
+ return e.msg;
+ }
+
+ void setExceptionMsg(cv::Exception& e, std::string msg) {
+ e.msg = msg;
+ return;
+ }
+
+ cv::Exception exceptionFromPtr(intptr_t ptr) {
+        return *reinterpret_cast<cv::Exception*>(ptr);
+ }
+
+ std::string getBuildInformation() {
+ return cv::getBuildInformation();
+ }
+}
+
+EMSCRIPTEN_BINDINGS(binding_utils)
+{
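+    // Expose std::vector instantiations to JS; Embind gives them get/set/push_back/size methods.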
+    register_vector<int>("IntVector");
+    register_vector<float>("FloatVector");
+    register_vector<double>("DoubleVector");
+    register_vector<cv::Point>("PointVector");
+    register_vector<cv::Mat>("MatVector");
+    register_vector<cv::Rect>("RectVector");
+
+    emscripten::class_<cv::Mat>("Mat")
+        .constructor<>()
+        .constructor<const Mat&>()
+        .constructor<Size, int>()
+        .constructor<int, int, int>()
+        .constructor<int, int, int, const Scalar&>()
+        .constructor(&binding_utils::createMat, allow_raw_pointers())
+
+        .class_function("eye", select_overload<Mat(int, int, int)>(&binding_utils::matEye))
+        .class_function("eye", select_overload<Mat(Size, int)>(&binding_utils::matEye))
+        .class_function("ones", select_overload<Mat(int, int, int)>(&binding_utils::matOnes))
+        .class_function("ones", select_overload<Mat(Size, int)>(&binding_utils::matOnes))
+        .class_function("zeros", select_overload<Mat(int, int, int)>(&binding_utils::matZeros))
+        .class_function("zeros", select_overload<Mat(Size, int)>(&binding_utils::matZeros))
+
+        .property("rows", &cv::Mat::rows)
+        .property("cols", &cv::Mat::cols)
+        .property("matSize", &binding_utils::getMatSize)
+        .property("step", &binding_utils::getMatStep)
+        .property("data", &binding_utils::matData<unsigned char>)
+        .property("data8S", &binding_utils::matData<char>)
+        .property("data16U", &binding_utils::matData<unsigned short>)
+        .property("data16S", &binding_utils::matData<short>)
+        .property("data32S", &binding_utils::matData<int>)
+        .property("data32F", &binding_utils::matData<float>)
+        .property("data64F", &binding_utils::matData<double>)
+
+        .function("elemSize", select_overload<size_t()const>(&cv::Mat::elemSize))
+        .function("elemSize1", select_overload<size_t()const>(&cv::Mat::elemSize1))
+        .function("channels", select_overload<int()const>(&cv::Mat::channels))
+        .function("convertTo", select_overload<void(const Mat&, Mat&, int, double, double)>(&binding_utils::convertTo))
+        .function("convertTo", select_overload<void(const Mat&, Mat&, int)>(&binding_utils::convertTo))
+        .function("convertTo", select_overload<void(const Mat&, Mat&, int, double)>(&binding_utils::convertTo))
+        .function("total", select_overload<size_t()const>(&cv::Mat::total))
+        .function("row", select_overload<Mat(int)const>(&cv::Mat::row))
+        .function("create", select_overload<void(int, int, int)>(&cv::Mat::create))
+        .function("create", select_overload<void(Size, int)>(&cv::Mat::create))
+        .function("rowRange", select_overload<Mat(int, int)const>(&cv::Mat::rowRange))
+        .function("rowRange", select_overload<Mat(const Range&)const>(&cv::Mat::rowRange))
+        .function("copyTo", select_overload<void(const Mat&, Mat&)>(&binding_utils::matCopyTo))
+        .function("copyTo", select_overload<void(const Mat&, Mat&, const Mat&)>(&binding_utils::matCopyTo))
+        .function("type", select_overload<int()const>(&cv::Mat::type))
+        .function("empty", select_overload<bool()const>(&cv::Mat::empty))
+        .function("colRange", select_overload<Mat(int, int)const>(&cv::Mat::colRange))
+        .function("colRange", select_overload<Mat(const Range&)const>(&cv::Mat::colRange))
+        .function("step1", select_overload<size_t(int)const>(&cv::Mat::step1))
+        .function("clone", select_overload<Mat()const>(&cv::Mat::clone))
+        .function("depth", select_overload<int()const>(&cv::Mat::depth))
+        .function("col", select_overload<Mat(int)const>(&cv::Mat::col))
+        .function("dot", select_overload<double(const Mat&, const Mat&)>(&binding_utils::matDot))
+        .function("mul", select_overload<Mat(const Mat&, const Mat&, double)>(&binding_utils::matMul))
+        .function("inv", select_overload<Mat(const Mat&, int)>(&binding_utils::matInv))
+        .function("t", select_overload<Mat(const Mat&)>(&binding_utils::matT))
+        .function("roi", select_overload<Mat(const Rect&)const>(&cv::Mat::operator()))
+        .function("diag", select_overload<Mat(const Mat&, int)>(&binding_utils::matDiag))
+        .function("diag", select_overload<Mat(const Mat&)>(&binding_utils::matDiag))
+        .function("isContinuous", select_overload<bool()const>(&cv::Mat::isContinuous))
+        .function("setTo", select_overload<void(Mat&, const Scalar&)>(&binding_utils::matSetTo))
+        .function("setTo", select_overload<void(Mat&, const Scalar&, const Mat&)>(&binding_utils::matSetTo))
+        .function("size", select_overload<Size(const Mat&)>(&binding_utils::matSize))
+
+        .function("ptr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<unsigned char>))
+        .function("ptr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<unsigned char>))
+        .function("ucharPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<unsigned char>))
+        .function("ucharPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<unsigned char>))
+        .function("charPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<char>))
+        .function("charPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<char>))
+        .function("shortPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<short>))
+        .function("shortPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<short>))
+        .function("ushortPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<unsigned short>))
+        .function("ushortPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<unsigned short>))
+        .function("intPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<int>))
+        .function("intPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<int>))
+        .function("floatPtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<float>))
+        .function("floatPtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<float>))
+        .function("doublePtr", select_overload<emscripten::val(const cv::Mat&, int)>(&binding_utils::matPtr<double>))
+        .function("doublePtr", select_overload<emscripten::val(const cv::Mat&, int, int)>(&binding_utils::matPtr<double>))
+
+        .function("charAt", select_overload<char&(int)>(&cv::Mat::at<char>))
+        .function("charAt", select_overload<char&(int, int)>(&cv::Mat::at<char>))
+        .function("charAt", select_overload<char&(int, int, int)>(&cv::Mat::at<char>))
+        .function("ucharAt", select_overload<unsigned char&(int)>(&cv::Mat::at<unsigned char>))
+        .function("ucharAt", select_overload<unsigned char&(int, int)>(&cv::Mat::at<unsigned char>))
+        .function("ucharAt", select_overload<unsigned char&(int, int, int)>(&cv::Mat::at<unsigned char>))
+        .function("shortAt", select_overload<short&(int)>(&cv::Mat::at<short>))
+        .function("shortAt", select_overload<short&(int, int)>(&cv::Mat::at<short>))
+        .function("shortAt", select_overload<short&(int, int, int)>(&cv::Mat::at<short>))
+        .function("ushortAt", select_overload<unsigned short&(int)>(&cv::Mat::at<unsigned short>))
+        .function("ushortAt", select_overload<unsigned short&(int, int)>(&cv::Mat::at<unsigned short>))
+        .function("ushortAt", select_overload<unsigned short&(int, int, int)>(&cv::Mat::at<unsigned short>))
+        .function("intAt", select_overload<int&(int)>(&cv::Mat::at<int>))
+        .function("intAt", select_overload<int&(int, int)>(&cv::Mat::at<int>))
+        .function("intAt", select_overload<int&(int, int, int)>(&cv::Mat::at<int>))
+        .function("floatAt", select_overload<float&(int)>(&cv::Mat::at<float>))
+        .function("floatAt", select_overload<float&(int, int)>(&cv::Mat::at<float>))
+        .function("floatAt", select_overload<float&(int, int, int)>(&cv::Mat::at<float>))
+        .function("doubleAt", select_overload<double&(int)>(&cv::Mat::at<double>))
+        .function("doubleAt", select_overload<double&(int, int)>(&cv::Mat::at<double>))
+        .function("doubleAt", select_overload<double&(int, int, int)>(&cv::Mat::at<double>));
+
+    emscripten::value_object<cv::Range>("Range")
+ .field("start", &cv::Range::start)
+ .field("end", &cv::Range::end);
+
+    emscripten::value_object<cv::TermCriteria>("TermCriteria")
+ .field("type", &cv::TermCriteria::type)
+ .field("maxCount", &cv::TermCriteria::maxCount)
+ .field("epsilon", &cv::TermCriteria::epsilon);
+
+#define EMSCRIPTEN_CV_SIZE(type) \
+    emscripten::value_object<type>(#type) \
+ .field("width", &type::width) \
+ .field("height", &type::height);
+
+ EMSCRIPTEN_CV_SIZE(Size)
+ EMSCRIPTEN_CV_SIZE(Size2f)
+
+#define EMSCRIPTEN_CV_POINT(type) \
+    emscripten::value_object<type>(#type) \
+ .field("x", &type::x) \
+ .field("y", &type::y); \
+
+ EMSCRIPTEN_CV_POINT(Point)
+ EMSCRIPTEN_CV_POINT(Point2f)
+
+#define EMSCRIPTEN_CV_RECT(type, name) \
+    emscripten::value_object<cv::Rect_<type>> (name) \
+        .field("x", &cv::Rect_<type>::x) \
+        .field("y", &cv::Rect_<type>::y) \
+        .field("width", &cv::Rect_<type>::width) \
+        .field("height", &cv::Rect_<type>::height);
+
+ EMSCRIPTEN_CV_RECT(int, "Rect")
+ EMSCRIPTEN_CV_RECT(float, "Rect2f")
+
+    emscripten::value_object<cv::RotatedRect>("RotatedRect")
+ .field("center", &cv::RotatedRect::center)
+ .field("size", &cv::RotatedRect::size)
+ .field("angle", &cv::RotatedRect::angle);
+
+    function("rotatedRectPoints", select_overload<emscripten::val(const cv::RotatedRect&)>(&binding_utils::rotatedRectPoints));
+    function("rotatedRectBoundingRect", select_overload<Rect(const cv::RotatedRect&)>(&binding_utils::rotatedRectBoundingRect));
+    function("rotatedRectBoundingRect2f", select_overload<Rect2f(const cv::RotatedRect&)>(&binding_utils::rotatedRectBoundingRect2f));
+
+    emscripten::value_array<cv::Scalar_<double>> ("Scalar")
+ .element(index<0>())
+ .element(index<1>())
+ .element(index<2>())
+ .element(index<3>());
+
+    emscripten::value_object<binding_utils::MinMaxLoc>("MinMaxLoc")
+ .field("minVal", &binding_utils::MinMaxLoc::minVal)
+ .field("maxVal", &binding_utils::MinMaxLoc::maxVal)
+ .field("minLoc", &binding_utils::MinMaxLoc::minLoc)
+ .field("maxLoc", &binding_utils::MinMaxLoc::maxLoc);
+
+    emscripten::value_object<binding_utils::Circle>("Circle")
+ .field("center", &binding_utils::Circle::center)
+ .field("radius", &binding_utils::Circle::radius);
+
+    emscripten::value_object<cv::Moments>("Moments")
+ .field("m00", &cv::Moments::m00)
+ .field("m10", &cv::Moments::m10)
+ .field("m01", &cv::Moments::m01)
+ .field("m20", &cv::Moments::m20)
+ .field("m11", &cv::Moments::m11)
+ .field("m02", &cv::Moments::m02)
+ .field("m30", &cv::Moments::m30)
+ .field("m21", &cv::Moments::m21)
+ .field("m12", &cv::Moments::m12)
+ .field("m03", &cv::Moments::m03)
+ .field("mu20", &cv::Moments::mu20)
+ .field("mu11", &cv::Moments::mu11)
+ .field("mu02", &cv::Moments::mu02)
+ .field("mu30", &cv::Moments::mu30)
+ .field("mu21", &cv::Moments::mu21)
+ .field("mu12", &cv::Moments::mu12)
+ .field("mu03", &cv::Moments::mu03)
+ .field("nu20", &cv::Moments::nu20)
+ .field("nu11", &cv::Moments::nu11)
+ .field("nu02", &cv::Moments::nu02)
+ .field("nu30", &cv::Moments::nu30)
+ .field("nu21", &cv::Moments::nu21)
+ .field("nu12", &cv::Moments::nu12)
+ .field("nu03", &cv::Moments::nu03);
+
+    emscripten::value_object<cv::Exception>("Exception")
+ .field("code", &cv::Exception::code)
+ .field("msg", &binding_utils::getExceptionMsg, &binding_utils::setExceptionMsg);
+
+ function("exceptionFromPtr", &binding_utils::exceptionFromPtr, allow_raw_pointers());
+
+    function("minEnclosingCircle", select_overload<binding_utils::Circle(const cv::Mat&)>(&binding_utils::minEnclosingCircle));
+
+    function("minMaxLoc", select_overload<binding_utils::MinMaxLoc(const cv::Mat&, const cv::Mat&)>(&binding_utils::minMaxLoc));
+
+    function("minMaxLoc", select_overload<binding_utils::MinMaxLoc(const cv::Mat&)>(&binding_utils::minMaxLoc_1));
+
+ function("morphologyDefaultBorderValue", &cv::morphologyDefaultBorderValue);
+
+ function("CV_MAT_DEPTH", &binding_utils::cvMatDepth);
+
+    function("CamShift", select_overload<emscripten::val(const cv::Mat&, Rect&, TermCriteria)>(&binding_utils::CamShiftWrapper));
+
+    function("meanShift", select_overload<emscripten::val(const cv::Mat&, Rect&, TermCriteria)>(&binding_utils::meanShiftWrapper));
+
+ function("getBuildInformation", &binding_utils::getBuildInformation);
+
+ constant("CV_8UC1", CV_8UC1);
+ constant("CV_8UC2", CV_8UC2);
+ constant("CV_8UC3", CV_8UC3);
+ constant("CV_8UC4", CV_8UC4);
+
+ constant("CV_8SC1", CV_8SC1);
+ constant("CV_8SC2", CV_8SC2);
+ constant("CV_8SC3", CV_8SC3);
+ constant("CV_8SC4", CV_8SC4);
+
+ constant("CV_16UC1", CV_16UC1);
+ constant("CV_16UC2", CV_16UC2);
+ constant("CV_16UC3", CV_16UC3);
+ constant("CV_16UC4", CV_16UC4);
+
+ constant("CV_16SC1", CV_16SC1);
+ constant("CV_16SC2", CV_16SC2);
+ constant("CV_16SC3", CV_16SC3);
+ constant("CV_16SC4", CV_16SC4);
+
+ constant("CV_32SC1", CV_32SC1);
+ constant("CV_32SC2", CV_32SC2);
+ constant("CV_32SC3", CV_32SC3);
+ constant("CV_32SC4", CV_32SC4);
+
+ constant("CV_32FC1", CV_32FC1);
+ constant("CV_32FC2", CV_32FC2);
+ constant("CV_32FC3", CV_32FC3);
+ constant("CV_32FC4", CV_32FC4);
+
+ constant("CV_64FC1", CV_64FC1);
+ constant("CV_64FC2", CV_64FC2);
+ constant("CV_64FC3", CV_64FC3);
+ constant("CV_64FC4", CV_64FC4);
+
+ constant("CV_8U", CV_8U);
+ constant("CV_8S", CV_8S);
+ constant("CV_16U", CV_16U);
+ constant("CV_16S", CV_16S);
+ constant("CV_32S", CV_32S);
+ constant("CV_32F", CV_32F);
+ constant("CV_64F", CV_64F);
+
+ constant("INT_MIN", INT_MIN);
+ constant("INT_MAX", INT_MAX);
+}
diff --git a/modules/js/src/embindgen.py b/modules/js/src/embindgen.py
new file mode 100644
index 0000000000..124ab9054c
--- /dev/null
+++ b/modules/js/src/embindgen.py
@@ -0,0 +1,920 @@
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+###############################################################################
+
+from __future__ import print_function
+import sys, re, os
+from templates import *
+from sets import Set
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+
+func_table = {}
+
+# Ignore these functions due to Embind limitations for now
+ignore_list = ['locate', #int&
+ 'minEnclosingCircle', #float&
+ 'checkRange',
+ 'minMaxLoc', #double*
+ 'floodFill',
+ 'phaseCorrelate',
+ 'randShuffle',
+ 'calibrationMatrixValues', #double&
+ 'undistortPoints', # global redefinition
+ 'CamShift', #Rect&
+ 'meanShift' #Rect&
+ ]
+
+# Classes and methods whitelist
+core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',\
+ 'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
+ 'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
+ 'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
+ 'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'setIdentity', 'setRNGSeed', \
+ 'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
+ 'Algorithm': []}
+
+imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr','Sobel', \
+ 'adaptiveThreshold','approxPolyDP','arcLength','bilateralFilter','blur','boundingRect','boxFilter',\
+ 'calcBackProject','calcHist','circle','compareHist','connectedComponents','connectedComponentsWithStats', \
+ 'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris','cornerMinEigenVal','createCLAHE', \
+ 'createLineSegmentDetector','cvtColor','demosaicing','dilate', 'distanceTransform','distanceTransformWithLabels', \
+ 'drawContours','ellipse','ellipse2Poly','equalizeHist','erode', 'filter2D', 'findContours','fitEllipse', \
+ 'fitLine', 'floodFill','getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', \
+ 'goodFeaturesToTrack','grabCut','initUndistortRectifyMap', 'integral','integral2', 'isContourConvex', 'line', \
+ 'matchShapes', 'matchTemplate','medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
+ 'pointPolygonTest', 'putText','pyrDown','pyrUp','rectangle','remap', 'resize','sepFilter2D','threshold', \
+ 'undistort','warpAffine','warpPerspective','watershed'],
+ 'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}
+
+objdetect = {'': ['groupRectangles'],
+ 'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
+ 'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}
+
+video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', 'estimateRigidTransform',\
+ 'findTransformECC', 'meanShift'],
+ 'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
+ 'BackgroundSubtractor': ['apply', 'getBackgroundImage']}
+
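+# Merge the per-module whitelists above into one {class_name: [method, ...]} dict;
+# the '' key collects global (free) functions.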
+def makeWhiteList(module_list):
+ wl = {}
+ for m in module_list:
+ for k in m.keys():
+ if k in wl:
+ wl[k] += m[k]
+ else:
+ wl[k] = m[k]
+ return wl
+
+white_list = makeWhiteList([core, imgproc, objdetect, video])
+
+# Features to be exported
+export_enums = False
+export_consts = True
+with_wrapped_functions = True
+with_default_params = True
+with_vec_from_js_array = True
+
+wrapper_namespace = "Wrappers"
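+# Map OpenCV's proxy argument types to the concrete C++ types used in the generated wrappers.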
+type_dict = {
+ 'InputArray': 'const cv::Mat&',
+ 'OutputArray': 'cv::Mat&',
+ 'InputOutputArray': 'cv::Mat&',
+    'InputArrayOfArrays': 'const std::vector<cv::Mat>&',
+    'OutputArrayOfArrays': 'std::vector<cv::Mat>&',
+ 'String': 'std::string',
+ 'const String&':'const std::string&'
+}
+
+def normalize_class_name(name):
+ return re.sub(r"^cv\.", "", name).replace(".", "_")
+
+
+class ClassProp(object):
+ def __init__(self, decl):
+ self.tp = decl[0].replace("*", "_ptr").strip()
+ self.name = decl[1]
+ self.readonly = True
+ if "/RW" in decl[3]:
+ self.readonly = False
+
+
+class ClassInfo(object):
+ def __init__(self, name, decl=None):
+ self.cname = name.replace(".", "::")
+ self.name = self.wname = normalize_class_name(name)
+
+ self.ismap = False
+ self.issimple = False
+ self.isalgorithm = False
+ self.methods = {}
+ self.ext_constructors = {}
+ self.props = []
+ self.consts = {}
+ customname = False
+ self.jsfuncs = {}
+ self.constructor_arg_num = Set()
+
+ self.has_smart_ptr = False
+
+ if decl:
+ self.bases = decl[1].split()[1:]
+ if len(self.bases) > 1:
+ self.bases = [self.bases[0].strip(",")]
+ # return sys.exit(-1)
+ if self.bases and self.bases[0].startswith("cv::"):
+ self.bases[0] = self.bases[0][4:]
+ if self.bases and self.bases[0] == "Algorithm":
+ self.isalgorithm = True
+ for m in decl[2]:
+ if m.startswith("="):
+ self.wname = m[1:]
+ customname = True
+ elif m == "/Map":
+ self.ismap = True
+ elif m == "/Simple":
+ self.issimple = True
+ self.props = [ClassProp(p) for p in decl[3]]
+
+ if not customname and self.wname.startswith("Cv"):
+ self.wname = self.wname[2:]
+
+
+def handle_ptr(tp):
+ if tp.startswith('Ptr_'):
+ tp = 'Ptr<' + "::".join(tp.split('_')[1:]) + '>'
+ return tp
+
+def handle_vector(tp):
+ if tp.startswith('vector_'):
+ tp = 'std::vector<' + "::".join(tp.split('_')[1:]) + '>'
+ return tp
+
+
+class ArgInfo(object):
+ def __init__(self, arg_tuple):
+ self.tp = handle_ptr(arg_tuple[0]).strip()
+ self.name = arg_tuple[1]
+ self.defval = arg_tuple[2]
+ self.isarray = False
+ self.arraylen = 0
+ self.arraycvt = None
+ self.inputarg = True
+ self.outputarg = False
+ self.returnarg = False
+ self.const = False
+ self.reference = False
+ for m in arg_tuple[3]:
+ if m == "/O":
+ self.inputarg = False
+ self.outputarg = True
+ self.returnarg = True
+ elif m == "/IO":
+ self.inputarg = True
+ self.outputarg = True
+ self.returnarg = True
+ elif m.startswith("/A"):
+ self.isarray = True
+ self.arraylen = m[2:].strip()
+ elif m.startswith("/CA"):
+ self.isarray = True
+ self.arraycvt = m[2:].strip()
+ elif m == "/C":
+ self.const = True
+ elif m == "/Ref":
+ self.reference = True
+ if self.tp == "Mat":
+ if self.outputarg:
+ self.tp = "cv::Mat&"
+ elif self.inputarg:
+ self.tp = "const cv::Mat&"
+        if self.tp == "vector_Mat":
+            if self.outputarg:
+                self.tp = "std::vector<cv::Mat>&"
+            elif self.inputarg:
+                self.tp = "const std::vector<cv::Mat>&"
+ self.tp = handle_vector(self.tp).strip()
+ if self.const:
+ self.tp = "const " + self.tp
+ if self.reference:
+ self.tp = self.tp + "&"
+ self.py_inputarg = False
+ self.py_outputarg = False
+
+class FuncVariant(object):
+ def __init__(self, class_name, name, decl, is_constructor, is_class_method, is_const, is_virtual, is_pure_virtual, ref_return, const_return):
+ self.class_name = class_name
+ self.name = self.wname = name
+ self.is_constructor = is_constructor
+ self.is_class_method = is_class_method
+ self.is_const = is_const
+ self.is_virtual = is_virtual
+ self.is_pure_virtual = is_pure_virtual
+ self.refret = ref_return
+ self.constret = const_return
+ self.rettype = handle_vector(handle_ptr(decl[1]).strip()).strip()
+ if self.rettype == "void":
+ self.rettype = ""
+ self.args = []
+ self.array_counters = {}
+
+ for a in decl[3]:
+ ainfo = ArgInfo(a)
+ if ainfo.isarray and not ainfo.arraycvt:
+ c = ainfo.arraylen
+ c_arrlist = self.array_counters.get(c, [])
+ if c_arrlist:
+ c_arrlist.append(ainfo.name)
+ else:
+ self.array_counters[c] = [ainfo.name]
+ self.args.append(ainfo)
+
+
+class FuncInfo(object):
+ def __init__(self, class_name, name, cname, namespace, isconstructor):
+ self.class_name = class_name
+ self.name = name
+ self.cname = cname
+ self.namespace = namespace
+ self.variants = []
+ self.is_constructor = isconstructor
+
+ def add_variant(self, variant):
+ self.variants.append(variant)
+
+
+class Namespace(object):
+ def __init__(self):
+ self.funcs = {}
+ self.enums = {}
+ self.consts = {}
+
+
+class JSWrapperGenerator(object):
+ def __init__(self):
+
+ self.bindings = []
+ self.wrapper_funcs = []
+
+ self.classes = {}
+ self.namespaces = {}
+ self.enums = {}
+
+ self.parser = hdr_parser.CppHeaderParser()
+ self.class_idx = 0
+
+ def add_class(self, stype, name, decl):
+ class_info = ClassInfo(name, decl)
+ class_info.decl_idx = self.class_idx
+ self.class_idx += 1
+
+ if class_info.name in self.classes:
+ print("Generator error: class %s (cpp_name=%s) already exists" \
+ % (class_info.name, class_info.cname))
+ sys.exit(-1)
+ self.classes[class_info.name] = class_info
+
+ if class_info.bases:
+ chunks = class_info.bases[0].split('::')
+ base = '_'.join(chunks)
+ while base not in self.classes and len(chunks) > 1:
+ del chunks[-2]
+ base = '_'.join(chunks)
+ if base not in self.classes:
+ print("Generator error: unable to resolve base %s for %s"
+ % (class_info.bases[0], class_info.name))
+ sys.exit(-1)
+ else:
+ class_info.bases[0] = "::".join(chunks)
+ class_info.isalgorithm |= self.classes[base].isalgorithm
+
+ def split_decl_name(self, name):
+ chunks = name.split('.')
+ namespace = chunks[:-1]
+ classes = []
+ while namespace and '.'.join(namespace) not in self.parser.namespaces:
+ classes.insert(0, namespace.pop())
+ return namespace, classes, chunks[-1]
+
+ def add_enum(self, decl):
+ name = decl[1]
+ namespace, classes, val = self.split_decl_name(name)
+ namespace = '.'.join(namespace)
+ val = '_'.join(classes + [name])
+ cname = name.replace('.', '::')
+ ns = self.namespaces.setdefault(namespace, Namespace())
+ if name in ns.enums:
+ print("Generator warning: constant %s (cname=%s) already exists" \
+ % (name, cname))
+ # sys.exit(-1)
+ else:
+ ns.enums[name] = []
+ for item in decl[3]:
+ ns.enums[name].append(item)
+
+ def add_const(self, name, decl):
+ cname = name.replace('.','::')
+ namespace, classes, name = self.split_decl_name(name)
+ namespace = '.'.join(namespace)
+ name = '_'.join(classes+[name])
+ ns = self.namespaces.setdefault(namespace, Namespace())
+ if name in ns.consts:
+ print("Generator error: constant %s (cname=%s) already exists" \
+ % (name, cname))
+ sys.exit(-1)
+ ns.consts[name] = cname
+
+ def add_func(self, decl):
+ namespace, classes, barename = self.split_decl_name(decl[0])
+ cpp_name = "::".join(namespace + classes + [barename])
+ name = barename
+ class_name = ''
+ bare_class_name = ''
+ if classes:
+ class_name = normalize_class_name('.'.join(namespace + classes))
+ bare_class_name = classes[-1]
+ namespace = '.'.join(namespace)
+
+ is_constructor = name == bare_class_name
+ is_class_method = False
+ is_const_method = False
+ is_virtual_method = False
+ is_pure_virtual_method = False
+ const_return = False
+ ref_return = False
+
+ for m in decl[2]:
+ if m == "/S":
+ is_class_method = True
+ elif m == "/C":
+ is_const_method = True
+ elif m == "/V":
+ is_virtual_method = True
+ elif m == "/PV":
+ is_pure_virtual_method = True
+ elif m == "/Ref":
+ ref_return = True
+ elif m == "/CRet":
+ const_return = True
+ elif m.startswith("="):
+ name = m[1:]
+
+ if class_name:
+ cpp_name = barename
+ func_map = self.classes[class_name].methods
+ else:
+ func_map = self.namespaces.setdefault(namespace, Namespace()).funcs
+
+ func = func_map.setdefault(name, FuncInfo(class_name, name, cpp_name, namespace, is_constructor))
+
+ variant = FuncVariant(class_name, name, decl, is_constructor, is_class_method, is_const_method,
+ is_virtual_method, is_pure_virtual_method, ref_return, const_return)
+ func.add_variant(variant)
+
+ def save(self, path, name, buf):
+ f = open(path + "/" + name, "wt")
+ f.write(buf.getvalue())
+ f.close()
+
+ def gen_function_binding_with_wrapper(self, func, class_info):
+
+ binding_text = None
+ wrapper_func_text = None
+
+ bindings = []
+ wrappers = []
+
+ for index, variant in enumerate(func.variants):
+
+ factory = False
+ if class_info and 'Ptr<' in variant.rettype:
+
+ factory = True
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ if base_class_name in self.classes:
+ self.classes[base_class_name].has_smart_ptr = True
+ else:
+                    print(base_class_name, 'not found in classes for registering smart pointer; using', class_info.name, 'instead')
+ self.classes[class_info.name].has_smart_ptr = True
+
+ def_args = []
+ has_def_param = False
+
+ # Return type
+ ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype
+ if ret_type.startswith('Ptr'): #smart pointer
+ ptr_type = ret_type.replace('Ptr<', '').replace('>', '')
+ if ptr_type in type_dict:
+ ret_type = type_dict[ptr_type]
+ for key in type_dict:
+ if key in ret_type:
+ ret_type = ret_type.replace(key, type_dict[key])
+
+ arg_types = []
+ unwrapped_arg_types = []
+ for arg in variant.args:
+ arg_type = None
+ if arg.tp in type_dict:
+ arg_type = type_dict[arg.tp]
+ else:
+ arg_type = arg.tp
+ # Add default value
+ if with_default_params and arg.defval != '':
+ def_args.append(arg.defval);
+ arg_types.append(arg_type)
+ unwrapped_arg_types.append(arg_type)
+
+            # Function attribute
+ func_attribs = ''
+ if '*' in ''.join(arg_types):
+ func_attribs += ', allow_raw_pointers()'
+
+ if variant.is_pure_virtual:
+ func_attribs += ', pure_virtual()'
+
+
+ # Wrapper function
+ wrap_func_name = (func.class_name+"_" if class_info != None else "") + func.name.split("::")[-1] + "_wrapper"
+ js_func_name = func.name
+
+            # TODO: Name functions based on wrap directives or on the arguments list
+ if index > 0:
+ wrap_func_name += str(index)
+ js_func_name += str(index)
+
+ c_func_name = 'Wrappers::' + wrap_func_name
+
+            # Binding template
+ raw_arg_names = ['arg' + str(i + 1) for i in range(0, len(variant.args))]
+ arg_names = []
+ w_signature = []
+ casted_arg_types = []
+ for arg_type, arg_name in zip(arg_types, raw_arg_names):
+ casted_arg_name = arg_name
+ if with_vec_from_js_array:
+ # Only support const vector reference as input parameter
+ match = re.search(r'const std::vector<(.*)>&', arg_type)
+ if match:
+ type_in_vect = match.group(1)
+ if type_in_vect != 'cv::Mat':
+ casted_arg_name = 'emscripten::vecFromJSArray<' + type_in_vect + '>(' + arg_name + ')'
+ arg_type = re.sub(r'std::vector<(.*)>', 'emscripten::val', arg_type)
+ w_signature.append(arg_type + ' ' + arg_name)
+ arg_names.append(casted_arg_name)
+ casted_arg_types.append(arg_type)
+
+ arg_types = casted_arg_types
+
+ # Argument list, signature
+ arg_names_casted = [c if a == b else c + '.as<' + a + '>()' for a, b, c in
+ zip(unwrapped_arg_types, arg_types, arg_names)]
+
+ # Add self object to the parameters
+ if class_info and not factory:
+ arg_types = [class_info.cname + '&'] + arg_types
+ w_signature = [class_info.cname + '& arg0 '] + w_signature
+
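+            # Emit one wrapper and one binding per trailing-default-argument count:
+            # variant j drops the last j defaulted parameters.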
+ for j in range(0, len(def_args) + 1):
+ postfix = ''
+ if j > 0:
+ postfix = '_' + str(j);
+
+ ###################################
+ # Wrapper
+ if factory: # TODO or static
+ name = class_info.cname+'::' if variant.class_name else ""
+ cpp_call_text = static_class_call_template.substitute(scope=name,
+ func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+ elif class_info:
+ cpp_call_text = class_call_template.substitute(obj='arg0',
+ func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+ else:
+ cpp_call_text = call_template.substitute(func=func.cname,
+ args=', '.join(arg_names[:len(arg_names)-j]))
+
+
+ wrapper_func_text = wrapper_function_template.substitute(ret_val=ret_type,
+ func=wrap_func_name+postfix,
+ signature=', '.join(w_signature[:len(w_signature)-j]),
+ cpp_call=cpp_call_text,
+ const='' if variant.is_const else '')
+
+ ###################################
+ # Binding
+ if class_info:
+ if factory:
+ # print("Factory Function: ", c_func_name, len(variant.args) - j, class_info.name)
+ if variant.is_pure_virtual:
+ # FIXME: workaround for pure virtual in constructor
+ # e.g. DescriptorMatcher_clone_wrapper
+ continue
+ # consider the default parameter variants
+ args_num = len(variant.args) - j
+ if args_num in class_info.constructor_arg_num:
+                            # FIXME: workaround for constructor overload with same args number
+ # e.g. DescriptorMatcher
+ continue
+ class_info.constructor_arg_num.add(args_num)
+ binding_text = ctr_template.substitute(const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_template = overload_class_static_function_template if variant.is_class_method else \
+ overload_class_function_template
+ binding_text = binding_template.substitute(js_name=js_func_name,
+ const='' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_text = overload_function_template.substitute(js_name=js_func_name,
+ cpp_name=c_func_name+postfix,
+ const='const' if variant.is_const else '',
+ ret=ret_type,
+ args=', '.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+
+ bindings.append(binding_text)
+ wrappers.append(wrapper_func_text)
+
+ return [bindings, wrappers]
+
+
+ def gen_function_binding(self, func, class_info):
+
+        if class_info is not None:
+            func_name = class_info.cname + '::' + func.cname
+        else:
+            func_name = func.cname
+
+ binding_text = None
+ binding_text_list = []
+
+ for index, variant in enumerate(func.variants):
+ factory = False
+ #TODO if variant.is_class_method and variant.rettype == ('Ptr<' + class_info.name + '>'):
+            if ((class_info is not None) and variant.rettype == ('Ptr<' + class_info.name + '>')) or (func.name.startswith("create") and variant.rettype):
+ factory = True
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ if base_class_name in self.classes:
+ self.classes[base_class_name].has_smart_ptr = True
+ else:
+                    print(base_class_name, 'not found in classes for registering smart pointer; using', class_info.name, 'instead')
+ self.classes[class_info.name].has_smart_ptr = True
+
+
+ # Return type
+ ret_type = 'void' if variant.rettype.strip() == '' else variant.rettype
+
+ ret_type = ret_type.strip()
+
+ if ret_type.startswith('Ptr'): #smart pointer
+ ptr_type = ret_type.replace('Ptr<', '').replace('>', '')
+ if ptr_type in type_dict:
+ ret_type = type_dict[ptr_type]
+ for key in type_dict:
+ if key in ret_type:
+ ret_type = ret_type.replace(key, type_dict[key])
+
+            if variant.constret and not ret_type.startswith('const'):
+                ret_type = 'const ' + ret_type
+            if variant.refret and not ret_type.endswith('&'):
+                ret_type += '&'
+
+ arg_types = []
+ orig_arg_types = []
+ def_args = []
+ for arg in variant.args:
+ if arg.tp in type_dict:
+ arg_type = type_dict[arg.tp]
+ else:
+ arg_type = arg.tp
+
+ #if arg.outputarg:
+ # arg_type += '&'
+ orig_arg_types.append(arg_type)
+ if with_default_params and arg.defval != '':
+ def_args.append(arg.defval)
+ arg_types.append(orig_arg_types[-1])
+
+            # Function attribute
+ func_attribs = ''
+ if '*' in ''.join(orig_arg_types):
+ func_attribs += ', allow_raw_pointers()'
+
+ if variant.is_pure_virtual:
+ func_attribs += ', pure_virtual()'
+
+ #TODO better naming
+ #if variant.name in self.jsfunctions:
+ #else
+ js_func_name = variant.name
+
+
+ c_func_name = func.cname if (factory and variant.is_class_method == False) else func_name
+
+
+ ################################### Binding
+ for j in range(0, len(def_args) + 1):
+ postfix = ''
+ if j > 0:
+ postfix = '_' + str(j);
+ if factory:
+ binding_text = ctr_template.substitute(const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+ args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+ else:
+ binding_template = overload_class_static_function_template if variant.is_class_method else \
+ overload_function_template if class_info == None else overload_class_function_template
+ binding_text = binding_template.substitute(js_name=js_func_name,
+ const='const' if variant.is_const else '',
+ cpp_name=c_func_name+postfix,
+ ret=ret_type,
+                                                           args=','.join(arg_types[:len(arg_types)-j]),
+ optional=func_attribs)
+
+ binding_text_list.append(binding_text)
+
+ return binding_text_list
+
+ def print_decls(self, decls):
+ """
+        Prints the list of declarations, retrieved by the parse() method
+ """
+ for d in decls:
+ print(d[0], d[1], ";".join(d[2]))
+ for a in d[3]:
+ print(" ", a[0], a[1], a[2], end="")
+ if a[3]:
+ print("; ".join(a[3]))
+ else:
+ print()
+
+ def gen(self, dst_file, src_files, core_bindings):
+ # step 1: scan the headers and extract classes, enums and functions
+ for hdr in src_files:
+ decls = self.parser.parse(hdr)
+ # print(hdr);
+ # self.print_decls(decls);
+ if len(decls) == 0:
+ continue
+ for decl in decls:
+ name = decl[0]
+ type = name[:name.find(" ")]
+ if type == "struct" or type == "class": # class/structure case
+ name = name[name.find(" ") + 1:].strip()
+ self.add_class(type, name, decl)
+ elif name.startswith("enum"): # enumerations
+ self.add_enum(decl)
+ elif name.startswith("const"):
+ # constant
+ self.add_const(name.replace("const ", "").strip(), decl)
+ else: # class/global function
+ self.add_func(decl)
+
+ # step 2: generate bindings
+ # Global functions
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, func in sorted(ns.funcs.items()):
+ if name in ignore_list:
+ continue
+                if name not in white_list['']:
+ continue
+
+ ext_cnst = False
+ # Check if the method is an external constructor
+ for variant in func.variants:
+ if "Ptr<" in variant.rettype:
+
+ # Register the smart pointer
+ base_class_name = variant.rettype
+ base_class_name = base_class_name.replace("Ptr<","").replace(">","").strip()
+ self.classes[base_class_name].has_smart_ptr = True
+
+ # Adds the external constructor
+ class_name = func.name.replace("create", "")
+                        if class_name not in self.classes:
+ self.classes[base_class_name].methods[func.cname] = func
+ else:
+ self.classes[class_name].methods[func.cname] = func
+ ext_cnst = True
+ if ext_cnst:
+ continue
+
+ if with_wrapped_functions:
+ binding, wrapper = self.gen_function_binding_with_wrapper(func, class_info=None)
+ self.bindings += binding
+ self.wrapper_funcs += wrapper
+ else:
+ binding = self.gen_function_binding(func, class_info=None)
+                    self.bindings += binding
+
+        # step 3: generate code for the classes and their methods
+ class_list = list(self.classes.items())
+
+ for name, class_info in class_list:
+ class_bindings = []
+            if name not in white_list:
+ continue
+
+ # Generate bindings for methods
+            for method_name, method in class_info.methods.items():
+ if method.cname in ignore_list:
+ continue
+                if method.name not in white_list[method.class_name]:
+ continue
+ if method.is_constructor:
+ for variant in method.variants:
+ args = []
+ for arg in variant.args:
+ args.append(arg.tp)
+ # print('Constructor: ', class_info.name, len(variant.args))
+ args_num = len(variant.args)
+ if args_num in class_info.constructor_arg_num:
+ continue
+ class_info.constructor_arg_num.add(args_num)
+ class_bindings.append(constructor_template.substitute(signature=', '.join(args)))
+ else:
+ if with_wrapped_functions and (len(method.variants) > 1 or len(method.variants[0].args)>0 or "String" in method.variants[0].rettype):
+ binding, wrapper = self.gen_function_binding_with_wrapper(method, class_info=class_info)
+ self.wrapper_funcs = self.wrapper_funcs + wrapper
+ class_bindings = class_bindings + binding
+ else:
+ binding = self.gen_function_binding(method, class_info=class_info)
+ class_bindings = class_bindings + binding
+
+            # Register smart pointer
+ if class_info.has_smart_ptr:
+ class_bindings.append(smart_ptr_reg_template.substitute(cname=class_info.cname, name=class_info.name))
+
+ # Attach external constructors
+ # for method_name, method in class_info.ext_constructors.iteritems():
+ # print("ext constructor", method_name)
+ #if class_info.ext_constructors:
+
+ # Generate bindings for properties
+ for property in class_info.props:
+ class_bindings.append(class_property_template.substitute(js_name=property.name, cpp_name='::'.join(
+ [class_info.cname, property.name])))
+
+ dv = ''
+ base = Template("""base<$base$isPoly>""")
+
+            assert len(class_info.bases) <= 1, "multiple inheritance not supported"
+
+ if len(class_info.bases) == 1:
+ dv = "," + base.substitute(base=', '.join(class_info.bases),
+ isPoly = " ,true" if class_info.name=="Feature2D" else "")
+
+ self.bindings.append(class_template.substitute(cpp_name=class_info.cname,
+ js_name=name,
+ class_templates=''.join(class_bindings),
+ derivation=dv))
+
+ if export_enums:
+ # step 4: generate bindings for enums
+ # TODO anonymous enums are ignored for now.
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, enum in sorted(ns.enums.items()):
+ if not name.endswith('.anonymous'):
+ name = name.replace("cv.", "")
+ enum_values = []
+ for enum_val in enum:
+ value = enum_val[0][enum_val[0].rfind(".")+1:]
+ enum_values.append(enum_item_template.substitute(val=value,
+ cpp_val=name.replace('.', '::')+'::'+value))
+
+ self.bindings.append(enum_template.substitute(cpp_name=name.replace(".", "::"),
+ js_name=name.replace(".", "_"),
+ enum_items=''.join(enum_values)))
+ else:
+ print(name)
+ #TODO: represent anonymous enums with constants
+
+ if export_consts:
+ # step 5: generate bindings for consts
+ for ns_name, ns in sorted(self.namespaces.items()):
+ if ns_name.split('.')[0] != 'cv':
+ continue
+ for name, const in sorted(ns.consts.items()):
+ # print("Gen consts: ", name, const)
+ self.bindings.append(const_template.substitute(js_name=name, value=const))
+
+ with open(core_bindings) as f:
+ ret = f.read()
+
+ defis = '\n'.join(self.wrapper_funcs)
+ ret += wrapper_codes_template.substitute(ns=wrapper_namespace, defs=defis)
+ ret += emscripten_binding_template.substitute(binding_name='testBinding', bindings=''.join(self.bindings))
+
+
+ # print(ret)
+        with open(dst_file, "w") as text_file:
+            text_file.write(ret)
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 5:
+        print("Usage:\n", \
+            os.path.basename(sys.argv[0]), \
+            "<full path to hdr_parser.py> <bindings.cpp> <headers.txt> <core_bindings.cpp>")
+ print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv]))
+ exit(0)
+
+ dstdir = "."
+ hdr_parser_path = os.path.abspath(sys.argv[1])
+ if hdr_parser_path.endswith(".py"):
+ hdr_parser_path = os.path.dirname(hdr_parser_path)
+ sys.path.append(hdr_parser_path)
+ import hdr_parser
+
+ bindingsCpp = sys.argv[2]
+    with open(sys.argv[3], 'r') as f:
+        headers = f.read().split(';')
+ coreBindings = sys.argv[4]
+ generator = JSWrapperGenerator()
+ generator.gen(bindingsCpp, headers, coreBindings)
diff --git a/modules/js/src/helpers.js b/modules/js/src/helpers.js
new file mode 100644
index 0000000000..08d1a89b2b
--- /dev/null
+++ b/modules/js/src/helpers.js
@@ -0,0 +1,399 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+Module['imread'] = function(imageSource) {
+ var img = null;
+ if (typeof imageSource === 'string') {
+ img = document.getElementById(imageSource);
+ } else {
+ img = imageSource;
+ }
+ var canvas = null;
+ var ctx = null;
+ if (img instanceof HTMLImageElement) {
+ canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ ctx = canvas.getContext('2d');
+ ctx.drawImage(img, 0, 0, img.width, img.height);
+ } else if (img instanceof HTMLCanvasElement) {
+ canvas = img;
+ ctx = canvas.getContext('2d');
+ } else {
+        throw new Error('Please provide a valid canvas or img element, or its id.');
+ }
+
+ var imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ return cv.matFromImageData(imgData);
+};
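+
+// Minimal usage sketch (assumes an <img id="imageSrc"> element exists in the page):
+//   let mat = cv.imread('imageSrc');
+//   // ... process mat ...
+//   mat.delete();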
+
+Module['imshow'] = function(canvasSource, mat) {
+ var canvas = null;
+ if (typeof canvasSource === 'string') {
+ canvas = document.getElementById(canvasSource);
+ } else {
+ canvas = canvasSource;
+ }
+ if (!(canvas instanceof HTMLCanvasElement)) {
+        throw new Error('Please provide a valid canvas element or its id.');
+ }
+ if (!(mat instanceof cv.Mat)) {
+        throw new Error('Please provide a valid cv.Mat instance.');
+ }
+
+ // convert the mat type to cv.CV_8U
+ var img = new cv.Mat();
+ var depth = mat.type()%8;
+ var scale = depth <= cv.CV_8S? 1.0 : (depth <= cv.CV_32S? 1.0/256.0 : 255.0);
+ var shift = (depth === cv.CV_8S || depth === cv.CV_16S)? 128.0 : 0.0;
+ mat.convertTo(img, cv.CV_8U, scale, shift);
+
+ // convert the img type to cv.CV_8UC4
+ switch (img.type()) {
+ case cv.CV_8UC1:
+ cv.cvtColor(img, img, cv.COLOR_GRAY2RGBA);
+ break;
+ case cv.CV_8UC3:
+ cv.cvtColor(img, img, cv.COLOR_RGB2RGBA);
+ break;
+ case cv.CV_8UC4:
+ break;
+ default:
+            throw new Error('Bad number of channels (Source image must have 1, 3 or 4 channels)');
+ }
+ var imgData = new ImageData(new Uint8ClampedArray(img.data), img.cols, img.rows);
+ var ctx = canvas.getContext('2d');
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ canvas.width = imgData.width;
+ canvas.height = imgData.height;
+ ctx.putImageData(imgData, 0, 0);
+ img.delete();
+};
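+
+// Minimal usage sketch (assumes a <canvas id="canvasOutput"> element exists):
+//   cv.imshow('canvasOutput', mat); // renders the cv.Mat `mat` into the canvas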
+
+Module['VideoCapture'] = function(videoSource) {
+ var video = null;
+ if (typeof videoSource === 'string') {
+ video = document.getElementById(videoSource);
+ } else {
+ video = videoSource;
+ }
+ if (!(video instanceof HTMLVideoElement)) {
+        throw new Error('Please provide a valid video element or its id.');
+ }
+ var canvas = document.createElement('canvas');
+ canvas.width = video.width;
+ canvas.height = video.height;
+ var ctx = canvas.getContext('2d');
+ this.video = video;
+ this.read = function(frame) {
+ if (!(frame instanceof cv.Mat)) {
+            throw new Error('Please provide a valid cv.Mat instance.');
+ }
+ if (frame.type() !== cv.CV_8UC4) {
+            throw new Error('Bad type of input mat: the type should be cv.CV_8UC4.');
+ }
+ if (frame.cols !== video.width || frame.rows !== video.height) {
+            throw new Error('Bad size of input mat: the size should be the same as the video.');
+ }
+ ctx.drawImage(video, 0, 0, video.width, video.height);
+ frame.data.set(ctx.getImageData(0, 0, video.width, video.height).data);
+ };
+};
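+
+// Minimal usage sketch (assumes a playing <video id="videoInput"> element whose
+// width/height attributes are set):
+//   let video = document.getElementById('videoInput');
+//   let cap = new cv.VideoCapture(video);
+//   let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
+//   cap.read(frame); // copies the current video frame into `frame`
+//   frame.delete();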
+
+function Range(start, end) {
+ this.start = typeof(start) === 'undefined' ? 0 : start;
+ this.end = typeof(end) === 'undefined' ? 0 : end;
+}
+
+Module['Range'] = Range;
+
+function Point(x, y) {
+ this.x = typeof(x) === 'undefined' ? 0 : x;
+ this.y = typeof(y) === 'undefined' ? 0 : y;
+}
+
+Module['Point'] = Point;
+
+function Size(width, height) {
+ this.width = typeof(width) === 'undefined' ? 0 : width;
+ this.height = typeof(height) === 'undefined' ? 0 : height;
+}
+
+Module['Size'] = Size;
+
+function Rect() {
+ switch (arguments.length) {
+ case 0: {
+ // new cv.Rect()
+ this.x = 0;
+ this.y = 0;
+ this.width = 0;
+ this.height = 0;
+ break;
+ }
+ case 1: {
+ // new cv.Rect(rect)
+ var rect = arguments[0];
+ this.x = rect.x;
+ this.y = rect.y;
+ this.width = rect.width;
+ this.height = rect.height;
+ break;
+ }
+ case 2: {
+ // new cv.Rect(point, size)
+ var point = arguments[0];
+ var size = arguments[1];
+ this.x = point.x;
+ this.y = point.y;
+ this.width = size.width;
+ this.height = size.height;
+ break;
+ }
+ case 4: {
+ // new cv.Rect(x, y, width, height)
+ this.x = arguments[0];
+ this.y = arguments[1];
+ this.width = arguments[2];
+ this.height = arguments[3];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['Rect'] = Rect;
+
+function RotatedRect() {
+ switch (arguments.length) {
+ case 0: {
+ this.center = {x: 0, y: 0};
+ this.size = {width: 0, height: 0};
+ this.angle = 0;
+ break;
+ }
+ case 3: {
+ this.center = arguments[0];
+ this.size = arguments[1];
+ this.angle = arguments[2];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+RotatedRect.points = function(obj) {
+ return Module.rotatedRectPoints(obj);
+};
+
+RotatedRect.boundingRect = function(obj) {
+ return Module.rotatedRectBoundingRect(obj);
+};
+
+RotatedRect.boundingRect2f = function(obj) {
+ return Module.rotatedRectBoundingRect2f(obj);
+};
+
+Module['RotatedRect'] = RotatedRect;
+
+function Scalar(v0, v1, v2, v3) {
+ this.push(typeof(v0) === 'undefined' ? 0 : v0);
+ this.push(typeof(v1) === 'undefined' ? 0 : v1);
+ this.push(typeof(v2) === 'undefined' ? 0 : v2);
+ this.push(typeof(v3) === 'undefined' ? 0 : v3);
+}
+
+Scalar.prototype = new Array; // eslint-disable-line no-array-constructor
+
+Scalar.all = function(v) {
+ return new Scalar(v, v, v, v);
+};
+
+Module['Scalar'] = Scalar;
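+
+// Usage sketch: a cv.Scalar behaves like a 4-element array of channel values, e.g.
+//   let red = new cv.Scalar(255, 0, 0, 255);
+//   let gray = cv.Scalar.all(128); // -> [128, 128, 128, 128]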
+
+function MinMaxLoc() {
+ switch (arguments.length) {
+ case 0: {
+ this.minVal = 0;
+ this.maxVal = 0;
+ this.minLoc = new Point();
+ this.maxLoc = new Point();
+ break;
+ }
+ case 4: {
+ this.minVal = arguments[0];
+ this.maxVal = arguments[1];
+ this.minLoc = arguments[2];
+ this.maxLoc = arguments[3];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['MinMaxLoc'] = MinMaxLoc;
+
+function Circle() {
+ switch (arguments.length) {
+ case 0: {
+ this.center = new Point();
+ this.radius = 0;
+ break;
+ }
+ case 2: {
+ this.center = arguments[0];
+ this.radius = arguments[1];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['Circle'] = Circle;
+
+function TermCriteria() {
+ switch (arguments.length) {
+ case 0: {
+ this.type = 0;
+ this.maxCount = 0;
+ this.epsilon = 0;
+ break;
+ }
+ case 3: {
+ this.type = arguments[0];
+ this.maxCount = arguments[1];
+ this.epsilon = arguments[2];
+ break;
+ }
+ default: {
+ throw new Error('Invalid arguments');
+ }
+ }
+}
+
+Module['TermCriteria'] = TermCriteria;
+
+Module['matFromArray'] = function(rows, cols, type, array) {
+ var mat = new cv.Mat(rows, cols, type);
+ switch (type) {
+ case cv.CV_8U:
+ case cv.CV_8UC1:
+ case cv.CV_8UC2:
+ case cv.CV_8UC3:
+ case cv.CV_8UC4: {
+ mat.data.set(array);
+ break;
+ }
+ case cv.CV_8S:
+ case cv.CV_8SC1:
+ case cv.CV_8SC2:
+ case cv.CV_8SC3:
+ case cv.CV_8SC4: {
+ mat.data8S.set(array);
+ break;
+ }
+ case cv.CV_16U:
+ case cv.CV_16UC1:
+ case cv.CV_16UC2:
+ case cv.CV_16UC3:
+ case cv.CV_16UC4: {
+ mat.data16U.set(array);
+ break;
+ }
+ case cv.CV_16S:
+ case cv.CV_16SC1:
+ case cv.CV_16SC2:
+ case cv.CV_16SC3:
+ case cv.CV_16SC4: {
+ mat.data16S.set(array);
+ break;
+ }
+ case cv.CV_32S:
+ case cv.CV_32SC1:
+ case cv.CV_32SC2:
+ case cv.CV_32SC3:
+ case cv.CV_32SC4: {
+ mat.data32S.set(array);
+ break;
+ }
+ case cv.CV_32F:
+ case cv.CV_32FC1:
+ case cv.CV_32FC2:
+ case cv.CV_32FC3:
+ case cv.CV_32FC4: {
+ mat.data32F.set(array);
+ break;
+ }
+ case cv.CV_64F:
+ case cv.CV_64FC1:
+ case cv.CV_64FC2:
+ case cv.CV_64FC3:
+ case cv.CV_64FC4: {
+ mat.data64F.set(array);
+ break;
+ }
+ default: {
+ throw new Error('Type is unsupported');
+ }
+ }
+ return mat;
+};
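+
+// Usage sketch: build a 2x2 single-channel byte matrix from a plain JS array.
+//   let mat = cv.matFromArray(2, 2, cv.CV_8UC1, [1, 2, 3, 4]);
+//   // mat.data is now Uint8Array [1, 2, 3, 4]
+//   mat.delete();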
+
+Module['matFromImageData'] = function(imageData) {
+ var mat = new cv.Mat(imageData.height, imageData.width, cv.CV_8UC4);
+ mat.data.set(imageData.data);
+ return mat;
+};
diff --git a/modules/js/src/make_umd.py b/modules/js/src/make_umd.py
new file mode 100644
index 0000000000..4bca6c152b
--- /dev/null
+++ b/modules/js/src/make_umd.py
@@ -0,0 +1,106 @@
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+###############################################################################
+import sys
+
+def make_umd(opencvjs, cvjs):
+    # Read and write in text mode: the UMD wrapper below is a str, which
+    # binary-mode file objects would reject under Python 3.
+    with open(opencvjs, 'r') as src:
+        content = src.read()
+    # inspired by https://github.com/umdjs/umd/blob/95563fd6b46f06bda0af143ff67292e7f6ede6b7/templates/returnExportsGlobal.js
+    with open(cvjs, 'w') as dst:
+        dst.write(("""
+(function (root, factory) {
+ if (typeof define === 'function' && define.amd) {
+ // AMD. Register as an anonymous module.
+ define(function () {
+ return (root.cv = factory());
+ });
+ } else if (typeof module === 'object' && module.exports) {
+ // Node. Does not work with strict CommonJS, but
+ // only CommonJS-like environments that support module.exports,
+ // like Node.
+ module.exports = factory();
+ } else {
+ // Browser globals
+ root.cv = factory();
+ }
+}(this, function () {
+ %s
+ if (typeof Module === 'undefined')
+ Module = {};
+ return cv(Module);
+}));
+ """ % (content)).lstrip())
+
+if __name__ == "__main__":
+ if len(sys.argv) > 2:
+ opencvjs = sys.argv[1]
+ cvjs = sys.argv[2]
+        make_umd(opencvjs, cvjs)
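+
+# Usage sketch (hypothetical paths): wrap the Emscripten output into a UMD module:
+#   python make_umd.py build_js/bin/opencv.js build_js/bin/cv.js
+# The wrapped file can then be consumed via AMD, Node's require(), or a <script> tag.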
diff --git a/modules/js/src/templates.py b/modules/js/src/templates.py
new file mode 100644
index 0000000000..05dc9bbc37
--- /dev/null
+++ b/modules/js/src/templates.py
@@ -0,0 +1,192 @@
+###############################################################################
+#
+# IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+#
+# By downloading, copying, installing or using the software you agree to this license.
+# If you do not agree to this license, do not download, install,
+# copy or use the software.
+#
+#
+# License Agreement
+# For Open Source Computer Vision Library
+#
+# Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+# Third party copyrights are property of their respective owners.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistribution's of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistribution's in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * The name of the copyright holders may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as is" and
+# any express or implied warranties, including, but not limited to, the implied
+# warranties of merchantability and fitness for a particular purpose are disclaimed.
+# In no event shall the Intel Corporation or contributors be liable for any direct,
+# indirect, incidental, special, exemplary, or consequential damages
+# (including, but not limited to, procurement of substitute goods or services;
+# loss of use, data, or profits; or business interruption) however caused
+# and on any theory of liability, whether in contract, strict liability,
+# or tort (including negligence or otherwise) arising in any way out of
+# the use of this software, even if advised of the possibility of such damage.
+#
+
+###############################################################################
+# AUTHOR: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+#
+# LICENSE AGREEMENT
+# Copyright (c) 2015, 2015 The Regents of the University of California (Regents)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the University nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+##############################################################################
+
+from string import Template
+
+wrapper_codes_template = Template("namespace $ns {\n$defs\n}")
+
+call_template = Template("""$func($args)""")
+class_call_template = Template("""$obj.$func($args)""")
+static_class_call_template = Template("""$scope$func($args)""")
+
+wrapper_function_template = Template(""" $ret_val $func($signature)$const {
+ return $cpp_call;
+ }
+ """)
+
+wrapper_function_with_def_args_template = Template(""" $ret_val $func($signature)$const {
+ $check_args
+ }
+ """)
+
+wrapper_overload_def_values = [
+ Template("""return $cpp_call;"""), Template("""if ($arg0.isUndefined())
+ return $cpp_call;
+ else
+ $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined() )
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined() && $arg6.isUndefined() )
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined() &&
+ $arg8.isUndefined())
+ return $cpp_call;
+ else $next"""),
+ Template("""if ($arg0.isUndefined() && $arg1.isUndefined() && $arg2.isUndefined() && $arg3.isUndefined() &&
+ $arg4.isUndefined() && $arg5.isUndefined()&& $arg6.isUndefined() && $arg7.isUndefined()&&
+ $arg8.isUndefined() && $arg9.isUndefined())
+ return $cpp_call;
+ else $next""")]
+
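+# Each template above covers one count of trailing default arguments: the generated
+# wrapper checks which trailing emscripten::val arguments were left undefined and
+# forwards to the matching C++ call. An illustrative expansion for a function with
+# one default argument (assumed names):
+#   if (anchor.isUndefined())
+#       return cv::blur(src, dst, ksize);
+#   else
+#       return cv::blur(src, dst, ksize, anchor);
+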
+emscripten_binding_template = Template("""
+
+EMSCRIPTEN_BINDINGS($binding_name) {$bindings
+}
+""")
+
+simple_function_template = Template("""
+ emscripten::function("$js_name", &$cpp_name);
+""")
+
+smart_ptr_reg_template = Template("""
+        .smart_ptr<Ptr<$cname>>("Ptr<$name>")
+""")
+
+overload_function_template = Template("""
+ function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional);
+""")
+
+overload_class_function_template = Template("""
+ .function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+overload_class_static_function_template = Template("""
+ .class_function("$js_name", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+class_property_template = Template("""
+ .property("$js_name", &$cpp_name)""")
+
+ctr_template = Template("""
+ .constructor(select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+smart_ptr_ctr_overload_template = Template("""
+ .smart_ptr_constructor("$ptr_type", select_overload<$ret($args)$const>(&$cpp_name)$optional)""")
+
+function_template = Template("""
+ .function("$js_name", &$cpp_name)""")
+
+static_function_template = Template("""
+ .class_function("$js_name", &$cpp_name)""")
+
+constructor_template = Template("""
+ .constructor<$signature>()""")
+
+enum_item_template = Template("""
+ .value("$val", $cpp_val)""")
+
+enum_template = Template("""
+ emscripten::enum_<$cpp_name>("$js_name")$enum_items;
+""")
+
+const_template = Template("""
+ constant("$js_name", +$value);
+""")
+
+vector_template = Template("""
+ emscripten::register_vector<$cType>("$js_name");
+""")
+
+map_template = Template("""
+    emscripten::register_map<$cKeyType,$cValueType>("$js_name");
+""")
+
+class_template = Template("""
+ emscripten::class_<$cpp_name $derivation>("$js_name")$class_templates;
+""")
diff --git a/modules/js/test/.eslintrc.json b/modules/js/test/.eslintrc.json
new file mode 100644
index 0000000000..e7d7209710
--- /dev/null
+++ b/modules/js/test/.eslintrc.json
@@ -0,0 +1,12 @@
+{
+ "extends": "google",
+ "parserOptions": {
+ "ecmaVersion": 6
+ },
+ "rules": {
+ "max-len": ["error", 100, {"ignoreUrls": true}],
+ "quotes": ["error", "single"],
+ "indent": ["error", 4, {"ArrayExpression": "first",
+ "CallExpression": {"arguments": "first"}}]
+ }
+}
diff --git a/modules/js/test/package.json b/modules/js/test/package.json
new file mode 100644
index 0000000000..cb303bac17
--- /dev/null
+++ b/modules/js/test/package.json
@@ -0,0 +1,26 @@
+{
+ "name": "opencv_js_tests",
+ "description": "Tests for opencv js bindings",
+ "version": "1.0.0",
+ "dependencies" : {
+ "qunit" : "latest"
+ },
+ "devDependencies": {
+ "eslint" : "latest",
+ "eslint-config-google" : "latest"
+ },
+ "scripts": {
+ "test": "node tests.js"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/opencv/opencv.git"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "BSD-4-Clause",
+ "bugs": {
+ "url": "https://github.com/opencv/opencv/issues"
+ },
+ "homepage": "https://github.com/opencv/opencv"
+}
diff --git a/modules/js/test/test_imgproc.js b/modules/js/test/test_imgproc.js
new file mode 100644
index 0000000000..214a073666
--- /dev/null
+++ b/modules/js/test/test_imgproc.js
@@ -0,0 +1,807 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+    // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Image Processing', {});
+
+QUnit.test('test_imgProc', function(assert) {
+ // calcHist
+ {
+ let vec1 = new cv.Mat.ones(new cv.Size(20, 20), cv.CV_8UC1); // eslint-disable-line new-cap
+ let source = new cv.MatVector();
+ source.push_back(vec1);
+ let channels = [0];
+ let histSize = [256];
+ let ranges =[0, 256];
+
+ let hist = new cv.Mat();
+ let mask = new cv.Mat();
+ let binSize = cv._malloc(4);
+ let binView = new Int32Array(cv.HEAP8.buffer, binSize);
+ binView[0] = 10;
+ cv.calcHist(source, channels, mask, hist, histSize, ranges, false);
+
+        // hist should contain an N x 1 array.
+ let size = hist.size();
+ assert.equal(size.height, 256);
+ assert.equal(size.width, 1);
+
+ // default parameters
+ cv.calcHist(source, channels, mask, hist, histSize, ranges);
+ size = hist.size();
+ assert.equal(size.height, 256);
+ assert.equal(size.width, 1);
+
+ // Do we need to verify data in histogram?
+ // let dataView = hist.data;
+
+ // Free resource
+ cv._free(binSize);
+ mask.delete();
+ hist.delete();
+ }
+
+ // cvtColor
+ {
+ let source = new cv.Mat(10, 10, cv.CV_8UC3);
+ let dest = new cv.Mat();
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2GRAY, 0);
+ assert.equal(dest.channels(), 1);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2GRAY);
+ assert.equal(dest.channels(), 1);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2BGRA, 0);
+ assert.equal(dest.channels(), 4);
+
+ cv.cvtColor(source, dest, cv.COLOR_BGR2BGRA);
+ assert.equal(dest.channels(), 4);
+
+ dest.delete();
+ source.delete();
+ }
+ // equalizeHist
+ {
+ let source = new cv.Mat(10, 10, cv.CV_8UC1);
+ let dest = new cv.Mat();
+
+ cv.equalizeHist(source, dest);
+
+        // equalizeHist changes the content of an image, but does not alter its
+        // metadata.
+ assert.equal(source.channels(), dest.channels());
+ assert.equal(source.type(), dest.type());
+
+ dest.delete();
+ source.delete();
+ }
+});
+
+QUnit.test('test_segmentation', function(assert) {
+ const THRESHOLD = 127.0;
+ const THRESHOLD_MAX = 210.0;
+
+ // threshold
+ {
+ let source = new cv.Mat(1, 5, cv.CV_8UC1);
+ let sourceView = source.data;
+ sourceView[0] = 0; // < threshold
+ sourceView[1] = 100; // < threshold
+ sourceView[2] = 200; // > threshold
+
+ let dest = new cv.Mat();
+
+ cv.threshold(source, dest, THRESHOLD, THRESHOLD_MAX, cv.THRESH_BINARY);
+
+ let destView = dest.data;
+ assert.equal(destView[0], 0);
+ assert.equal(destView[1], 0);
+ assert.equal(destView[2], THRESHOLD_MAX);
+ }
+
+ // adaptiveThreshold
+ {
+ let source = cv.Mat.zeros(1, 5, cv.CV_8UC1);
+ let sourceView = source.data;
+ sourceView[0] = 50;
+ sourceView[1] = 150;
+ sourceView[2] = 200;
+
+ let dest = new cv.Mat();
+ const C = 0;
+ const blockSize = 3;
+ cv.adaptiveThreshold(source, dest, THRESHOLD_MAX,
+ cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, blockSize, C);
+
+ let destView = dest.data;
+ assert.equal(destView[0], 0);
+ assert.equal(destView[1], THRESHOLD_MAX);
+ assert.equal(destView[2], THRESHOLD_MAX);
+ }
+});
+
+QUnit.test('test_shape', function(assert) {
+ // moments
+ {
+ let points = new cv.Mat(1, 4, cv.CV_32SC2);
+ let data32S = points.data32S;
+ data32S[0]=50;
+ data32S[1]=56;
+ data32S[2]=53;
+ data32S[3]=53;
+ data32S[4]=46;
+ data32S[5]=54;
+ data32S[6]=49;
+ data32S[7]=51;
+
+ let m = cv.moments(points, false);
+ let area = cv.contourArea(points, false);
+
+ assert.equal(m.m00, 0);
+ assert.equal(m.m01, 0);
+ assert.equal(m.m10, 0);
+ assert.equal(area, 0);
+
+ // default parameters
+ m = cv.moments(points);
+ area = cv.contourArea(points);
+ assert.equal(m.m00, 0);
+ assert.equal(m.m01, 0);
+ assert.equal(m.m10, 0);
+ assert.equal(area, 0);
+
+ points.delete();
+ }
+});
+
+QUnit.test('test_min_enclosing', function(assert) {
+ {
+ let points = new cv.Mat(4, 1, cv.CV_32FC2);
+
+ points.data32F[0] = 0;
+ points.data32F[1] = 0;
+ points.data32F[2] = 1;
+ points.data32F[3] = 0;
+ points.data32F[4] = 1;
+ points.data32F[5] = 1;
+ points.data32F[6] = 0;
+ points.data32F[7] = 1;
+
+ let circle = cv.minEnclosingCircle(points);
+
+ assert.deepEqual(circle.center, {x: 0.5, y: 0.5});
+ assert.ok(Math.abs(circle.radius - Math.sqrt(2) / 2) < 0.001);
+
+ points.delete();
+ }
+});
+
+QUnit.test('test_filter', function(assert) {
+ // blur
+ {
+ let mat1 = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.blur(mat1, mat2, {height: 3, width: 3}, {x: -1, y: -1}, cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ cv.blur(mat1, mat2, {height: 3, width: 3}, {x: -1, y: -1});
+
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ cv.blur(mat1, mat2, {height: 3, width: 3});
+
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 5);
+ assert.equal(size.width, 5);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // GaussianBlur
+ {
+ let mat1 = cv.Mat.ones(7, 7, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+
+ cv.GaussianBlur(mat1, mat2, new cv.Size(3, 3), 0, 0, // eslint-disable-line new-cap
+ cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 7);
+ assert.equal(size.width, 7);
+ }
+
+ // medianBlur
+ {
+ let mat1 = cv.Mat.ones(9, 9, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.medianBlur(mat1, mat2, 3);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 9);
+ assert.equal(size.width, 9);
+ }
+
+ // Transpose
+ {
+ let mat1 = cv.Mat.eye(9, 9, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.transpose(mat1, mat2);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 9);
+ assert.equal(size.width, 9);
+ }
+
+ // bilateralFilter
+ {
+ let mat1 = cv.Mat.ones(11, 11, cv.CV_8UC3);
+ let mat2 = new cv.Mat();
+
+ cv.bilateralFilter(mat1, mat2, 3, 6, 1.5, cv.BORDER_DEFAULT);
+
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+
+ // default parameters
+ cv.bilateralFilter(mat1, mat2, 3, 6, 1.5);
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 3);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // Watershed
+ {
+ let mat = cv.Mat.ones(11, 11, cv.CV_8UC3);
+ let out = new cv.Mat(11, 11, cv.CV_32SC1);
+
+ cv.watershed(mat, out);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+ mat.delete();
+ out.delete();
+ }
+
+ // Concat
+ {
+ let mat = cv.Mat.ones({height: 10, width: 5}, cv.CV_8UC3);
+ let mat2 = cv.Mat.eye({height: 10, width: 5}, cv.CV_8UC3);
+ let mat3 = cv.Mat.eye({height: 10, width: 5}, cv.CV_8UC3);
+
+
+ let out = new cv.Mat();
+ let input = new cv.MatVector();
+ input.push_back(mat);
+ input.push_back(mat2);
+ input.push_back(mat3);
+
+ cv.vconcat(input, out);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 3);
+ assert.equal(size.height, 30);
+ assert.equal(size.width, 5);
+ assert.equal(out.elemSize1(), 1);
+
+ cv.hconcat(input, out);
+
+ // Verify result.
+ size = out.size();
+ assert.equal(out.channels(), 3);
+ assert.equal(size.height, 10);
+ assert.equal(size.width, 15);
+ assert.equal(out.elemSize1(), 1);
+
+ input.delete();
+ out.delete();
+ }
+
+
+    // distanceTransform variants
+ {
+ let mat = cv.Mat.ones(11, 11, cv.CV_8UC1);
+ let out = new cv.Mat(11, 11, cv.CV_32FC1);
+ let labels = new cv.Mat(11, 11, cv.CV_32FC1);
+ const maskSize = 3;
+ cv.distanceTransform(mat, out, cv.DIST_L2, maskSize, cv.CV_32F);
+
+ // Verify result.
+ let size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+
+ cv.distanceTransformWithLabels(mat, out, labels, cv.DIST_L2, maskSize,
+ cv.DIST_LABEL_CCOMP);
+
+ // Verify result.
+ size = out.size();
+ assert.equal(out.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(out.elemSize1(), 4);
+
+ size = labels.size();
+ assert.equal(labels.channels(), 1);
+ assert.equal(size.height, 11);
+ assert.equal(size.width, 11);
+ assert.equal(labels.elemSize1(), 4);
+
+ mat.delete();
+ out.delete();
+ labels.delete();
+ }
+
+ // Min, Max
+ {
+ let data1 = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ let data2 = new Uint8Array([0, 4, 0, 8, 0, 12, 0, 16, 0]);
+
+ let expectedMin = new Uint8Array([0, 2, 0, 4, 0, 6, 0, 8, 0]);
+ let expectedMax = new Uint8Array([1, 4, 3, 8, 5, 12, 7, 16, 9]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+
+ let mat3 = new cv.Mat();
+
+ cv.min(mat1, mat2, mat3);
+ // Verify result.
+ let size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedMin);
+
+
+ cv.max(mat1, mat2, mat3);
+ // Verify result.
+ size = mat2.size();
+ assert.equal(mat2.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedMax);
+
+ cv._free(dataPtr);
+ cv._free(dataPtr2);
+ }
+
+ // Bitwise operations
+ {
+ let data1 = new Uint8Array([0, 1, 2, 4, 8, 16, 32, 64, 128]);
+ let data2 = new Uint8Array([255, 255, 255, 255, 255, 255, 255, 255, 255]);
+
+ let expectedAnd = new Uint8Array([0, 1, 2, 4, 8, 16, 32, 64, 128]);
+ let expectedOr = new Uint8Array([255, 255, 255, 255, 255, 255, 255, 255, 255]);
+ let expectedXor = new Uint8Array([255, 254, 253, 251, 247, 239, 223, 191, 127]);
+
+ let expectedNot = new Uint8Array([255, 254, 253, 251, 247, 239, 223, 191, 127]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+
+ let mat3 = new cv.Mat();
+ let none = new cv.Mat();
+
+ cv.bitwise_not(mat1, mat3, none);
+ // Verify result.
+ let size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedNot);
+
+ cv.bitwise_and(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedAnd);
+
+
+ cv.bitwise_or(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedOr);
+
+ cv.bitwise_xor(mat1, mat2, mat3, none);
+ // Verify result.
+ size = mat3.size();
+ assert.equal(mat3.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(mat3.data, expectedXor);
+
+ cv._free(dataPtr);
+ cv._free(dataPtr2);
+ }
+
+    // Arithmetic operations
+ {
+ let data1 = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let data2 = new Uint8Array([0, 2, 4, 6, 8, 10, 12, 14, 16]);
+ let data3 = new Uint8Array([0, 1, 0, 1, 0, 1, 0, 1, 0]);
+
+ // |data1 - data2|
+ let expectedAbsDiff = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let expectedAdd = new Uint8Array([0, 3, 6, 9, 12, 15, 18, 21, 24]);
+
+ const alpha = 4;
+ const beta = -1;
+ const gamma = 3;
+ // 4*data1 - data2 + 3
+ let expectedWeightedAdd = new Uint8Array([3, 5, 7, 9, 11, 13, 15, 17, 19]);
+
+ let dataPtr = cv._malloc(3*3*1);
+ let dataPtr2 = cv._malloc(3*3*1);
+ let dataPtr3 = cv._malloc(3*3*1);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 3*3*1);
+ dataHeap.set(new Uint8Array(data1.buffer));
+ let dataHeap2 = new Uint8Array(cv.HEAPU8.buffer, dataPtr2, 3*3*1);
+ dataHeap2.set(new Uint8Array(data2.buffer));
+ let dataHeap3 = new Uint8Array(cv.HEAPU8.buffer, dataPtr3, 3*3*1);
+ dataHeap3.set(new Uint8Array(data3.buffer));
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr2, 0);
+ let mat3 = new cv.Mat(3, 3, cv.CV_8UC1, dataPtr3, 0);
+
+ let dst = new cv.Mat();
+ let none = new cv.Mat();
+
+ cv.absdiff(mat1, mat2, dst);
+ // Verify result.
+ let size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedAbsDiff);
+
+ cv.add(mat1, mat2, dst, none, -1);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedAdd);
+
+ cv.addWeighted(mat1, alpha, mat2, beta, gamma, dst, -1);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedWeightedAdd);
+
+ // default parameter
+ cv.addWeighted(mat1, alpha, mat2, beta, gamma, dst);
+ // Verify result.
+ size = dst.size();
+ assert.equal(dst.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+
+ assert.deepEqual(dst.data, expectedWeightedAdd);
+
+ mat1.delete();
+ mat2.delete();
+ mat3.delete();
+ dst.delete();
+ none.delete();
+ }
+
+    // Integral variants
+ {
+ let mat = cv.Mat.eye({height: 100, width: 100}, cv.CV_8UC3);
+ let sum = new cv.Mat();
+ let sqSum = new cv.Mat();
+ let title = new cv.Mat();
+
+ cv.integral(mat, sum, -1);
+
+ // Verify result.
+ let size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ cv.integral2(mat, sum, sqSum, -1, -1);
+ // Verify result.
+ size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ size = sqSum.size();
+ assert.equal(sqSum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ mat.delete();
+ sum.delete();
+ sqSum.delete();
+ title.delete();
+ }
+
+ // Mean, meanSTDev
+ {
+ let mat = cv.Mat.eye({height: 100, width: 100}, cv.CV_8UC3);
+ let sum = new cv.Mat();
+ let sqSum = new cv.Mat();
+ let title = new cv.Mat();
+
+ cv.integral(mat, sum, -1);
+
+ // Verify result.
+ let size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ cv.integral2(mat, sum, sqSum, -1, -1);
+ // Verify result.
+ size = sum.size();
+ assert.equal(sum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ size = sqSum.size();
+ assert.equal(sqSum.channels(), 3);
+ assert.equal(size.height, 100+1);
+ assert.equal(size.width, 100+1);
+
+ mat.delete();
+ sum.delete();
+ sqSum.delete();
+ title.delete();
+ }
+
+ // Invert
+ {
+ let inv1 = new cv.Mat();
+ let inv2 = new cv.Mat();
+ let inv3 = new cv.Mat();
+ let inv4 = new cv.Mat();
+
+
+ let data1 = new Float32Array([1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1]);
+ let data2 = new Float32Array([0, 0, 0,
+ 0, 5, 0,
+ 0, 0, 0]);
+ let data3 = new Float32Array([1, 1, 1, 0,
+ 0, 3, 1, 2,
+ 2, 3, 1, 0,
+ 1, 0, 2, 1]);
+ let data4 = new Float32Array([1, 4, 5,
+ 4, 2, 2,
+ 5, 2, 2]);
+
+ let expected1 = new Float32Array([1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1]);
+ // Inverse does not exist!
+ let expected3 = new Float32Array([-3, -1/2, 3/2, 1,
+ 1, 1/4, -1/4, -1/2,
+ 3, 1/4, -5/4, -1/2,
+ -3, 0, 1, 1]);
+ let expected4 = new Float32Array([0, -1, 1,
+ -1, 23/2, -9,
+ 1, -9, 7]);
+
+ let dataPtr1 = cv._malloc(3*3*4);
+ let dataPtr2 = cv._malloc(3*3*4);
+ let dataPtr3 = cv._malloc(4*4*4);
+ let dataPtr4 = cv._malloc(3*3*4);
+
+ let dataHeap = new Float32Array(cv.HEAP32.buffer, dataPtr1, 3*3);
+ dataHeap.set(new Float32Array(data1.buffer));
+ let dataHeap2 = new Float32Array(cv.HEAP32.buffer, dataPtr2, 3*3);
+ dataHeap2.set(new Float32Array(data2.buffer));
+ let dataHeap3 = new Float32Array(cv.HEAP32.buffer, dataPtr3, 4*4);
+ dataHeap3.set(new Float32Array(data3.buffer));
+ let dataHeap4 = new Float32Array(cv.HEAP32.buffer, dataPtr4, 3*3);
+ dataHeap4.set(new Float32Array(data4.buffer));
+
+ let mat1 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr1, 0);
+ let mat2 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr2, 0);
+ let mat3 = new cv.Mat(4, 4, cv.CV_32FC1, dataPtr3, 0);
+ let mat4 = new cv.Mat(3, 3, cv.CV_32FC1, dataPtr4, 0);
+
+        QUnit.assert.deepEqualWithTolerance = function(value, expected, tolerance) {
+            for (let i = 0; i < value.length; i = i + 1) {
+                this.pushResult({
+                    result: Math.abs(value[i] - expected[i]) < tolerance,
+                    actual: value[i],
+                    expected: expected[i],
+                });
+ }
+ };
+
+ cv.invert(mat1, inv1, 0);
+ // Verify result.
+ let size = inv1.size();
+ assert.equal(inv1.channels(), 1);
+ assert.equal(size.height, 3);
+ assert.equal(size.width, 3);
+ assert.deepEqualWithTolerance(inv1.data32F, expected1, 0.0001);
+
+
+        let ret = cv.invert(mat2, inv2, 0);
+        // Verify result: mat2 is singular, so its inverse does not exist and
+        // invert() reports failure by returning 0.
+        assert.equal(ret, 0);
+
+ cv.invert(mat3, inv3, 0);
+ // Verify result.
+ size = inv3.size();
+ assert.equal(inv3.channels(), 1);
+ assert.equal(size.height, 4);
+ assert.equal(size.width, 4);
+ assert.deepEqualWithTolerance(inv3.data32F, expected3, 0.0001);
+
+ cv.invert(mat3, inv3, 1);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv3.data32F, expected3, 0.0001);
+
+ cv.invert(mat4, inv4, 2);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv4.data32F, expected4, 0.0001);
+
+ cv.invert(mat4, inv4, 3);
+ // Verify result.
+ assert.deepEqualWithTolerance(inv4.data32F, expected4, 0.0001);
+
+ mat1.delete();
+ mat2.delete();
+ mat3.delete();
+ mat4.delete();
+ inv1.delete();
+ inv2.delete();
+ inv3.delete();
+ inv4.delete();
+ }
+});
diff --git a/modules/js/test/test_mat.js b/modules/js/test/test_mat.js
new file mode 100644
index 0000000000..2572fbdb04
--- /dev/null
+++ b/modules/js/test/test_mat.js
@@ -0,0 +1,987 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+  // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Core', {});
+
+QUnit.test('test_mat_creation', function(assert) {
+ // Mat constructors.
+ // Mat::Mat(int rows, int cols, int type)
+ {
+ let mat = new cv.Mat(10, 20, cv.CV_8UC3);
+
+ assert.equal(mat.type(), cv.CV_8UC3);
+ assert.equal(mat.depth(), cv.CV_8U);
+ assert.equal(mat.channels(), 3);
+ assert.ok(mat.empty() === false);
+
+ let size = mat.size();
+ assert.equal(size.height, 10);
+ assert.equal(size.width, 20);
+
+ mat.delete();
+ }
+
+ // Mat::Mat(const Mat &)
+ {
+ // Copy from another Mat
+ let mat1 = new cv.Mat(10, 20, cv.CV_8UC3);
+ let mat2 = new cv.Mat(mat1);
+
+ assert.equal(mat2.type(), mat1.type());
+ assert.equal(mat2.depth(), mat1.depth());
+ assert.equal(mat2.channels(), mat1.channels());
+ assert.equal(mat2.empty(), mat1.empty());
+
+    let size1 = mat1.size();
+    let size2 = mat2.size();
+    assert.ok(size1.height === size2.height);
+    assert.ok(size1.width === size2.width);
+
+ mat1.delete();
+ mat2.delete();
+ }
+
+ // Mat::Mat(int rows, int cols, int type, void *data, size_t step=AUTO_STEP)
+ {
+ // 10 * 10 and one channel
+ let data = cv._malloc(10 * 10 * 1);
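+    // passing 0 as the step lets OpenCV compute the row stride (AUTO_STEP)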
+ let mat = new cv.Mat(10, 10, cv.CV_8UC1, data, 0);
+
+ assert.equal(mat.type(), cv.CV_8UC1);
+ assert.equal(mat.depth(), cv.CV_8U);
+ assert.equal(mat.channels(), 1);
+ assert.ok(mat.empty() === false);
+
+ let size = mat.size();
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 10);
+
+ mat.delete();
+ }
+
+ // Mat::Mat(int rows, int cols, int type, const Scalar& scalar)
+ {
+ // 2 * 2 8UC4 mat
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4, [0, 1, 2, 3]);
+
+ for (let r = 0; r < mat.rows; r++) {
+ for (let c = 0; c < mat.cols; c++) {
+ let element = mat.ptr(r, c);
+ assert.equal(element[0], 0);
+ assert.equal(element[1], 1);
+ assert.equal(element[2], 2);
+ assert.equal(element[3], 3);
+ }
+ }
+
+ mat.delete();
+ }
+
+ // Mat::create(int, int, int)
+ {
+ let mat = new cv.Mat();
+ mat.create(10, 5, cv.CV_8UC3);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC3);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 3);
+
+ mat.delete();
+ }
+ // Mat::create(Size, int)
+ {
+ let mat = new cv.Mat();
+ mat.create({height: 10, width: 5}, cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+ // clone
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = mat.clone();
+
+    assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ }
+ // copyTo
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+ mat.copyTo(mat2);
+
+    assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ }
+  // copyTo with mask
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = new cv.Mat();
+ let mask = new cv.Mat(5, 5, cv.CV_8UC1, new cv.Scalar(1));
+ mat.copyTo(mat2, mask);
+
+    assert.equal(mat.channels(), mat2.channels());
+ assert.equal(mat.size().height, mat2.size().height);
+ assert.equal(mat.size().width, mat2.size().width);
+
+ assert.deepEqual(mat.data, mat2.data);
+
+
+ mat.delete();
+ mat2.delete();
+ mask.delete();
+ }
+
+ // matFromArray
+ {
+ let arrayC1 = [0, -1, 2, -3];
+ let arrayC2 = [0, -1, 2, -3, 4, -5, 6, -7];
+ let arrayC3 = [0, -1, 2, -3, 4, -5, 6, -7, 9, -9, 10, -11];
+ let arrayC4 = [0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, 13, 14, 15];
+
+ let mat8UC1 = cv.matFromArray(2, 2, cv.CV_8UC1, arrayC1);
+ let mat8UC2 = cv.matFromArray(2, 2, cv.CV_8UC2, arrayC2);
+ let mat8UC3 = cv.matFromArray(2, 2, cv.CV_8UC3, arrayC3);
+ let mat8UC4 = cv.matFromArray(2, 2, cv.CV_8UC4, arrayC4);
+
+ let mat8SC1 = cv.matFromArray(2, 2, cv.CV_8SC1, arrayC1);
+ let mat8SC2 = cv.matFromArray(2, 2, cv.CV_8SC2, arrayC2);
+ let mat8SC3 = cv.matFromArray(2, 2, cv.CV_8SC3, arrayC3);
+ let mat8SC4 = cv.matFromArray(2, 2, cv.CV_8SC4, arrayC4);
+
+ let mat16UC1 = cv.matFromArray(2, 2, cv.CV_16UC1, arrayC1);
+ let mat16UC2 = cv.matFromArray(2, 2, cv.CV_16UC2, arrayC2);
+ let mat16UC3 = cv.matFromArray(2, 2, cv.CV_16UC3, arrayC3);
+ let mat16UC4 = cv.matFromArray(2, 2, cv.CV_16UC4, arrayC4);
+
+ let mat16SC1 = cv.matFromArray(2, 2, cv.CV_16SC1, arrayC1);
+ let mat16SC2 = cv.matFromArray(2, 2, cv.CV_16SC2, arrayC2);
+ let mat16SC3 = cv.matFromArray(2, 2, cv.CV_16SC3, arrayC3);
+ let mat16SC4 = cv.matFromArray(2, 2, cv.CV_16SC4, arrayC4);
+
+ let mat32SC1 = cv.matFromArray(2, 2, cv.CV_32SC1, arrayC1);
+ let mat32SC2 = cv.matFromArray(2, 2, cv.CV_32SC2, arrayC2);
+ let mat32SC3 = cv.matFromArray(2, 2, cv.CV_32SC3, arrayC3);
+ let mat32SC4 = cv.matFromArray(2, 2, cv.CV_32SC4, arrayC4);
+
+ let mat32FC1 = cv.matFromArray(2, 2, cv.CV_32FC1, arrayC1);
+ let mat32FC2 = cv.matFromArray(2, 2, cv.CV_32FC2, arrayC2);
+ let mat32FC3 = cv.matFromArray(2, 2, cv.CV_32FC3, arrayC3);
+ let mat32FC4 = cv.matFromArray(2, 2, cv.CV_32FC4, arrayC4);
+
+ let mat64FC1 = cv.matFromArray(2, 2, cv.CV_64FC1, arrayC1);
+ let mat64FC2 = cv.matFromArray(2, 2, cv.CV_64FC2, arrayC2);
+ let mat64FC3 = cv.matFromArray(2, 2, cv.CV_64FC3, arrayC3);
+ let mat64FC4 = cv.matFromArray(2, 2, cv.CV_64FC4, arrayC4);
+
+ assert.deepEqual(mat8UC1.data, new Uint8Array(arrayC1));
+ assert.deepEqual(mat8UC2.data, new Uint8Array(arrayC2));
+ assert.deepEqual(mat8UC3.data, new Uint8Array(arrayC3));
+ assert.deepEqual(mat8UC4.data, new Uint8Array(arrayC4));
+
+ assert.deepEqual(mat8SC1.data8S, new Int8Array(arrayC1));
+ assert.deepEqual(mat8SC2.data8S, new Int8Array(arrayC2));
+ assert.deepEqual(mat8SC3.data8S, new Int8Array(arrayC3));
+ assert.deepEqual(mat8SC4.data8S, new Int8Array(arrayC4));
+
+ assert.deepEqual(mat16UC1.data16U, new Uint16Array(arrayC1));
+ assert.deepEqual(mat16UC2.data16U, new Uint16Array(arrayC2));
+ assert.deepEqual(mat16UC3.data16U, new Uint16Array(arrayC3));
+ assert.deepEqual(mat16UC4.data16U, new Uint16Array(arrayC4));
+
+ assert.deepEqual(mat16SC1.data16S, new Int16Array(arrayC1));
+ assert.deepEqual(mat16SC2.data16S, new Int16Array(arrayC2));
+ assert.deepEqual(mat16SC3.data16S, new Int16Array(arrayC3));
+ assert.deepEqual(mat16SC4.data16S, new Int16Array(arrayC4));
+
+ assert.deepEqual(mat32SC1.data32S, new Int32Array(arrayC1));
+ assert.deepEqual(mat32SC2.data32S, new Int32Array(arrayC2));
+ assert.deepEqual(mat32SC3.data32S, new Int32Array(arrayC3));
+ assert.deepEqual(mat32SC4.data32S, new Int32Array(arrayC4));
+
+ assert.deepEqual(mat32FC1.data32F, new Float32Array(arrayC1));
+ assert.deepEqual(mat32FC2.data32F, new Float32Array(arrayC2));
+ assert.deepEqual(mat32FC3.data32F, new Float32Array(arrayC3));
+ assert.deepEqual(mat32FC4.data32F, new Float32Array(arrayC4));
+
+ assert.deepEqual(mat64FC1.data64F, new Float64Array(arrayC1));
+ assert.deepEqual(mat64FC2.data64F, new Float64Array(arrayC2));
+ assert.deepEqual(mat64FC3.data64F, new Float64Array(arrayC3));
+ assert.deepEqual(mat64FC4.data64F, new Float64Array(arrayC4));
+
+ mat8UC1.delete();
+ mat8UC2.delete();
+ mat8UC3.delete();
+ mat8UC4.delete();
+ mat8SC1.delete();
+ mat8SC2.delete();
+ mat8SC3.delete();
+ mat8SC4.delete();
+ mat16UC1.delete();
+ mat16UC2.delete();
+ mat16UC3.delete();
+ mat16UC4.delete();
+ mat16SC1.delete();
+ mat16SC2.delete();
+ mat16SC3.delete();
+ mat16SC4.delete();
+ mat32SC1.delete();
+ mat32SC2.delete();
+ mat32SC3.delete();
+ mat32SC4.delete();
+ mat32FC1.delete();
+ mat32FC2.delete();
+ mat32FC3.delete();
+ mat32FC4.delete();
+ mat64FC1.delete();
+ mat64FC2.delete();
+ mat64FC3.delete();
+ mat64FC4.delete();
+ }
+
+ // matFromImageData
+  // Only test in browser; guard rather than return early so the remaining
+  // sub-tests still run under Node.js
+  if (typeof window !== 'undefined') {
+    let canvas = window.document.createElement('canvas');
+    canvas.width = 2;
+    canvas.height = 2;
+    let ctx = canvas.getContext('2d');
+    ctx.fillStyle = '#FF0000';
+    ctx.fillRect(0, 0, 1, 1);
+    ctx.fillRect(1, 1, 1, 1);
+
+    let imageData = ctx.getImageData(0, 0, 2, 2);
+    let mat = cv.matFromImageData(imageData);
+
+    assert.deepEqual(mat.data, new Uint8Array(imageData.data));
+
+    mat.delete();
+  }
+
+ // Mat(mat)
+ {
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4, new cv.Scalar(1, 0, 1, 0));
+ let mat1 = new cv.Mat(mat);
+ let mat2 = mat;
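+    // mat1 wraps a new native Mat (sharing pixel data via refcounting), while
+    // mat2 is just another JS reference to the same object as mat, so
+    // deleting mat marks mat2 as deleted too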
+
+ assert.equal(mat.rows, mat1.rows);
+ assert.equal(mat.cols, mat1.cols);
+ assert.equal(mat.type(), mat1.type());
+ assert.deepEqual(mat.data, mat1.data);
+
+ mat.delete();
+
+ assert.equal(mat1.isDeleted(), false);
+ assert.equal(mat2.isDeleted(), true);
+
+ mat1.delete();
+ }
+
+ // mat.setTo
+ {
+ let mat = new cv.Mat(2, 2, cv.CV_8UC4);
+ let s = [0, 1, 2, 3];
+
+ mat.setTo(s);
+
+ assert.deepEqual(mat.ptr(0, 0), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(0, 1), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 0), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 1), new Uint8Array(s));
+
+ let s1 = [0, 0, 0, 0];
+ mat.setTo(s1);
+ let mask = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 0, 1]);
+ mat.setTo(s, mask);
+
+ assert.deepEqual(mat.ptr(0, 0), new Uint8Array(s1));
+ assert.deepEqual(mat.ptr(0, 1), new Uint8Array(s));
+ assert.deepEqual(mat.ptr(1, 0), new Uint8Array(s1));
+ assert.deepEqual(mat.ptr(1, 1), new Uint8Array(s));
+
+ mat.delete();
+ mask.delete();
+ }
+});
+
+QUnit.test('test_mat_ptr', function(assert) {
+ const RValue = 3;
+ const GValue = 7;
+ const BValue = 197;
+
+ // cv.CV_8UC1 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 10;
+ view[2 * step + 1] = RValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2);
+
+ assert.equal(view[1], RValue);
+
+ mat.delete();
+ }
+
+ // cv.CV_8UC3 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC3);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 3 * 10;
+ view[2 * step + 3] = RValue;
+ view[2 * step + 3 + 1] = GValue;
+ view[2 * step + 3 + 2] = BValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2);
+
+ assert.equal(view[3], RValue);
+ assert.equal(view[3 + 1], GValue);
+ assert.equal(view[3 + 2], BValue);
+
+ mat.delete();
+ }
+
+ // cv.CV_8UC3 + Mat::ptr(int, int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_8UC3);
+ let view = mat.data;
+
+ // Alter matrix[2, 1].
+ let step = 3 * 10;
+ view[2 * step + 3] = RValue;
+ view[2 * step + 3 + 1] = GValue;
+ view[2 * step + 3 + 2] = BValue;
+
+ // Access matrix[2, 1].
+ view = mat.ptr(2, 1);
+
+ assert.equal(view[0], RValue);
+ assert.equal(view[1], GValue);
+ assert.equal(view[2], BValue);
+
+ mat.delete();
+ }
+
+ const RValueF32 = 3.3;
+ const GValueF32 = 7.3;
+ const BValueF32 = 197.3;
+ const EPSILON = 0.001;
+
+ // cv.CV_32FC1 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC1);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = 10;
+ view[2 * step + 1] = RValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2);
+
+ assert.ok(Math.abs(view[1] - RValueF32) < EPSILON);
+
+ mat.delete();
+ }
+
+ // cv.CV_32FC3 + Mat::ptr(int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC3);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = mat.step1(0);
+ view[2 * step + 3] = RValueF32;
+ view[2 * step + 3 + 1] = GValueF32;
+ view[2 * step + 3 + 2] = BValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2);
+
+ assert.ok(Math.abs(view[3] - RValueF32) < EPSILON);
+ assert.ok(Math.abs(view[3 + 1] - GValueF32) < EPSILON);
+ assert.ok(Math.abs(view[3 + 2] - BValueF32) < EPSILON);
+
+ mat.delete();
+ }
+
+ // cv.CV_32FC3 + Mat::ptr(int, int).
+ {
+ let mat = new cv.Mat(10, 10, cv.CV_32FC3);
+ let view = mat.data32F;
+
+ // Alter matrix[2, 1].
+ let step = mat.step1(0);
+ view[2 * step + 3] = RValueF32;
+ view[2 * step + 3 + 1] = GValueF32;
+ view[2 * step + 3 + 2] = BValueF32;
+
+ // Access matrix[2, 1].
+ view = mat.floatPtr(2, 1);
+
+ assert.ok(Math.abs(view[0] - RValueF32) < EPSILON);
+ assert.ok(Math.abs(view[1] - GValueF32) < EPSILON);
+ assert.ok(Math.abs(view[2] - BValueF32) < EPSILON);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_zeros', function(assert) {
+ let zeros = new Uint8Array(10*10).fill(0);
+ // Mat::zeros(int, int, int)
+ {
+ let mat = cv.Mat.zeros(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, zeros);
+
+ mat.delete();
+ }
+
+ // Mat::zeros(Size, int)
+ {
+ let mat = cv.Mat.zeros({height: 10, width: 10}, cv.CV_8UC1);
+ let view = mat.data;
+
+ assert.deepEqual(view, zeros);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_ones', function(assert) {
+ let ones = new Uint8Array(10*10).fill(1);
+ // Mat::ones(int, int, int)
+ {
+ let mat = cv.Mat.ones(10, 10, cv.CV_8UC1);
+ let view = mat.data;
+
+    assert.deepEqual(view, ones);
+
+    mat.delete();
+  }
+ // Mat::ones(Size, int)
+ {
+ let mat = cv.Mat.ones({height: 10, width: 10}, cv.CV_8UC1);
+ let view = mat.data;
+
+    assert.deepEqual(view, ones);
+
+    mat.delete();
+  }
+});
+
+QUnit.test('test_mat_eye', function(assert) {
+ let eye4by4 = new Uint8Array([1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 0, 0, 1]);
+ // Mat::eye(int, int, int)
+ {
+ let mat = cv.Mat.eye(4, 4, cv.CV_8UC1);
+ let view = mat.data;
+
+    assert.deepEqual(view, eye4by4);
+
+    mat.delete();
+  }
+
+ // Mat::eye(Size, int)
+ {
+ let mat = cv.Mat.eye({height: 4, width: 4}, cv.CV_8UC1);
+ let view = mat.data;
+
+    assert.deepEqual(view, eye4by4);
+
+    mat.delete();
+  }
+});
+
+QUnit.test('test_mat_miscs', function(assert) {
+ // Mat::col(int)
+ {
+ let mat = cv.matFromArray(2, 2, cv.CV_8UC2, [1, 2, 3, 4, 5, 6, 7, 8]);
+ let col = mat.col(1);
+
+ assert.equal(col.isContinuous(), false);
+ assert.equal(col.ptr(0, 0)[0], 3);
+ assert.equal(col.ptr(0, 0)[1], 4);
+ assert.equal(col.ptr(1, 0)[0], 7);
+ assert.equal(col.ptr(1, 0)[1], 8);
+
+ col.delete();
+ mat.delete();
+ }
+
+ // Mat::row(int)
+ {
+ let mat = cv.Mat.zeros(5, 5, cv.CV_8UC2);
+ let row = mat.row(1);
+ let view = row.data;
+ assert.equal(view[0], 0);
+ assert.equal(view[4], 0);
+
+ row.delete();
+ mat.delete();
+ }
+
+ // Mat::convertTo(Mat, int, double, double)
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let grayMat = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+
+ mat.convertTo(grayMat, cv.CV_8U, 2, 1);
+ // dest = 2 * source(x, y) + 1.
+ let view = grayMat.data;
+ assert.equal(view[0], (1 * 2) + 1);
+
+ mat.convertTo(grayMat, cv.CV_8U);
+ // dest = 1 * source(x, y) + 0.
+ assert.equal(view[0], 1);
+
+ mat.convertTo(grayMat, cv.CV_8U, 2);
+ // dest = 2 * source(x, y) + 0.
+ assert.equal(view[0], 2);
+
+ grayMat.delete();
+ mat.delete();
+ }
+
+ // split
+ {
+    const R = 7;
+    const G = 13;
+    const B = 29;
+
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ let view = mat.data;
+ view[0] = R;
+ view[1] = G;
+ view[2] = B;
+
+ let bgrPlanes = new cv.MatVector();
+ cv.split(mat, bgrPlanes);
+ assert.equal(bgrPlanes.size(), 3);
+
+ let rMat = bgrPlanes.get(0);
+ view = rMat.data;
+ assert.equal(view[0], R);
+
+ let gMat = bgrPlanes.get(1);
+ view = gMat.data;
+ assert.equal(view[0], G);
+
+ let bMat = bgrPlanes.get(2);
+ view = bMat.data;
+ assert.equal(view[0], B);
+
+ mat.delete();
+ rMat.delete();
+ gMat.delete();
+ bgrPlanes.delete();
+ bMat.delete();
+ }
+
+ // elemSize
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ assert.equal(mat.elemSize(), 3);
+ assert.equal(mat.elemSize1(), 1);
+
+ let mat2 = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+ assert.equal(mat2.elemSize(), 1);
+ assert.equal(mat2.elemSize1(), 1);
+
+ let mat3 = cv.Mat.eye(5, 5, cv.CV_16UC3);
+ assert.equal(mat3.elemSize(), 2 * 3);
+ assert.equal(mat3.elemSize1(), 2);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+
+ // step
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC3);
+ assert.equal(mat.step[0], 15);
+ assert.equal(mat.step[1], 3);
+
+ let mat2 = cv.Mat.zeros(5, 5, cv.CV_8UC1);
+ assert.equal(mat2.step[0], 5);
+ assert.equal(mat2.step[1], 1);
+
+ let mat3 = cv.Mat.eye(5, 5, cv.CV_16UC3);
+ assert.equal(mat3.step[0], 30);
+ assert.equal(mat3.step[1], 6);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+
+ // dot
+ {
+ let mat = cv.Mat.ones(5, 5, cv.CV_8UC1);
+ let mat2 = cv.Mat.eye(5, 5, cv.CV_8UC1);
+
+ assert.equal(mat.dot(mat), 25);
+ assert.equal(mat.dot(mat2), 5);
+ assert.equal(mat2.dot(mat2), 5);
+
+ mat.delete();
+ mat2.delete();
+ }
+
+ // mul
+ {
+ const FACTOR = 5;
+ let mat = cv.Mat.ones(4, 4, cv.CV_8UC1);
+ let mat2 = cv.Mat.eye(4, 4, cv.CV_8UC1);
+
+ let expected = new Uint8Array([FACTOR, 0, 0, 0,
+ 0, FACTOR, 0, 0,
+ 0, 0, FACTOR, 0,
+ 0, 0, 0, FACTOR]);
+ let mat3 = mat.mul(mat2, FACTOR);
+
+ assert.deepEqual(mat3.data, expected);
+
+ mat.delete();
+ mat2.delete();
+ mat3.delete();
+ }
+});
+
+
+QUnit.test('test mat access', function(assert) {
+ // test memory view
+ {
+ let data = new Uint8Array([0, 0, 0, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(8);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint8Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_8UC1, dataPtr, 0);
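+    // mat wraps the malloc'ed region of the Emscripten heap directly, so its
+    // data* accessors are typed-array views over the bytes copied in above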
+
+
+ let unsignedCharView = new Uint8Array(data.buffer);
+ let charView = new Int8Array(data.buffer);
+ let shortView = new Int16Array(data.buffer);
+ let unsignedShortView = new Uint16Array(data.buffer);
+ let intView = new Int32Array(data.buffer);
+ let float32View = new Float32Array(data.buffer);
+ let float64View = new Float64Array(data.buffer);
+
+
+ assert.deepEqual(unsignedCharView, mat.data);
+ assert.deepEqual(charView, mat.data8S);
+ assert.deepEqual(shortView, mat.data16S);
+ assert.deepEqual(unsignedShortView, mat.data16U);
+ assert.deepEqual(intView, mat.data32S);
+ assert.deepEqual(float32View, mat.data32F);
+ assert.deepEqual(float64View, mat.data64F);
+ }
+
+ // test ucharAt(i)
+ {
+ let data = new Uint8Array([0, 0, 0, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(8);
+
+ let dataHeap = new Uint8Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint8Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_8UC1, dataPtr, 0);
+
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 0);
+ assert.equal(mat.ucharAt(2), 0);
+ assert.equal(mat.ucharAt(3), 255);
+ assert.equal(mat.ucharAt(4), 0);
+ assert.equal(mat.ucharAt(5), 1);
+ assert.equal(mat.ucharAt(6), 2);
+ assert.equal(mat.ucharAt(7), 3);
+ }
+
+ // test ushortAt(i)
+ {
+ let data = new Uint16Array([0, 1000, 65000, 255, 0, 1, 2, 3]);
+ let dataPtr = cv._malloc(16);
+
+ let dataHeap = new Uint16Array(cv.HEAPU8.buffer, dataPtr, 8);
+ dataHeap.set(new Uint16Array(data.buffer));
+
+    let mat = new cv.Mat(8, 1, cv.CV_16UC1, dataPtr, 0);
+
+ assert.equal(mat.ushortAt(0), 0);
+ assert.equal(mat.ushortAt(1), 1000);
+ assert.equal(mat.ushortAt(2), 65000);
+ assert.equal(mat.ushortAt(3), 255);
+ assert.equal(mat.ushortAt(4), 0);
+ assert.equal(mat.ushortAt(5), 1);
+ assert.equal(mat.ushortAt(6), 2);
+ assert.equal(mat.ushortAt(7), 3);
+ }
+
+ // test intAt(i)
+ {
+ let data = new Int32Array([0, -1000, 65000, 255, -2000000, -1, 2, 3]);
+ let dataPtr = cv._malloc(32);
+
+ let dataHeap = new Int32Array(cv.HEAPU32.buffer, dataPtr, 8);
+ dataHeap.set(new Int32Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_32SC1, dataPtr, 0);
+
+ assert.equal(mat.intAt(0), 0);
+ assert.equal(mat.intAt(1), -1000);
+ assert.equal(mat.intAt(2), 65000);
+ assert.equal(mat.intAt(3), 255);
+ assert.equal(mat.intAt(4), -2000000);
+ assert.equal(mat.intAt(5), -1);
+ assert.equal(mat.intAt(6), 2);
+ assert.equal(mat.intAt(7), 3);
+ }
+
+ // test floatAt(i)
+ {
+ const EPSILON = 0.001;
+ let data = new Float32Array([0, -10.5, 650.001, 255, -20.1, -1.2, 2, 3.5]);
+ let dataPtr = cv._malloc(32);
+
+ let dataHeap = new Float32Array(cv.HEAPU32.buffer, dataPtr, 8);
+ dataHeap.set(new Float32Array(data.buffer));
+
+ let mat = new cv.Mat(8, 1, cv.CV_32FC1, dataPtr, 0);
+
+ assert.equal(Math.abs(mat.floatAt(0)-0) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(1)+10.5) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(2)-650.001) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(3)-255) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(4)+20.1) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(5)+1.2) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(6)-2) < EPSILON, true);
+ assert.equal(Math.abs(mat.floatAt(7)-3.5) < EPSILON, true);
+ }
+
+ // test intAt(i,j)
+ {
+ let mat = cv.Mat.eye({height: 3, width: 3}, cv.CV_32SC1);
+
+ assert.equal(mat.intAt(0, 0), 1);
+ assert.equal(mat.intAt(0, 1), 0);
+ assert.equal(mat.intAt(0, 2), 0);
+ assert.equal(mat.intAt(1, 0), 0);
+ assert.equal(mat.intAt(1, 1), 1);
+ assert.equal(mat.intAt(1, 2), 0);
+ assert.equal(mat.intAt(2, 0), 0);
+ assert.equal(mat.intAt(2, 1), 0);
+ assert.equal(mat.intAt(2, 2), 1);
+
+ mat.delete();
+ }
+});
+
+QUnit.test('test_mat_operations', function(assert) {
+ // test minMaxLoc
+ {
+ let src = cv.Mat.ones(4, 4, cv.CV_8UC1);
+
+ src.data[2] = 0;
+ src.data[5] = 2;
+
+ let result = cv.minMaxLoc(src);
+
+ assert.equal(result.minVal, 0);
+ assert.equal(result.maxVal, 2);
+ assert.deepEqual(result.minLoc, {x: 2, y: 0});
+ assert.deepEqual(result.maxLoc, {x: 1, y: 1});
+
+ src.delete();
+ }
+});
+
+QUnit.test('test_mat_roi', function(assert) {
+  // test roi
+ {
+ let mat = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 2, 3]);
+ let roi = mat.roi(new cv.Rect(1, 1, 1, 1));
+
+ assert.equal(roi.rows, 1);
+ assert.equal(roi.cols, 1);
+ assert.deepEqual(roi.data, new Uint8Array([mat.ucharAt(1, 1)]));
+
+ mat.delete();
+ roi.delete();
+ }
+});
+
+
+QUnit.test('test_mat_range', function(assert) {
+ {
+ let src = cv.matFromArray(2, 2, cv.CV_8UC1, [0, 1, 2, 3]);
+ let mat = src.colRange(0, 1);
+
+ assert.equal(mat.isContinuous(), false);
+ assert.equal(mat.rows, 2);
+ assert.equal(mat.cols, 1);
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 2);
+
+ mat.delete();
+
+ mat = src.colRange({start: 0, end: 1});
+
+ assert.equal(mat.isContinuous(), false);
+ assert.equal(mat.rows, 2);
+ assert.equal(mat.cols, 1);
+ assert.equal(mat.ucharAt(0), 0);
+ assert.equal(mat.ucharAt(1), 2);
+
+ mat.delete();
+
+ mat = src.rowRange(1, 2);
+
+ assert.equal(mat.rows, 1);
+ assert.equal(mat.cols, 2);
+ assert.deepEqual(mat.data, new Uint8Array([2, 3]));
+
+ mat.delete();
+
+ mat = src.rowRange({start: 1, end: 2});
+
+ assert.equal(mat.rows, 1);
+ assert.equal(mat.cols, 2);
+ assert.deepEqual(mat.data, new Uint8Array([2, 3]));
+
+ mat.delete();
+
+ src.delete();
+ }
+});
+
+QUnit.test('test_mat_diag', function(assert) {
+ // test diag
+ {
+ let mat = cv.matFromArray(3, 3, cv.CV_8UC1, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+ let d = mat.diag();
+ let d1 = mat.diag(1);
+ let d2 = mat.diag(-1);
+
+ assert.equal(mat.isContinuous(), true);
+ assert.equal(d.isContinuous(), false);
+ assert.equal(d1.isContinuous(), false);
+ assert.equal(d2.isContinuous(), false);
+
+ assert.equal(d.ucharAt(0), 0);
+ assert.equal(d.ucharAt(1), 4);
+ assert.equal(d.ucharAt(2), 8);
+
+ assert.equal(d1.ucharAt(0), 1);
+ assert.equal(d1.ucharAt(1), 5);
+
+ assert.equal(d2.ucharAt(0), 3);
+ assert.equal(d2.ucharAt(1), 7);
+
+ mat.delete();
+ d.delete();
+ d1.delete();
+ d2.delete();
+ }
+});
diff --git a/modules/js/test/test_objdetect.js b/modules/js/test/test_objdetect.js
new file mode 100644
index 0000000000..76f0a771a5
--- /dev/null
+++ b/modules/js/test/test_objdetect.js
@@ -0,0 +1,161 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+  // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+ cv.FS_createLazyFile('/', 'haarcascade_frontalface_default.xml', // eslint-disable-line new-cap
+ 'haarcascade_frontalface_default.xml', true, false);
+}
+
+QUnit.module('Object Detection', {});
+QUnit.test('Cascade classification', function(assert) {
+ // Group rectangle
+ {
+ let rectList = new cv.RectVector();
+ let weights = new cv.IntVector();
+ let groupThreshold = 1;
+ const eps = 0.2;
+
+ let rect1 = new cv.Rect(1, 2, 3, 4);
+ let rect2 = new cv.Rect(1, 4, 2, 3);
+
+ rectList.push_back(rect1);
+ rectList.push_back(rect2);
+
+ cv.groupRectangles(rectList, weights, groupThreshold, eps);
+
+
+ rectList.delete();
+ weights.delete();
+ }
+
+ // CascadeClassifier
+ {
+ let classifier = new cv.CascadeClassifier();
+ const modelPath = '/haarcascade_frontalface_default.xml';
+
+ assert.equal(classifier.empty(), true);
+
+
+ classifier.load(modelPath);
+ assert.equal(classifier.empty(), false);
+
+ let image = cv.Mat.eye({height: 10, width: 10}, cv.CV_8UC3);
+ let objects = new cv.RectVector();
+ let numDetections = new cv.IntVector();
+ const scaleFactor = 1.1;
+ const minNeighbors = 3;
+ const flags = 0;
+ const minSize = {height: 0, width: 0};
+ const maxSize = {height: 10, width: 10};
+
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags, minSize, maxSize);
+
+ // test default parameters
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags, minSize);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors, flags);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor,
+ minNeighbors);
+ classifier.detectMultiScale2(image, objects, numDetections, scaleFactor);
+
+ classifier.delete();
+ objects.delete();
+ numDetections.delete();
+ }
+
+ // HOGDescriptor
+ {
+ let hog = new cv.HOGDescriptor();
+ let mat = new cv.Mat({height: 10, width: 10}, cv.CV_8UC1);
+ let descriptors = new cv.FloatVector();
+ let locations = new cv.PointVector();
+
+
+ assert.equal(hog.winSize.height, 128);
+ assert.equal(hog.winSize.width, 64);
+ assert.equal(hog.nbins, 9);
+ assert.equal(hog.derivAperture, 1);
+ assert.equal(hog.winSigma, -1);
+ assert.equal(hog.histogramNormType, 0);
+ assert.equal(hog.nlevels, 64);
+
+ hog.nlevels = 32;
+ assert.equal(hog.nlevels, 32);
+
+ hog.delete();
+ mat.delete();
+ descriptors.delete();
+ locations.delete();
+ }
+});
diff --git a/modules/js/test/test_utils.js b/modules/js/test/test_utils.js
new file mode 100644
index 0000000000..0f345b4223
--- /dev/null
+++ b/modules/js/test/test_utils.js
@@ -0,0 +1,253 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+  // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+QUnit.module('Utils', {});
+QUnit.test('Test vectors', function(assert) {
+ {
+ let pointVector = new cv.PointVector();
+ for (let i=0; i<100; ++i) {
+ pointVector.push_back({x: i, y: 2*i});
+ }
+
+ assert.equal(pointVector.size(), 100);
+
+ let index = 10;
+ let item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 0;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 99;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ pointVector.delete();
+ }
+
+ {
+ let pointVector = new cv.PointVector();
+ for (let i=0; i<100; ++i) {
+ pointVector.push_back(new cv.Point(i, 2*i));
+ }
+
+ pointVector.push_back(new cv.Point());
+
+ assert.equal(pointVector.size(), 101);
+
+ let index = 10;
+ let item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 0;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 99;
+ item = pointVector.get(index);
+ assert.equal(item.x, index);
+ assert.equal(item.y, 2*index);
+
+ index = 100;
+ item = pointVector.get(index);
+ assert.equal(item.x, 0);
+ assert.equal(item.y, 0);
+
+ pointVector.delete();
+ }
+});
+QUnit.test('Test Rect', function(assert) {
+ let rectVector = new cv.RectVector();
+ let rect = {x: 1, y: 2, width: 3, height: 4};
+ rectVector.push_back(rect);
+ rectVector.push_back(new cv.Rect());
+ rectVector.push_back(new cv.Rect(rect));
+ rectVector.push_back(new cv.Rect({x: 5, y: 6}, {width: 7, height: 8}));
+ rectVector.push_back(new cv.Rect(9, 10, 11, 12));
+
+ assert.equal(rectVector.size(), 5);
+
+ let item = rectVector.get(0);
+ assert.equal(item.x, 1);
+ assert.equal(item.y, 2);
+ assert.equal(item.width, 3);
+ assert.equal(item.height, 4);
+
+ item = rectVector.get(1);
+ assert.equal(item.x, 0);
+ assert.equal(item.y, 0);
+ assert.equal(item.width, 0);
+ assert.equal(item.height, 0);
+
+ item = rectVector.get(2);
+ assert.equal(item.x, 1);
+ assert.equal(item.y, 2);
+ assert.equal(item.width, 3);
+ assert.equal(item.height, 4);
+
+ item = rectVector.get(3);
+ assert.equal(item.x, 5);
+ assert.equal(item.y, 6);
+ assert.equal(item.width, 7);
+ assert.equal(item.height, 8);
+
+ item = rectVector.get(4);
+ assert.equal(item.x, 9);
+ assert.equal(item.y, 10);
+ assert.equal(item.width, 11);
+ assert.equal(item.height, 12);
+
+ rectVector.delete();
+});
+QUnit.test('Test Size', function(assert) {
+ {
+ let mat = new cv.Mat();
+ mat.create({width: 5, height: 10}, cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+
+ {
+ let mat = new cv.Mat();
+ mat.create(new cv.Size(5, 10), cv.CV_8UC4);
+ let size = mat.size();
+
+ assert.ok(mat.type() === cv.CV_8UC4);
+ assert.ok(size.height === 10);
+ assert.ok(size.width === 5);
+ assert.ok(mat.channels() === 4);
+
+ mat.delete();
+ }
+});
+
+
+QUnit.test('test_rotated_rect', function(assert) {
+ {
+ let rect = {center: {x: 100, y: 100}, size: {height: 100, width: 50}, angle: 30};
+
+ assert.equal(rect.center.x, 100);
+ assert.equal(rect.center.y, 100);
+ assert.equal(rect.angle, 30);
+ assert.equal(rect.size.height, 100);
+ assert.equal(rect.size.width, 50);
+ }
+
+ {
+ let rect = new cv.RotatedRect();
+
+ assert.equal(rect.center.x, 0);
+ assert.equal(rect.center.y, 0);
+ assert.equal(rect.angle, 0);
+ assert.equal(rect.size.height, 0);
+ assert.equal(rect.size.width, 0);
+
+ let points = cv.RotatedRect.points(rect);
+
+ assert.equal(points[0].x, 0);
+ assert.equal(points[0].y, 0);
+ assert.equal(points[1].x, 0);
+ assert.equal(points[1].y, 0);
+ assert.equal(points[2].x, 0);
+ assert.equal(points[2].y, 0);
+ assert.equal(points[3].x, 0);
+ assert.equal(points[3].y, 0);
+ }
+
+ {
+ let rect = new cv.RotatedRect({x: 100, y: 100}, {height: 100, width: 50}, 30);
+
+ assert.equal(rect.center.x, 100);
+ assert.equal(rect.center.y, 100);
+ assert.equal(rect.angle, 30);
+ assert.equal(rect.size.height, 100);
+ assert.equal(rect.size.width, 50);
+
+ let points = cv.RotatedRect.points(rect);
+
+ assert.equal(points[0].x, cv.RotatedRect.boundingRect2f(rect).x);
+ assert.equal(points[1].y, cv.RotatedRect.boundingRect2f(rect).y);
+ }
+});
diff --git a/modules/js/test/test_video.js b/modules/js/test/test_video.js
new file mode 100644
index 0000000000..f26a8b7b40
--- /dev/null
+++ b/modules/js/test/test_video.js
@@ -0,0 +1,107 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+// //////////////////////////////////////////////////////////////////////////////////////
+// Author: Sajjad Taheri, University of California, Irvine. sajjadt[at]uci[dot]edu
+//
+// LICENSE AGREEMENT
+// Copyright (c) 2015 The Regents of the University of California (Regents)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the University nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+if (typeof module !== 'undefined' && module.exports) {
+  // The environment is Node.js
+ var cv = require('./opencv.js'); // eslint-disable-line no-var
+}
+
+QUnit.module('Video', {});
+QUnit.test('Background Segmentation', function(assert) {
+ // BackgroundSubtractorMOG2
+ {
+ const history = 600;
+ const varThreshold = 15;
+ const detectShadows = true;
+
+ let mog2 = new cv.BackgroundSubtractorMOG2(history, varThreshold, detectShadows);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2();
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2(history);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+
+ mog2 = new cv.BackgroundSubtractorMOG2(history, varThreshold);
+
+ assert.equal(mog2 instanceof cv.BackgroundSubtractorMOG2, true);
+
+ mog2.delete();
+ }
+});
diff --git a/modules/js/test/tests.html b/modules/js/test/tests.html
new file mode 100644
index 0000000000..2ed5325c5b
--- /dev/null
+++ b/modules/js/test/tests.html
@@ -0,0 +1,78 @@
+<!-- [markup lost in extraction] "OpenCV JS Tests": a QUnit harness page that
+     loads opencv.js and the test_*.js suites so the tests can be run in a
+     browser -->
diff --git a/modules/js/test/tests.js b/modules/js/test/tests.js
new file mode 100644
index 0000000000..cae9bac581
--- /dev/null
+++ b/modules/js/test/tests.js
@@ -0,0 +1,53 @@
+// //////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+
+let testrunner = require('qunit');
+testrunner.options.maxBlockDuration = 20000; // opencv.js needs time to load
+
+testrunner.run(
+ {
+ code: 'opencv.js',
+ tests: ['test_mat.js', 'test_utils.js', 'test_imgproc.js',
+ 'test_objdetect.js', 'test_video.js'],
+ },
+ function(err, report) {
+ console.log(report.failed + ' failed, ' + report.passed + ' passed');
+ }
+);
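+
+// A typical invocation, assuming the 'qunit' npm package is installed and
+// opencv.js plus the test_*.js files sit next to this script:
+//   node tests.js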
diff --git a/modules/objdetect/CMakeLists.txt b/modules/objdetect/CMakeLists.txt
index 862d564cc7..4e330af6c0 100644
--- a/modules/objdetect/CMakeLists.txt
+++ b/modules/objdetect/CMakeLists.txt
@@ -1,2 +1,2 @@
set(the_description "Object Detection")
-ocv_define_module(objdetect opencv_core opencv_imgproc WRAP java python)
+ocv_define_module(objdetect opencv_core opencv_imgproc WRAP java python js)
diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py
index 61afe6a730..1ff7779a0d 100755
--- a/modules/python/src2/hdr_parser.py
+++ b/modules/python/src2/hdr_parser.py
@@ -105,6 +105,14 @@ class CppHeaderParser(object):
modlist.append("/CA " + macro_arg)
arg_str = arg_str[:npos] + arg_str[npos3+1:]
+ npos = arg_str.find("const")
+ if npos >= 0:
+ modlist.append("/C")
+
+ npos = arg_str.find("&")
+ if npos >= 0:
+ modlist.append("/Ref")
+
arg_str = arg_str.strip()
word_start = 0
word_list = []
@@ -406,13 +414,27 @@ class CppHeaderParser(object):
func_modlist.append("="+arg)
decl_str = decl_str[:npos] + decl_str[npos3+1:]
+ virtual_method = False
+ pure_virtual_method = False
+ const_method = False
+
# filter off some common prefixes, which are meaningless for Python wrappers.
# note that we do not strip "static" prefix, which does matter;
# it means class methods, not instance methods
- decl_str = self.batch_replace(decl_str, [("virtual", ""), ("static inline", ""), ("inline", ""),\
+ decl_str = self.batch_replace(decl_str, [("static inline", ""), ("inline", ""),\
("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", ""),
("CV_DEPRECATED", "")]).strip()
+
+ if decl_str.strip().startswith('virtual'):
+ virtual_method = True
+
+    decl_str = decl_str.replace('virtual', '')
+
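+    # look at the tokens after the closing ')' of the argument list:
+    # e.g. ') const = 0' splits into [')', 'const', '=', '0']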
+ end_tokens = decl_str[decl_str.rfind(')'):].split()
+ const_method = 'const' in end_tokens
+ pure_virtual_method = '=' in end_tokens and '0' in end_tokens
+
static_method = False
context = top[0]
if decl_str.startswith("static") and (context == "class" or context == "struct"):
@@ -575,6 +597,12 @@ class CppHeaderParser(object):
if static_method:
func_modlist.append("/S")
+ if const_method:
+ func_modlist.append("/C")
+ if virtual_method:
+ func_modlist.append("/V")
+ if pure_virtual_method:
+ func_modlist.append("/PV")
return [funcname, rettype, func_modlist, args, original_type, docstring]
diff --git a/modules/video/CMakeLists.txt b/modules/video/CMakeLists.txt
index 15705cfc3a..df5e49b828 100644
--- a/modules/video/CMakeLists.txt
+++ b/modules/video/CMakeLists.txt
@@ -1,2 +1,2 @@
set(the_description "Video Analysis")
-ocv_define_module(video opencv_imgproc WRAP java python)
+ocv_define_module(video opencv_imgproc WRAP java python js)
diff --git a/platforms/js/README.md b/platforms/js/README.md
new file mode 100644
index 0000000000..1db8a84a4b
--- /dev/null
+++ b/platforms/js/README.md
@@ -0,0 +1,15 @@
+Building OpenCV.js by Emscripten
+====================
+
+[Download and install Emscripten](https://kripken.github.io/emscripten-site/docs/getting_started/downloads.html).
+
+Execute the `build_js.py` script:
+```
+python <opencv_src_dir>/platforms/js/build_js.py <build_dir>
+```
+
+If everything is fine, a few minutes later you will get `<build_dir>/bin/opencv.js`. You can then include it in your web pages.
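+
+A minimal page sketch (assuming `opencv.js` is served next to the page and the
+default asm.js build, which initializes synchronously):
+```
+<script src="opencv.js" type="text/javascript"></script>
+<script type="text/javascript">
+// opencv.js exposes the bindings under the global `cv` namespace
+let mat = new cv.Mat(10, 10, cv.CV_8UC1);
+console.log(mat.size().height, mat.size().width);
+mat.delete(); // Emscripten-backed objects must be freed explicitly
+</script>
+```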
+
+Find out more build options with the `-h` switch.
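+
+For example, to build the WebAssembly version together with the tests (flags as
+defined in `build_js.py`):
+```
+python <opencv_src_dir>/platforms/js/build_js.py build_js --build_wasm --build_test
+```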
+
+For a detailed build tutorial, check out `<opencv_src_dir>/doc/js_tutorials/js_setup/js_setup/js_setup.markdown`.
diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py
new file mode 100644
index 0000000000..f58625d172
--- /dev/null
+++ b/platforms/js/build_js.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+
+import os, sys, subprocess, argparse, shutil, glob, re, multiprocessing
+import logging as log
+
+class Fail(Exception):
+ def __init__(self, text=None):
+ self.t = text
+ def __str__(self):
+ return "ERROR" if self.t is None else self.t
+
+def execute(cmd, shell=False):
+ try:
+ log.info("Executing: %s" % cmd)
+ retcode = subprocess.call(cmd, shell=shell)
+ if retcode < 0:
+ raise Fail("Child was terminated by signal:" %s -retcode)
+ elif retcode > 0:
+ raise Fail("Child returned: %s" % retcode)
+ except OSError as e:
+ raise Fail("Execution failed: %d / %s" % (e.errno, e.strerror))
+
+def rm_one(d):
+ d = os.path.abspath(d)
+ if os.path.exists(d):
+ if os.path.isdir(d):
+ log.info("Removing dir: %s", d)
+ shutil.rmtree(d)
+ elif os.path.isfile(d):
+ log.info("Removing file: %s", d)
+ os.remove(d)
+
+def check_dir(d, create=False, clean=False):
+ d = os.path.abspath(d)
+ log.info("Check dir %s (create: %s, clean: %s)", d, create, clean)
+ if os.path.exists(d):
+ if not os.path.isdir(d):
+ raise Fail("Not a directory: %s" % d)
+ if clean:
+ for x in glob.glob(os.path.join(d, "*")):
+ rm_one(x)
+ else:
+ if create:
+ os.makedirs(d)
+ return d
+
+def check_file(d):
+ d = os.path.abspath(d)
+ if os.path.exists(d):
+ if os.path.isfile(d):
+ return True
+ else:
+ return False
+ return False
+
+def find_file(name, path):
+ for root, dirs, files in os.walk(path):
+ if name in files:
+ return os.path.join(root, name)
+
+def determine_emcc_version(emscripten_dir):
+    ret = subprocess.check_output([os.path.join(emscripten_dir, "emcc"), "--version"], universal_newlines=True)  # decode to str so re.match works under Python 3
+ m = re.match(r'^emcc.*(\d+\.\d+\.\d+)', ret, flags=re.IGNORECASE)
+ return m.group(1)
+
+def determine_opencv_version(version_hpp_path):
+ # version in 2.4 - CV_VERSION_EPOCH.CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION
+ # version in master - CV_VERSION_MAJOR.CV_VERSION_MINOR.CV_VERSION_REVISION-CV_VERSION_STATUS
+ with open(version_hpp_path, "rt") as f:
+ data = f.read()
+ major = re.search(r'^#define\W+CV_VERSION_MAJOR\W+(\d+)$', data, re.MULTILINE).group(1)
+ minor = re.search(r'^#define\W+CV_VERSION_MINOR\W+(\d+)$', data, re.MULTILINE).group(1)
+ revision = re.search(r'^#define\W+CV_VERSION_REVISION\W+(\d+)$', data, re.MULTILINE).group(1)
+ version_status = re.search(r'^#define\W+CV_VERSION_STATUS\W+"([^"]*)"$', data, re.MULTILINE).group(1)
+ return "%(major)s.%(minor)s.%(revision)s%(version_status)s" % locals()
+
+class Builder:
+ def __init__(self, options):
+ self.options = options
+ self.build_dir = check_dir(options.build_dir, create=True)
+ self.opencv_dir = check_dir(options.opencv_dir)
+ self.emscripten_dir = check_dir(options.emscripten_dir)
+ self.opencv_version = determine_opencv_version(os.path.join(self.opencv_dir, "modules", "core", "include", "opencv2", "core", "version.hpp"))
+ self.emcc_version = determine_emcc_version(self.emscripten_dir)
+
+ def get_toolchain_file(self):
+ return os.path.join(self.emscripten_dir, "cmake", "Modules", "Platform", "Emscripten.cmake")
+
+ def clean_build_dir(self):
+ for d in ["CMakeCache.txt", "CMakeFiles/", "bin/", "libs/", "lib/", "modules"]:
+ rm_one(d)
+
+ def get_cmake_cmd(self):
+ cmd = ["cmake",
+ "-DCMAKE_BUILD_TYPE=Release",
+ "-DCMAKE_TOOLCHAIN_FILE='%s'" % self.get_toolchain_file(),
+ "-DCPU_BASELINE=''",
+ "-DCPU_DISPATCH=''",
+ "-DCV_TRACE=OFF",
+ "-DBUILD_SHARED_LIBS=OFF",
+ "-DWITH_1394=OFF",
+ "-DWITH_VTK=OFF",
+ "-DWITH_CUDA=OFF",
+ "-DWITH_CUFFT=OFF",
+ "-DWITH_CUBLAS=OFF",
+ "-DWITH_NVCUVID=OFF",
+ "-DWITH_EIGEN=OFF",
+ "-DWITH_FFMPEG=OFF",
+ "-DWITH_GSTREAMER=OFF",
+ "-DWITH_GTK=OFF",
+ "-DWITH_GTK_2_X=OFF",
+ "-DWITH_IPP=OFF",
+ "-DWITH_JASPER=OFF",
+ "-DWITH_JPEG=OFF",
+ "-DWITH_WEBP=OFF",
+ "-DWITH_OPENEXR=OFF",
+ "-DWITH_OPENGL=OFF",
+ "-DWITH_OPENVX=OFF",
+ "-DWITH_OPENNI=OFF",
+ "-DWITH_OPENNI2=OFF",
+ "-DWITH_PNG=OFF",
+ "-DWITH_TBB=OFF",
+ "-DWITH_PTHREADS_PF=OFF",
+ "-DWITH_TIFF=OFF",
+ "-DWITH_V4L=OFF",
+ "-DWITH_OPENCL=OFF",
+ "-DWITH_OPENCL_SVM=OFF",
+ "-DWITH_OPENCLAMDFFT=OFF",
+ "-DWITH_OPENCLAMDBLAS=OFF",
+ "-DWITH_MATLAB=OFF",
+ "-DWITH_GPHOTO2=OFF",
+ "-DWITH_LAPACK=OFF",
+ "-DWITH_ITT=OFF",
+ "-DBUILD_ZLIB=ON",
+ "-DBUILD_opencv_apps=OFF",
+ "-DBUILD_opencv_calib3d=OFF",
+ "-DBUILD_opencv_dnn=OFF",
+ "-DBUILD_opencv_features2d=OFF",
+ "-DBUILD_opencv_flann=OFF",
+ "-DBUILD_opencv_ml=OFF",
+ "-DBUILD_opencv_photo=OFF",
+ "-DBUILD_opencv_imgcodecs=OFF",
+ "-DBUILD_opencv_shape=OFF",
+ "-DBUILD_opencv_videoio=OFF",
+ "-DBUILD_opencv_videostab=OFF",
+ "-DBUILD_opencv_highgui=OFF",
+ "-DBUILD_opencv_superres=OFF",
+ "-DBUILD_opencv_stitching=OFF",
+ "-DBUILD_opencv_java=OFF",
+ "-DBUILD_opencv_js=ON",
+ "-DBUILD_opencv_python2=OFF",
+ "-DBUILD_opencv_python3=OFF",
+ "-DBUILD_EXAMPLES=OFF",
+ "-DBUILD_PACKAGE=OFF",
+ "-DBUILD_TESTS=OFF",
+ "-DBUILD_PERF_TESTS=OFF"]
+ if self.options.build_doc:
+ cmd.append("-DBUILD_DOCS=ON")
+ else:
+ cmd.append("-DBUILD_DOCS=OFF")
+
+ flags = self.get_build_flags()
+ if flags:
+ cmd += ["-DCMAKE_C_FLAGS='%s'" % flags,
+ "-DCMAKE_CXX_FLAGS='%s'" % flags]
+        return cmd
+
+ def get_build_flags(self):
+ flags = ""
+ if self.options.build_wasm:
+ flags += "-s WASM=1 "
+ if self.options.enable_exception:
+ flags += "-s DISABLE_EXCEPTION_CATCHING=0 "
+ return flags
+
+ def config(self):
+ cmd = self.get_cmake_cmd()
+ cmd.append(self.opencv_dir)
+ execute(cmd)
+
+ def build_opencvjs(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv.js"])
+
+ def build_test(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_test"])
+
+ def build_doc(self):
+ execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"])
+
+
+#===================================================================================================
+
+if __name__ == "__main__":
+ opencv_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
+ emscripten_dir = None
+ if "EMSCRIPTEN" in os.environ:
+ emscripten_dir = os.environ["EMSCRIPTEN"]
+
+ parser = argparse.ArgumentParser(description='Build OpenCV.js by Emscripten')
+ parser.add_argument("build_dir", help="Building directory (and output)")
+ parser.add_argument('--opencv_dir', default=opencv_dir, help='Opencv source directory (default is "../.." relative to script location)')
+ parser.add_argument('--emscripten_dir', default=emscripten_dir, help="Path to Emscripten to use for build")
+ parser.add_argument('--build_wasm', action="store_true", help="Build OpenCV.js in WebAssembly format")
+ parser.add_argument('--build_test', action="store_true", help="Build tests")
+ parser.add_argument('--build_doc', action="store_true", help="Build tutorials")
+ parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir")
+ parser.add_argument('--skip_config', action="store_true", help="Skip cmake config")
+ parser.add_argument('--config_only', action="store_true", help="Only do cmake config")
+ parser.add_argument('--enable_exception', action="store_true", help="Enable exception handling")
+ args = parser.parse_args()
+
+ log.basicConfig(format='%(message)s', level=log.DEBUG)
+ log.debug("Args: %s", args)
+
+ if args.emscripten_dir is None:
+ log.info("Cannot get Emscripten path, please specify it either by EMSCRIPTEN environment variable or --emscripten_dir option.")
+ sys.exit(-1)
+
+ builder = Builder(args)
+
+ log.info("Detected OpenCV version: %s", builder.opencv_version)
+ log.info("Detected emcc version: %s", builder.emcc_version)
+
+ os.chdir(builder.build_dir)
+
+ if args.clean_build_dir:
+ log.info("=====")
+ log.info("===== Clean build dir %s", builder.build_dir)
+ log.info("=====")
+ builder.clean_build_dir()
+
+ if not args.skip_config:
+ target = "asm.js"
+ if args.build_wasm:
+ target = "wasm"
+ log.info("=====")
+ log.info("===== Config OpenCV.js build for %s" % target)
+ log.info("=====")
+ builder.config()
+
+ if args.config_only:
+        sys.exit(0)
+
+ log.info("=====")
+ log.info("===== Building OpenCV.js in %s", "asm.js" if not args.build_wasm else "wasm")
+ log.info("=====")
+ builder.build_opencvjs()
+
+ if args.build_test:
+ log.info("=====")
+ log.info("===== Building OpenCV.js tests")
+ log.info("=====")
+ builder.build_test()
+
+ if args.build_doc:
+ log.info("=====")
+ log.info("===== Building OpenCV.js tutorials")
+ log.info("=====")
+ builder.build_doc()
+
+
+ log.info("=====")
+ log.info("===== Build finished")
+ log.info("=====")
+
+ opencvjs_path = os.path.join(builder.build_dir, "bin", "opencv.js")
+ if check_file(opencvjs_path):
+ log.info("OpenCV.js location: %s", opencvjs_path)
+
+ if args.build_test:
+ opencvjs_test_path = os.path.join(builder.build_dir, "bin", "tests.html")
+ if check_file(opencvjs_test_path):
+ log.info("OpenCV.js tests location: %s", opencvjs_test_path)
+
+ if args.build_doc:
+        opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html"))
+        if opencvjs_tutorial_path:
+            log.info("OpenCV.js tutorials location: %s", opencvjs_tutorial_path)