Mirror of https://github.com/opencv/opencv.git (synced 2025-01-18 22:44:02 +08:00)

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in commit bd1fd59fc1
@@ -8,8 +8,19 @@ function Utils(errorOutputId) { // eslint-disable-line no-unused-vars
         script.setAttribute('async', '');
         script.setAttribute('type', 'text/javascript');
         script.addEventListener('load', () => {
-            console.log(cv.getBuildInformation());
-            onloadCallback();
+            if (cv.getBuildInformation)
+            {
+                console.log(cv.getBuildInformation());
+                onloadCallback();
+            }
+            else
+            {
+                // WASM
+                cv['onRuntimeInitialized']=()=>{
+                    console.log(cv.getBuildInformation());
+                    onloadCallback();
+                }
+            }
         });
         script.addEventListener('error', () => {
             self.printError('Failed to load ' + OPENCV_URL);
@@ -0,0 +1,33 @@
+Create calibration pattern {#tutorial_camera_calibration_pattern}
+=========================================
+
+The goal of this tutorial is to learn how to create a calibration pattern.
+
+You can find a chessboard pattern at https://github.com/opencv/opencv/blob/master/doc/pattern.png
+
+You can find a circleboard pattern at https://github.com/opencv/opencv/blob/master/doc/acircles_pattern.png
+
+Create your own pattern
+---------------
+
+Now, if you want to create your own pattern, you will need Python to use https://github.com/opencv/opencv/blob/master/doc/pattern_tools/gen_pattern.py
+
+Example
+
+create a checkerboard pattern in file chessboard.svg with 9 rows, 6 columns and a square size of 20mm:
+
+python gen_pattern.py -o chessboard.svg --rows 9 --columns 6 --type checkerboard --square_size 20
+
+create a circle board pattern in file circleboard.svg with 7 rows, 5 columns and a radius of 15mm:
+
+python gen_pattern.py -o circleboard.svg --rows 7 --columns 5 --type circles --square_size 15
+
+create a circle board pattern in file acircleboard.svg with 7 rows, 5 columns, a square size of 10mm and less spacing between circles:
+
+python gen_pattern.py -o acircleboard.svg --rows 7 --columns 5 --type acircles --square_size 10 --radius_rate 2
+
+If you want to change the unit, use the -u option (mm, inches, px, m)
+
+If you want to change the page size, use the -w and -h options
+
+If you want to create a ChArUco board, read the tutorial Detection of ChArUco Corners in opencv_contrib (https://docs.opencv.org/3.4/df/d4a/tutorial_charuco_detection.html)
@@ -3,6 +3,14 @@ Camera calibration and 3D reconstruction (calib3d module) {#tutorial_table_of_co
 
 Although we get most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out 3D world information from 2D images.
 
+- @subpage tutorial_camera_calibration_pattern
+
+  *Compatibility:* \> OpenCV 2.0
+
+  *Author:* Laurent Berger
+
+  You will learn how to create some calibration patterns.
+
 - @subpage tutorial_camera_calibration_square_chess
 
   *Compatibility:* \> OpenCV 2.0
@@ -12,7 +12,7 @@ Tutorial was written for the following versions of corresponding software:
 
 - Download and install Android Studio from https://developer.android.com/studio.
 
-- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.5-android-sdk.zip`).
+- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-4.1.0-android-sdk.zip`).
 
 - Download MobileNet object detection model from https://github.com/chuanqi305/MobileNet-SSD. We need a configuration file `MobileNetSSD_deploy.prototxt` and weights `MobileNetSSD_deploy.caffemodel`.
 
@@ -104,8 +104,9 @@ void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints
 
     Vec4d k = _D.depth() == CV_32F ? (Vec4d)*_D.getMat().ptr<Vec4f>(): *_D.getMat().ptr<Vec4d>();
 
+    const bool isJacobianNeeded = jacobian.needed();
     JacobianRow *Jn = 0;
-    if (jacobian.needed())
+    if (isJacobianNeeded)
     {
         int nvars = 2 + 2 + 1 + 4 + 3 + 3; // f, c, alpha, k, om, T,
         jacobian.create(2*(int)n, nvars, CV_64F);
@@ -153,7 +154,7 @@ void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints
         else
             xpd[i] = final_point;
 
-        if (jacobian.needed())
+        if (isJacobianNeeded)
         {
             //Vec3d Xi = pdepth == CV_32F ? (Vec3d)Xf[i] : Xd[i];
             //Vec3d Y = aff*Xi;
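For context, a hedged Python sketch of the projection call whose Jacobian path is refactored above; the point layout and camera values are made up for illustration, and the second return value is the Jacobian that the C++ code only fills when jacobian.needed() (cached as isJacobianNeeded) is true:

    import numpy as np
    import cv2 as cv

    # One 3D point in front of the camera, shape (1, N, 3).
    obj = np.array([[[0.1, -0.2, 1.0]]], dtype=np.float64)
    rvec = np.zeros(3, dtype=np.float64)
    tvec = np.zeros(3, dtype=np.float64)
    K = np.array([[300.0, 0.0, 320.0],
                  [0.0, 300.0, 240.0],
                  [0.0, 0.0, 1.0]])
    D = np.zeros(4)  # fisheye distortion coefficients k1..k4

    img_pts, jacobian = cv.fisheye.projectPoints(obj, rvec, tvec, K, D)
    print(img_pts.reshape(-1, 2), jacobian.shape)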
@@ -4430,6 +4430,7 @@ public:
         : size_(size), originPtr_(ptr), alignment_(alignment), ptr_(ptr), allocatedPtr_(NULL)
     {
         CV_DbgAssert((alignment & (alignment - 1)) == 0); // check for 2^n
+        CV_DbgAssert(!readAccess || ptr);
         if (((size_t)ptr_ & (alignment - 1)) != 0)
         {
             allocatedPtr_ = new uchar[size_ + alignment - 1];
@@ -4483,6 +4484,7 @@ public:
         : size_(rows*step), originPtr_(ptr), alignment_(alignment), ptr_(ptr), allocatedPtr_(NULL), rows_(rows), cols_(cols), step_(step)
     {
         CV_DbgAssert((alignment & (alignment - 1)) == 0); // check for 2^n
+        CV_DbgAssert(!readAccess || ptr != NULL);
        if (ptr == 0 || ((size_t)ptr_ & (alignment - 1)) != 0)
        {
            allocatedPtr_ = new uchar[size_ + extrabytes + alignment - 1];
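The "check for 2^n" assertion in both constructors relies on the standard bit trick that a power of two has exactly one set bit, so x & (x - 1) clears it to zero. A short Python illustration of the same check, added here only to spell out the reasoning:

    def is_power_of_two(x: int) -> bool:
        # Mirrors CV_DbgAssert((alignment & (alignment - 1)) == 0) for x > 0.
        return x > 0 and (x & (x - 1)) == 0

    assert is_power_of_two(64)       # a typical buffer alignment
    assert not is_power_of_two(48)   # not a power of two, would trip the assert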
@@ -76,7 +76,7 @@ TEST(Imgcodecs_Tiff, write_read_16bit_big_little_endian)
         // Write sample TIFF file
         FILE* fp = fopen(filename.c_str(), "wb");
         ASSERT_TRUE(fp != NULL);
-        ASSERT_EQ((size_t)1, fwrite(tiff_sample_data, 86, 1, fp));
+        ASSERT_EQ((size_t)1, fwrite(tiff_sample_data[i], 86, 1, fp));
         fclose(fp);
 
         Mat img = imread(filename, IMREAD_UNCHANGED);
@@ -360,6 +360,8 @@ public:
     {
         CV_TRACE_FUNCTION();
 
+        CV_DbgAssert(cn > 0);
+
         Mat dx, dy;
         AutoBuffer<short> dxMax(0), dyMax(0);
         std::deque<uchar*> stack, borderPeaksLocal;
@@ -48,16 +48,35 @@
 
 /**
 @defgroup photo Computational Photography
+
+This module includes photo processing algorithms
 @{
+    @defgroup photo_inpaint Inpainting
     @defgroup photo_denoise Denoising
     @defgroup photo_hdr HDR imaging
 
 This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,
 camera calibration with multiple exposures and exposure fusion.
 
+    @defgroup photo_decolor Contrast Preserving Decolorization
+
+Useful links:
+
+http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html
+
     @defgroup photo_clone Seamless Cloning
+
+Useful links:
+
+https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp
+
     @defgroup photo_render Non-Photorealistic Rendering
     @defgroup photo_c C API
+
+Useful links:
+
+http://www.inf.ufrgs.br/~eslgastal/DomainTransform
+
+https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
 @}
 */
-
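As a rough illustration of the pipeline named in the photo_hdr description (exposure alignment, camera response calibration, merging, tonemapping and exposure fusion), a hedged Python sketch; the file names and exposure times below are placeholders:

    import numpy as np
    import cv2 as cv

    # Placeholder inputs: differently exposed shots of the same scene.
    images = [cv.imread(f) for f in ("exp_short.jpg", "exp_mid.jpg", "exp_long.jpg")]
    times = np.array([1/500.0, 1/60.0, 1/8.0], dtype=np.float32)

    cv.createAlignMTB().process(images, images)                # exposure alignment
    crf = cv.createCalibrateDebevec().process(images, times)   # camera response recovery
    hdr = cv.createMergeDebevec().process(images, times, crf)  # merge exposures into a radiance map
    ldr = cv.createTonemap(2.2).process(hdr)                   # tonemap for display
    fusion = cv.createMergeMertens().process(images)           # exposure fusion, no HDR intermediate
    cv.imwrite("ldr.png", np.clip(ldr * 255, 0, 255).astype(np.uint8))
    cv.imwrite("fusion.png", np.clip(fusion * 255, 0, 255).astype(np.uint8))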
@@ -67,24 +86,13 @@ namespace cv
 //! @addtogroup photo
 //! @{
 
+//! @addtogroup photo_inpaint
+//! @{
 //! the inpainting algorithm
 enum
 {
-    INPAINT_NS = 0, // Navier-Stokes algorithm
-    INPAINT_TELEA = 1 // A. Telea algorithm
-};
-
-enum
-{
-    NORMAL_CLONE = 1,
-    MIXED_CLONE = 2,
-    MONOCHROME_TRANSFER = 3
-};
-
-enum
-{
-    RECURS_FILTER = 1,
-    NORMCONV_FILTER = 2
+    INPAINT_NS = 0, //!< Use Navier-Stokes based method
+    INPAINT_TELEA = 1 //!< Use the algorithm proposed by Alexandru Telea @cite Telea04
 };
 
 /** @brief Restores the selected region in an image using the region neighborhood.
@@ -95,9 +103,7 @@ needs to be inpainted.
 @param dst Output image with the same size and type as src .
 @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
 by the algorithm.
-@param flags Inpainting method that could be one of the following:
-- **INPAINT_NS** Navier-Stokes based method [Navier01]
-- **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 .
+@param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
 
 The function reconstructs the selected image area from the pixel near the area boundary. The
 function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
@@ -106,12 +112,14 @@ objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting
 @note
 - An example using the inpainting technique can be found at
   opencv_source_code/samples/cpp/inpaint.cpp
-- (Python) An example using the inpainting technique can be found at
+- (Python) An example using the inpainting technique can be found at
   opencv_source_code/samples/python/inpaint.py
 */
 CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
         OutputArray dst, double inpaintRadius, int flags );
 
+//! @} photo_inpaint
+
 //! @addtogroup photo_denoise
 //! @{
 
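The rewritten @param flags line points at the two inpainting modes; a small hedged Python usage sketch (file names are placeholders, and the mask must be non-zero exactly where pixels should be reconstructed):

    import cv2 as cv

    img = cv.imread("damaged.png")
    mask = cv.imread("defect_mask.png", cv.IMREAD_GRAYSCALE)  # 8-bit, non-zero over the damage

    # Third argument is inpaintRadius, the circular neighborhood considered around each point.
    restored = cv.inpaint(img, mask, 3, cv.INPAINT_TELEA)
    cv.imwrite("restored.png", restored)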
@@ -678,6 +686,9 @@ CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();
 
 //! @} photo_hdr
 
+//! @addtogroup photo_decolor
+//! @{
+
 /** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
 black-and-white photograph rendering, and in many single channel image processing applications
 @cite CL12 .
@@ -690,9 +701,24 @@ This function is to be applied on color images.
 */
 CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);
 
+//! @} photo_decolor
+
 //! @addtogroup photo_clone
 //! @{
 
+
+//! seamlessClone algorithm flags
+enum
+{
+    /** The power of the method is fully expressed when inserting objects with complex outlines into a new background*/
+    NORMAL_CLONE = 1,
+    /** The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable
+    halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.*/
+    MIXED_CLONE = 2,
+    /** Monochrome transfer allows the user to easily replace certain features of one object by alternative features.*/
+    MONOCHROME_TRANSFER = 3};
+
+
 /** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
 An example using seamlessClone function
 */
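A hedged Python sketch of the decolor call documented above (the input path is a placeholder); it returns both the contrast-preserving grayscale image and the color-boosted image:

    import cv2 as cv

    src = cv.imread("photo.jpg")             # 8-bit 3-channel input
    gray, color_boost = cv.decolor(src)      # grayscale + boosted color output
    cv.imwrite("gray.png", gray)
    cv.imwrite("color_boost.png", color_boost)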
@@ -707,15 +733,7 @@ content @cite PM03 .
 @param mask Input 8-bit 1 or 3-channel image.
 @param p Point in dst image where object is placed.
 @param blend Output image with the same size and type as dst.
-@param flags Cloning method that could be one of the following:
-- **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with
-complex outlines into a new background
-- **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time
-consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
-original image, is not effective. Mixed seamless cloning based on a loose selection proves
-effective.
-- **MONOCHROME_TRANSFER** Monochrome transfer allows the user to easily replace certain features of
-one object by alternative features.
+@param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
 */
 CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
         OutputArray blend, int flags);
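A hedged Python sketch of seamlessClone with one of the three flag values the new @param text names; the images and the placement point are placeholders:

    import numpy as np
    import cv2 as cv

    src = cv.imread("object.jpg")        # patch to insert
    dst = cv.imread("background.jpg")    # destination scene
    mask = 255 * np.ones(src.shape[:2], dtype=np.uint8)  # clone the whole patch
    center = (dst.shape[1] // 2, dst.shape[0] // 2)      # (x, y) point in dst

    blended = cv.seamlessClone(src, dst, mask, center, cv.NORMAL_CLONE)
    cv.imwrite("blended.png", blended)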
@@ -750,18 +768,16 @@ CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArra
         float alpha = 0.2f, float beta = 0.4f);
 
 /** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
-washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge
-Detector is used.
+washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.
 
 @param src Input 8-bit 3-channel image.
 @param mask Input 8-bit 1 or 3-channel image.
 @param dst Output image with the same size and type as src.
-@param low_threshold Range from 0 to 100.
+@param low_threshold %Range from 0 to 100.
 @param high_threshold Value \> 100.
 @param kernel_size The size of the Sobel kernel to be used.
 
-**NOTE:**
-
+@note
 The algorithm assumes that the color of the source image is close to that of the destination. This
 assumption means that when the colors don't match, the source image color gets tinted toward the
 color of the destination image.
@@ -775,16 +791,21 @@ CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray
 //! @addtogroup photo_render
 //! @{
 
+//! Edge preserving filters
+enum
+{
+    RECURS_FILTER = 1, //!< Recursive Filtering
+    NORMCONV_FILTER = 2 //!< Normalized Convolution Filtering
+};
+
 /** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
 filters are used in many different applications @cite EM11 .
 
 @param src Input 8-bit 3-channel image.
 @param dst Output 8-bit 3-channel image.
-@param flags Edge preserving filters:
-- **RECURS_FILTER** = 1
-- **NORMCONV_FILTER** = 2
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
         float sigma_s = 60, float sigma_r = 0.4f);
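A quick hedged Python sketch of the filter with the two flags the rewritten @param line refers to (the input path is a placeholder):

    import cv2 as cv

    src = cv.imread("input.jpg")
    # flags selects cv.RECURS_FILTER (1) or cv.NORMCONV_FILTER (2);
    # sigma_s in [0, 200] controls the spatial extent, sigma_r in [0, 1] the color range.
    smoothed = cv.edgePreservingFilter(src, flags=cv.RECURS_FILTER, sigma_s=60, sigma_r=0.4)
    cv.imwrite("smoothed.png", smoothed)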
@@ -793,8 +814,8 @@ CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flag
 
 @param src Input 8-bit 3-channel image.
 @param dst Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
         float sigma_r = 0.15f);
@@ -807,9 +828,9 @@ An example using non-photorealistic line drawing functions
 @param src Input 8-bit 3-channel image.
 @param dst1 Output 8-bit 1-channel image.
 @param dst2 Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
-@param shade_factor Range between 0 to 0.1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
+@param shade_factor %Range between 0 to 0.1.
 */
 CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
         float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
@@ -820,8 +841,8 @@ contrast while preserving, or enhancing, high-contrast features.
 
 @param src Input 8-bit 3-channel image.
 @param dst Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
         float sigma_r = 0.45f);
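The same %Range cleanup covers pencilSketch and stylization; a hedged Python sketch of both calls with their documented defaults (the input path is a placeholder):

    import cv2 as cv

    src = cv.imread("input.jpg")
    gray_sketch, color_sketch = cv.pencilSketch(src, sigma_s=60, sigma_r=0.07, shade_factor=0.02)
    painted = cv.stylization(src, sigma_s=60, sigma_r=0.45)
    cv.imwrite("pencil_gray.png", gray_sketch)
    cv.imwrite("pencil_color.png", color_sketch)
    cv.imwrite("stylized.png", painted)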
@@ -1803,9 +1803,11 @@ bool CvCaptureCAM_V4L::setProperty( int property_id, double _value )
         if (bool(value)) {
             convert_rgb = convertableToRgb();
             return convert_rgb;
+        }else{
+            convert_rgb = false;
+            releaseFrame();
+            return true;
         }
-        convert_rgb = false;
-        return true;
     case cv::CAP_PROP_FOURCC:
     {
         if (palette == static_cast<__u32>(value))
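The branch above handles turning RGB conversion off in the V4L backend; from the user side this is driven through VideoCapture::set. A hedged Python sketch (device index and whether the property is honored depend on the backend):

    import cv2 as cv

    cap = cv.VideoCapture(0, cv.CAP_V4L2)
    # Ask the backend for raw frames in the negotiated FOURCC instead of BGR-converted ones.
    ok = cap.set(cv.CAP_PROP_CONVERT_RGB, 0)
    ret, frame = cap.read()
    print("convert_rgb disabled:", ok, "frame shape:", None if not ret else frame.shape)
    cap.release()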