Merge remote-tracking branch 'origin/3.4' into merge-3.4

This commit is contained in:
Alexander Smorkalov 2023-04-20 16:44:22 +03:00
commit e4a29d93fe
26 changed files with 336 additions and 227 deletions

View File

@ -2,32 +2,32 @@ function(download_ippicv root_var)
set(${root_var} "" PARENT_SCOPE)
# Commit SHA in the opencv_3rdparty repo
set(IPPICV_COMMIT "a56b6ac6f030c312b2dce17430eef13aed9af274")
set(IPPICV_COMMIT "1224f78da6684df04397ac0f40c961ed37f79ccb")
# Define actual ICV versions
if(APPLE)
set(OPENCV_ICV_PLATFORM "macosx")
set(OPENCV_ICV_PACKAGE_SUBDIR "ippicv_mac")
set(OPENCV_ICV_NAME "ippicv_2020_mac_intel64_20191018_general.tgz")
set(OPENCV_ICV_HASH "1c3d675c2a2395d094d523024896e01b")
set(OPENCV_ICV_NAME "ippicv_2021.8_mac_intel64_20230330_general.tgz")
set(OPENCV_ICV_HASH "d2b234a86af1b616958619a4560356d9")
elseif((UNIX AND NOT ANDROID) OR (UNIX AND ANDROID_ABI MATCHES "x86"))
set(OPENCV_ICV_PLATFORM "linux")
set(OPENCV_ICV_PACKAGE_SUBDIR "ippicv_lnx")
if(X86_64)
set(OPENCV_ICV_NAME "ippicv_2020_lnx_intel64_20191018_general.tgz")
set(OPENCV_ICV_HASH "7421de0095c7a39162ae13a6098782f9")
set(OPENCV_ICV_NAME "ippicv_2021.8_lnx_intel64_20230330_general.tgz")
set(OPENCV_ICV_HASH "43219bdc7e3805adcbe3a1e2f1f3ef3b")
else()
set(OPENCV_ICV_NAME "ippicv_2020_lnx_ia32_20191018_general.tgz")
set(OPENCV_ICV_HASH "ad189a940fb60eb71f291321322fe3e8")
set(OPENCV_ICV_NAME "ippicv_2021.8_lnx_ia32_20230330_general.tgz")
set(OPENCV_ICV_HASH "165875443d72faa3fd2146869da90d07")
endif()
elseif(WIN32 AND NOT ARM)
set(OPENCV_ICV_PLATFORM "windows")
set(OPENCV_ICV_PACKAGE_SUBDIR "ippicv_win")
if(X86_64)
set(OPENCV_ICV_NAME "ippicv_2020_win_intel64_20191018_general.zip")
set(OPENCV_ICV_HASH "879741a7946b814455eee6c6ffde2984")
set(OPENCV_ICV_NAME "ippicv_2021.8_win_intel64_20230330_general.zip")
set(OPENCV_ICV_HASH "71e4f58de939f0348ec7fb58ffb17dbf")
else()
set(OPENCV_ICV_NAME "ippicv_2020_win_ia32_20191018_general.zip")
set(OPENCV_ICV_HASH "cd39bdf0c2e1cac9a61101dad7a2413e")
set(OPENCV_ICV_NAME "ippicv_2021.8_win_ia32_20230330_general.zip")
set(OPENCV_ICV_HASH "57fd4648cfe64eae9e2ad9d50173a553")
endif()
else()
return()

View File

@ -41,7 +41,7 @@
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.cols, src.rows, cv.CV_8UC3);
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();

View File

@ -147,7 +147,7 @@ if (dataset === 'COCO') {
["Neck", "LShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["LShoulder", "LElbow"],
["LElbow", "LWrist"], ["Nose", "REye"],
["REye", "REar"], ["Neck", "LEye"],
["REye", "REar"], ["Nose", "LEye"],
["LEye", "LEar"], ["Neck", "MidHip"],
["MidHip", "RHip"], ["RHip", "RKnee"],
["RKnee", "RAnkle"], ["RAnkle", "RBigToe"],

View File

@ -30,7 +30,7 @@ programmer to express ideas in fewer lines of code without reducing readability.
Compared to languages like C/C++, Python is slower. That said, Python can be easily extended with
C/C++, which allows us to write computationally intensive code in C/C++ and create Python wrappers
that can be used as Python modules. This gives us two advantages: first, the code is as fast as the
original C/C++ code (since it is the actual C++ code working in background) and second, it easier to
original C/C++ code (since it is the actual C++ code working in the background) and second, it is easier to
code in Python than C/C++. OpenCV-Python is a Python wrapper for the original OpenCV C++
implementation.
@ -79,8 +79,9 @@ Below is the list of contributors who submitted tutorials to OpenCV-Python.
Additional Resources
--------------------
-# A Quick guide to Python - [A Byte of Python](http://swaroopch.com/notes/python/)
2. [NumPy Quickstart tutorial](https://numpy.org/devdocs/user/quickstart.html)
3. [NumPy Reference](https://numpy.org/devdocs/reference/index.html#reference)
4. [OpenCV Documentation](http://docs.opencv.org/)
-# A Quick guide to Python - [A Byte of Python](https://python.swaroopch.com/)
1. [A Quick guide to Python](https://www.freecodecamp.org/news/the-python-guide-for-beginners/)
2. [NumPy Quickstart tutorial](https://numpy.org/doc/stable/user/quickstart.html)
3. [NumPy Reference](https://numpy.org/doc/stable/reference/index.html)
4. [OpenCV Documentation](https://docs.opencv.org/)
5. [OpenCV Forum](https://forum.opencv.org/)

View File

@ -33,6 +33,8 @@ Installing OpenCV from prebuilt binaries
-# Copy **cv2.pyd** to **C:/Python27/lib/site-packages**.
-# Copy the **opencv_world.dll** file to **C:/Python27/lib/site-packages**
-# Open Python IDLE and type the following code in the Python terminal.
@code
>>> import cv2 as cv

View File

@ -11,29 +11,30 @@
| Compatibility | OpenCV >= 3.4.1 |
## Introduction
Deep learning is a fast growing area. The new approaches to build neural networks
usually introduce new types of layers. They could be modifications of existing
ones or implement outstanding researching ideas.
Deep learning is a fast-growing area. New approaches to building neural networks
usually introduce new types of layers. These could be modifications of existing
ones or implementations of novel research ideas.
OpenCV gives an opportunity to import and run networks from different deep learning
frameworks. There are a number of the most popular layers. However you can face
a problem that your network cannot be imported using OpenCV because of unimplemented layers.
OpenCV allows importing and running networks from different deep learning frameworks.
The most popular layer types are supported. However, you may find that your network
cannot be imported with OpenCV because some of its layers
are not implemented in OpenCV's deep learning engine.
The first solution is to create a feature request at https://github.com/opencv/opencv/issues
mentioning details such a source of model and type of new layer. A new layer could
be implemented if OpenCV community shares this need.
mentioning details such as the source of the model and the type of the new layer.
The new layer could be implemented if the OpenCV community shares this need.
The second way is to define a **custom layer** so OpenCV's deep learning engine
The second way is to define a **custom layer** so that OpenCV's deep learning engine
will know how to use it. This tutorial is dedicated to showing you the process of deep
learning models import customization.
learning model's import customization.
## Define a custom layer in C++
A deep learning layer is a building block of a network's pipeline.
It has connections to **input blobs** and writes results to **output blobs**.
There are trained **weights** and **hyper-parameters**.
Layers' names, types, weights and hyper-parameters are stored in files are generated by
native frameworks during training. If OpenCV mets unknown layer type it throws an
exception trying to read a model:
Layers' names, types, weights and hyper-parameters are stored in files that are
generated by native frameworks during training. If OpenCV encounters an unknown
layer type, it throws an exception while trying to read the model:
```
Unspecified error: Can't create layer "layer_name" of type "MyType" in function getLayerInstance
@ -69,7 +70,7 @@ This method should create an instance of your layer and return cv::Ptr with it.
@snippet dnn/custom_layers.hpp MyLayer::getMemoryShapes
Returns layer's output shapes depends on input shapes. You may request an extra
Returns the layer's output shapes depending on the input shapes. You may request extra
memory using `internals`.
- Run a layer
@ -79,20 +80,20 @@ memory using `internals`.
Implement a layer's logic here. Compute outputs for given inputs.
@note OpenCV manages memory allocated for layers. In most cases the same memory
can be reused between layers. So your `forward` implementation should not rely that
the second invocation of `forward` will has the same data at `outputs` and `internals`.
can be reused between layers. So your `forward` implementation should not rely on
the second invocation of `forward` having the same data at `outputs` and `internals`.
- Optional `finalize` method
@snippet dnn/custom_layers.hpp MyLayer::finalize
The chain of methods are the following: OpenCV deep learning engine calls `create`
method once then it calls `getMemoryShapes` for an every created layer then you
can make some preparations depends on known input dimensions at cv::dnn::Layer::finalize.
After network was initialized only `forward` method is called for an every network's input.
The chain of methods is the following: the OpenCV deep learning engine calls the `create`
method once, then it calls `getMemoryShapes` for every created layer, then you
can make preparations that depend on the known input dimensions at cv::dnn::Layer::finalize.
After the network is initialized, only the `forward` method is called for every network input.
@note Varying input blobs' sizes such height or width or batch size you make OpenCV
reallocate all the internal memory. That leads efficiency gaps. Try to initialize
@note Varying input blobs' sizes such as height, width or batch size makes OpenCV
reallocate all the internal memory. That leads to efficiency gaps. Try to initialize
and deploy models using a fixed batch size and fixed image dimensions.
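Putting these methods together, a minimal pass-through layer might look like the sketch below. This is only an illustration under the rules above, not the tutorial's custom_layers.hpp; the names `MyTypeLayer` and `MyType` are placeholders for your own layer class and type string.
```
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

// Hypothetical pass-through layer for an unknown type "MyType".
class MyTypeLayer CV_FINAL : public cv::dnn::Layer
{
public:
    MyTypeLayer(const cv::dnn::LayerParams &params) : cv::dnn::Layer(params) {}

    // Called by the importer for every layer of type "MyType".
    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams &params)
    {
        return cv::Ptr<cv::dnn::Layer>(new MyTypeLayer(params));
    }

    // Output shapes depend only on input shapes here; no internal buffers requested.
    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int /*requiredOutputs*/,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &/*internals*/) const CV_OVERRIDE
    {
        outputs = inputs;  // identity: one output per input, same shape
        return false;
    }

    // Compute outputs from inputs; do not cache blob pointers between calls.
    virtual void forward(cv::InputArrayOfArrays inputs_arr,
                         cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays /*internals_arr*/) CV_OVERRIDE
    {
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        inputs[0].copyTo(outputs[0]);
    }
};

// Make the engine aware of the new type before reading the model:
// CV_DNN_REGISTER_LAYER_CLASS(MyType, MyTypeLayer);
```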
## Example: custom layer from Caffe
@ -209,7 +210,7 @@ deep learning model. That was trained with one and only difference comparing to
a current version of [Caffe framework](http://caffe.berkeleyvision.org/). `Crop`
layers, which receive two input blobs and crop the first one to match the spatial
dimensions of the second, used to crop from the center. Nowadays Caffe's layer does it
from the top-left corner. So using the latest version of Caffe or OpenCV you'll
from the top-left corner. So using the latest version of Caffe or OpenCV you will
get shifted results with filled borders.
Next we're going to replace OpenCV's `Crop` layer that makes top-left cropping by
@ -225,7 +226,7 @@ a centric one.
@snippet dnn/edge_detection.py Register
That's it! We've replaced an implemented OpenCV's layer to a custom one.
That's it! We have replaced OpenCV's implementation of the layer with a custom one.
You may find a full script in the [source code](https://github.com/opencv/opencv/tree/4.x/samples/dnn/edge_detection.py).
<table border="0">

View File

@ -2517,28 +2517,28 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
static void
icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
const CvMat* R, const CvMat* newCameraMatrix, CvSize imgSize,
cv::Rect_<float>& inner, cv::Rect_<float>& outer )
cv::Rect_<double>& inner, cv::Rect_<double>& outer )
{
const int N = 9;
int x, y, k;
cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_32FC2));
CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_64FC2));
CvPoint2D64f* pts = (CvPoint2D64f*)(_pts->data.ptr);
for( y = k = 0; y < N; y++ )
for( x = 0; x < N; x++ )
pts[k++] = cvPoint2D32f((float)x*imgSize.width/(N-1),
(float)y*imgSize.height/(N-1));
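// Sample an N x N grid; with (size-1)/(N-1) steps the outermost points land exactly on the image border.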
pts[k++] = cvPoint2D64f((double)x*(imgSize.width-1)/(N-1),
(double)y*(imgSize.height-1)/(N-1));
cvUndistortPoints(_pts, _pts, cameraMatrix, distCoeffs, R, newCameraMatrix);
float iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
float oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
double iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
double oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
// find the inscribed rectangle.
// the code will likely not work with extreme rotation matrices (R) (>45 degrees)
for( y = k = 0; y < N; y++ )
for( x = 0; x < N; x++ )
{
CvPoint2D32f p = pts[k++];
CvPoint2D64f p = pts[k++];
oX0 = MIN(oX0, p.x);
oX1 = MAX(oX1, p.x);
oY0 = MIN(oY0, p.y);
@ -2553,8 +2553,8 @@ icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
if( y == N-1 )
iY1 = MIN(iY1, p.y);
}
inner = cv::Rect_<float>(iX0, iY0, iX1-iX0, iY1-iY0);
outer = cv::Rect_<float>(oX0, oY0, oX1-oX0, oY1-oY0);
inner = cv::Rect_<double>(iX0, iY0, iX1-iX0, iY1-iY0);
outer = cv::Rect_<double>(oX0, oY0, oX1-oX0, oY1-oY0);
}
@ -2567,7 +2567,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
{
double _om[3], _t[3] = {0}, _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];
cv::Rect_<float> inner1, inner2, outer1, outer2;
cv::Rect_<double> inner1, inner2, outer1, outer2;
CvMat om = cvMat(3, 1, CV_64F, _om);
CvMat t = cvMat(3, 1, CV_64F, _t);
@ -2774,7 +2774,7 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
CvMat* newCameraMatrix, CvSize newImgSize,
CvRect* validPixROI, int centerPrincipalPoint )
{
cv::Rect_<float> inner, outer;
cv::Rect_<double> inner, outer;
newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize;
double M[3][3];
@ -2804,10 +2804,10 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
if( validPixROI )
{
inner = cv::Rect_<float>((float)((inner.x - cx0)*s + cx),
(float)((inner.y - cy0)*s + cy),
(float)(inner.width*s),
(float)(inner.height*s));
inner = cv::Rect_<double>((double)((inner.x - cx0)*s + cx),
(double)((inner.y - cy0)*s + cy),
(double)(inner.width*s),
(double)(inner.height*s));
cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height));
r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
*validPixROI = cvRect(r);

View File

@ -157,6 +157,104 @@ void CV_DefaultNewCameraMatrixTest::prepare_to_validation( int /*test_case_idx*/
//---------
class CV_GetOptimalNewCameraMatrixNoDistortionTest : public cvtest::ArrayTest
{
public:
CV_GetOptimalNewCameraMatrixNoDistortionTest();
protected:
int prepare_test_case (int test_case_idx);
void prepare_to_validation(int test_case_idx);
void get_test_array_types_and_sizes(int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types);
void run_func();
private:
cv::Mat camera_mat;
cv::Mat distortion_coeffs;
cv::Mat new_camera_mat;
cv::Size img_size;
double alpha;
bool center_principal_point;
int matrix_type;
static const int MAX_X = 2048;
static const int MAX_Y = 2048;
};
CV_GetOptimalNewCameraMatrixNoDistortionTest::CV_GetOptimalNewCameraMatrixNoDistortionTest()
{
test_array[INPUT].push_back(NULL); // camera_mat
test_array[INPUT].push_back(NULL); // distortion_coeffs
test_array[OUTPUT].push_back(NULL); // new_camera_mat
test_array[REF_OUTPUT].push_back(NULL);
alpha = 0.0;
center_principal_point = false;
matrix_type = 0;
}
void CV_GetOptimalNewCameraMatrixNoDistortionTest::get_test_array_types_and_sizes(int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types)
{
cvtest::ArrayTest::get_test_array_types_and_sizes(test_case_idx, sizes, types);
RNG& rng = ts->get_rng();
matrix_type = types[INPUT][0] = types[INPUT][1] = types[OUTPUT][0] = types[REF_OUTPUT][0] = cvtest::randInt(rng)%2 ? CV_64F : CV_32F;
sizes[INPUT][0] = sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(3,3);
sizes[INPUT][1] = cvSize(1,4);
}
int CV_GetOptimalNewCameraMatrixNoDistortionTest::prepare_test_case(int test_case_idx)
{
int code = cvtest::ArrayTest::prepare_test_case( test_case_idx );
if (code <= 0)
return code;
RNG& rng = ts->get_rng();
alpha = cvtest::randReal(rng);
center_principal_point = ((cvtest::randInt(rng) % 2)!=0);
// Generate random camera matrix. Use floating point precision for source to avoid precision loss
img_size.width = cvtest::randInt(rng) % MAX_X + 1;
img_size.height = cvtest::randInt(rng) % MAX_Y + 1;
const float aspect_ratio = static_cast<float>(img_size.width) / img_size.height;
float cam_array[9] = {0,0,0,0,0,0,0,0,1};
cam_array[2] = static_cast<float>((img_size.width - 1)*0.5); // center
cam_array[5] = static_cast<float>((img_size.height - 1)*0.5); // center
cam_array[0] = static_cast<float>(MAX(img_size.width, img_size.height)/(0.9 - cvtest::randReal(rng)*0.6));
cam_array[4] = aspect_ratio*cam_array[0];
Mat& input_camera_mat = test_mat[INPUT][0];
cvtest::convert(Mat(3, 3, CV_32F, cam_array), input_camera_mat, input_camera_mat.type());
camera_mat = input_camera_mat;
// Generate zero distortion matrix
const Mat zero_dist_coeffs = Mat::zeros(1, 4, CV_32F);
Mat& input_dist_coeffs = test_mat[INPUT][1];
cvtest::convert(zero_dist_coeffs, input_dist_coeffs, input_dist_coeffs.type());
distortion_coeffs = input_dist_coeffs;
return code;
}
void CV_GetOptimalNewCameraMatrixNoDistortionTest::run_func()
{
new_camera_mat = cv::getOptimalNewCameraMatrix(camera_mat, distortion_coeffs, img_size, alpha, img_size, NULL, center_principal_point);
}
void CV_GetOptimalNewCameraMatrixNoDistortionTest::prepare_to_validation(int /*test_case_idx*/)
{
const Mat& src = test_mat[INPUT][0];
Mat& dst = test_mat[REF_OUTPUT][0];
cvtest::copy(src, dst);
Mat& output = test_mat[OUTPUT][0];
cvtest::convert(new_camera_mat, output, output.type());
}
//---------
class CV_UndistortPointsTest : public cvtest::ArrayTest
{
public:
@ -991,6 +1089,7 @@ double CV_InitInverseRectificationMapTest::get_success_error_level( int /*test_c
//////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Calib3d_DefaultNewCameraMatrix, accuracy) { CV_DefaultNewCameraMatrixTest test; test.safe_run(); }
TEST(Calib3d_GetOptimalNewCameraMatrixNoDistortion, accuracy) { CV_GetOptimalNewCameraMatrixNoDistortionTest test; test.safe_run(); }
TEST(Calib3d_UndistortPoints, accuracy) { CV_UndistortPointsTest test; test.safe_run(); }
TEST(Calib3d_InitUndistortRectifyMap, accuracy) { CV_InitUndistortRectifyMapTest test; test.safe_run(); }
TEST(DISABLED_Calib3d_InitInverseRectificationMap, accuracy) { CV_InitInverseRectificationMapTest test; test.safe_run(); }

View File

@ -880,14 +880,10 @@ OPENCV_HAL_IMPL_CMP_OP(<=)
For all types except 64-bit integer values. */
OPENCV_HAL_IMPL_CMP_OP(>=)
/** @brief Equal comparison
For all types except 64-bit integer values. */
/** @brief Equal comparison */
OPENCV_HAL_IMPL_CMP_OP(==)
/** @brief Not equal comparison
For all types except 64-bit integer values. */
/** @brief Not equal comparison */
OPENCV_HAL_IMPL_CMP_OP(!=)
template<int n>

View File

@ -1036,18 +1036,6 @@ OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_min, vminq_f64)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_max, vmaxq_f64)
#endif
#if CV_SIMD128_64F
inline int64x2_t vmvnq_s64(int64x2_t a)
{
int64x2_t vx = vreinterpretq_s64_u32(vdupq_n_u32(0xFFFFFFFF));
return veorq_s64(a, vx);
}
inline uint64x2_t vmvnq_u64(uint64x2_t a)
{
uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF));
return veorq_u64(a, vx);
}
#endif
#define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \
@ -1069,9 +1057,47 @@ OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32)
#if defined(__aarch64__) || defined(_M_ARM64)
static inline uint64x2_t vmvnq_u64(uint64x2_t a)
{
uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF));
return veorq_u64(a, vx);
}
//OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64)
//OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64)
static inline v_uint64x2 operator == (const v_uint64x2& a, const v_uint64x2& b)
{ return v_uint64x2(vceqq_u64(a.val, b.val)); }
static inline v_uint64x2 operator != (const v_uint64x2& a, const v_uint64x2& b)
{ return v_uint64x2(vmvnq_u64(vceqq_u64(a.val, b.val))); }
static inline v_int64x2 operator == (const v_int64x2& a, const v_int64x2& b)
{ return v_int64x2(vreinterpretq_s64_u64(vceqq_s64(a.val, b.val))); }
static inline v_int64x2 operator != (const v_int64x2& a, const v_int64x2& b)
{ return v_int64x2(vreinterpretq_s64_u64(vmvnq_u64(vceqq_s64(a.val, b.val)))); }
#else
static inline v_uint64x2 operator == (const v_uint64x2& a, const v_uint64x2& b)
{
uint32x4_t cmp = vceqq_u32(vreinterpretq_u32_u64(a.val), vreinterpretq_u32_u64(b.val));
uint32x4_t swapped = vrev64q_u32(cmp);
return v_uint64x2(vreinterpretq_u64_u32(vandq_u32(cmp, swapped)));
}
static inline v_uint64x2 operator != (const v_uint64x2& a, const v_uint64x2& b)
{
uint32x4_t cmp = vceqq_u32(vreinterpretq_u32_u64(a.val), vreinterpretq_u32_u64(b.val));
uint32x4_t swapped = vrev64q_u32(cmp);
uint64x2_t v_eq = vreinterpretq_u64_u32(vandq_u32(cmp, swapped));
uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF));
return v_uint64x2(veorq_u64(v_eq, vx));
}
static inline v_int64x2 operator == (const v_int64x2& a, const v_int64x2& b)
{
return v_reinterpret_as_s64(v_reinterpret_as_u64(a) == v_reinterpret_as_u64(b));
}
static inline v_int64x2 operator != (const v_int64x2& a, const v_int64x2& b)
{
return v_reinterpret_as_s64(v_reinterpret_as_u64(a) != v_reinterpret_as_u64(b));
}
#endif
#if CV_SIMD128_64F
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float64x2, vreinterpretq_f64_u64, f64, u64)
#endif
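For reference, the pre-AArch64 fallback above exploits the fact that a 64-bit lane is equal exactly when both of its 32-bit halves are equal: `vceqq_u32` builds per-half masks, `vrev64q_u32` swaps the halves within each 64-bit lane, and the AND leaves all-ones only when both halves matched. A scalar model of that trick (an illustration only, not part of the patch):
```
#include <cstdint>

// Scalar model of the 32-bit fallback: a 64-bit lane compares equal
// exactly when both of its 32-bit halves compare equal.
inline uint64_t eq64_via_u32(uint64_t a, uint64_t b)
{
    uint32_t lo = (uint32_t)a == (uint32_t)b ? 0xFFFFFFFFu : 0u;                 // low-half mask (vceqq_u32)
    uint32_t hi = (uint32_t)(a >> 32) == (uint32_t)(b >> 32) ? 0xFFFFFFFFu : 0u; // high-half mask
    uint32_t both = lo & hi; // vrev64q_u32 swaps halves; AND keeps all-ones only if both matched
    return ((uint64_t)both << 32) | both;
}
```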

View File

@ -62,10 +62,6 @@ static bool ipp_countNonZero( Mat &src, int &res )
{
CV_INSTRUMENT_REGION_IPP();
// see https://github.com/opencv/opencv/issues/17453
if (src.dims <= 2 && src.step > 520000 && cv::ipp::getIppTopFeatures() == ippCPUID_SSE42)
return false;
#if IPP_VERSION_X100 < 201801
// Poor performance of SSE42
if(cv::ipp::getIppTopFeatures() == ippCPUID_SSE42)

View File

@ -531,7 +531,7 @@ inline int hal_ni_dftFree1D(cvhalDFT *context) { return CV_HAL_ERROR_NOT_IMPLEME
/**
@param context double pointer to context storing all necessary data
@param width,height image dimensions
@param depth image type (CV_32F or CV64F)
@param depth image type (CV_32F or CV_64F)
@param src_channels number of channels in input image
@param dst_channels number of channels in output image
@param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...)
@ -558,7 +558,7 @@ inline int hal_ni_dftFree2D(cvhalDFT *context) { return CV_HAL_ERROR_NOT_IMPLEME
/**
@param context double pointer to context storing all necessary data
@param width,height image dimensions
@param depth image type (CV_32F or CV64F)
@param depth image type (CV_32F or CV_64F)
@param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...)
*/
inline int hal_ni_dctInit2D(cvhalDFT **context, int width, int height, int depth, int flags) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }

View File

@ -7,10 +7,6 @@
#include "mathfuncs_core.simd.hpp"
#include "mathfuncs_core.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
#define IPP_DISABLE_MAGNITUDE_32F 1 // accuracy: https://github.com/opencv/opencv/issues/19506
namespace cv { namespace hal {
///////////////////////////////////// ATAN2 ////////////////////////////////////
@ -48,25 +44,8 @@ void magnitude32f(const float* x, const float* y, float* mag, int len)
CV_INSTRUMENT_REGION();
CALL_HAL(magnitude32f, cv_hal_magnitude32f, x, y, mag, len);
#ifdef HAVE_IPP
bool allowIPP = true;
#ifdef IPP_DISABLE_MAGNITUDE_32F
if (cv::ipp::getIppTopFeatures() & (
#if IPP_VERSION_X100 >= 201700
ippCPUID_AVX512F |
#endif
ippCPUID_AVX2)
)
{
allowIPP = (len & 7) == 0;
}
#endif
// SSE42 performance issues
CV_IPP_RUN((IPP_VERSION_X100 > 201800 || cv::ipp::getIppTopFeatures() != ippCPUID_SSE42) && allowIPP,
CV_INSTRUMENT_FUN_IPP(ippsMagnitude_32f, x, y, mag, len) >= 0);
#endif
CV_IPP_RUN(IPP_VERSION_X100 > 201800 || cv::ipp::getIppTopFeatures() != ippCPUID_SSE42, CV_INSTRUMENT_FUN_IPP(ippsMagnitude_32f, x, y, mag, len) >= 0);
CV_CPU_DISPATCH(magnitude32f, (x, y, mag, len),
CV_CPU_DISPATCH_MODES_ALL);

View File

@ -995,16 +995,6 @@ static bool ipp_norm(InputArray _src1, InputArray _src2, int normType, InputArra
type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_16u_C3CMR :
type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_32f_C3CMR :
0) : 0;
if (cv::ipp::getIppTopFeatures() & (
#if IPP_VERSION_X100 >= 201700
ippCPUID_AVX512F |
#endif
ippCPUID_AVX2)
) // IPP_DISABLE_NORM_16UC3_mask_small (#11399)
{
if (normType == NORM_L1 && type == CV_16UC3 && sz.width < 16)
return false;
}
if( ippiNormDiff_C3CMR )
{
Ipp64f norm1, norm2, norm3;

View File

@ -166,7 +166,7 @@ template <typename R> struct Data
{
*this = r;
}
operator R ()
operator R () const
{
return initializer<VTraits<R>::max_nlanes>().init(*this);
}
@ -1736,11 +1736,39 @@ template<typename R> struct TheTest
}
#endif
#if CV_SIMD_64F
void do_check_cmp64(const Data<R>& dataA, const Data<R>& dataB)
{
R a = dataA;
R b = dataB;
#if CV_SIMD_SCALABLE
Data<R> dataEQ = v_eq(a, b);
Data<R> dataNE = v_ne(a, b);
#else
Data<R> dataEQ = (a == b);
Data<R> dataNE = (a != b);
#endif
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
SCOPED_TRACE(cv::format("i=%d", i));
if (cvtest::debugLevel > 0) cout << "i=" << i << " ( " << dataA[i] << " vs " << dataB[i] << " ): eq=" << dataEQ[i] << " ne=" << dataNE[i] << endl;
EXPECT_NE((LaneType)dataEQ[i], (LaneType)dataNE[i]);
if (dataA[i] == dataB[i])
EXPECT_EQ((LaneType)-1, (LaneType)dataEQ[i]);
else
EXPECT_EQ((LaneType)0, (LaneType)dataEQ[i]);
if (dataA[i] != dataB[i])
EXPECT_EQ((LaneType)-1, (LaneType)dataNE[i]);
else
EXPECT_EQ((LaneType)0, (LaneType)dataNE[i]);
}
}
TheTest & test_cmp64()
{
Data<R> dataA, dataB;
R a = dataA, b = dataB;
Data<R> dataA;
Data<R> dataB;
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
@ -1748,37 +1776,25 @@ template<typename R> struct TheTest
}
dataA[0]++;
a = dataA, b = dataB;
do_check_cmp64(dataA, dataB);
do_check_cmp64(dataB, dataA);
Data<R> resC = (a == b);
Data<R> resD = (a != b);
dataA[0] = dataB[0];
dataA[1] += (((LaneType)1) << 32);
do_check_cmp64(dataA, dataB);
do_check_cmp64(dataB, dataA);
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
SCOPED_TRACE(cv::format("i=%d", i));
EXPECT_EQ(dataA[i] == dataB[i], resC[i] != 0);
EXPECT_EQ(dataA[i] != dataB[i], resD[i] != 0);
}
dataA[0] = (LaneType)-1;
dataB[0] = (LaneType)-1;
dataA[1] = (LaneType)-1;
dataB[1] = (LaneType)2;
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
dataA[i] = dataB[i] = (LaneType)-1;
}
do_check_cmp64(dataA, dataB);
do_check_cmp64(dataB, dataA);
a = dataA, b = dataB;
resC = (a == b);
resD = (a != b);
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
SCOPED_TRACE(cv::format("i=%d", i));
EXPECT_EQ(dataA[i] == dataB[i], resC[i] != 0);
EXPECT_EQ(dataA[i] != dataB[i], resD[i] != 0);
}
return *this;
}
#endif
};
#define DUMP_ENTRY(type) printf("SIMD%d: %s\n", 8*VTraits<v_uint8>::vlanes(), CV__TRACE_FUNCTION);
@ -2023,9 +2039,8 @@ void test_hal_intrin_uint64()
TheTest<v_uint64>()
.test_loadstore()
.test_addsub()
#if CV_SIMD_64F
.test_cmp64()
#endif
//.test_cmp() - not declared as supported
.test_shift<1>().test_shift<8>()
.test_logic()
.test_reverse()
@ -2043,9 +2058,8 @@ void test_hal_intrin_int64()
TheTest<v_int64>()
.test_loadstore()
.test_addsub()
#if CV_SIMD_64F
.test_cmp64()
#endif
//.test_cmp() - not declared as supported
.test_shift<1>().test_shift<8>()
.test_logic()
.test_reverse()
@ -2128,7 +2142,8 @@ void test_hal_intrin_float64()
.test_rotate<2>().test_rotate<3>()
#endif
;
#else
std::cout << "SKIP: CV_SIMD_64F is not available" << std::endl;
#endif
}

View File

@ -45,6 +45,21 @@
namespace cvflann
{
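// RAII guard: closes the owned FILE* on scope exit, covering every return path below.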
class FILEScopeGuard {
public:
explicit FILEScopeGuard(FILE* file) {
file_ = file;
};
~FILEScopeGuard() {
fclose(file_);
};
private:
FILE* file_;
};
/**
* Sets the log level used for all flann functions
@ -69,7 +84,6 @@ struct SavedIndexParams : public IndexParams
}
};
template<typename Distance>
NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const cv::String& filename, Distance distance)
{
@ -79,13 +93,13 @@ NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>
if (fin == NULL) {
return NULL;
}
FILEScopeGuard fscgd(fin);
IndexHeader header = load_header(fin);
if (header.data_type != Datatype<ElementType>::type()) {
fclose(fin);
FLANN_THROW(cv::Error::StsError, "Datatype of saved index is different than of the one to be created.");
}
if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) {
fclose(fin);
FLANN_THROW(cv::Error::StsError, "The index saved belongs to a different dataset");
}
@ -93,7 +107,6 @@ NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>
params["algorithm"] = header.index_type;
NNIndex<Distance>* nnIndex = create_index_by_type<Distance>(dataset, params, distance);
nnIndex->loadIndex(fin);
fclose(fin);
return nnIndex;
}
@ -107,7 +120,7 @@ public:
typedef typename Distance::ResultType DistanceType;
Index(const Matrix<ElementType>& features, const IndexParams& params, Distance distance = Distance() )
: index_params_(params)
:index_params_(params)
{
flann_algorithm_t index_type = get_param<flann_algorithm_t>(params,"algorithm");
loaded_ = false;

View File

@ -806,10 +806,13 @@ bool Index::load_(const String& filename)
bool ok = true;
FILE* fin = fopen(filename.c_str(), "rb");
if (fin == NULL)
if (fin == NULL) {
return false;
}
FILEScopeGuard fscgd(fin);
::cvflann::IndexHeader header = ::cvflann::load_header(fin);
algo = header.index_type;
featureType = header.data_type == FLANN_UINT8 ? CV_8U :
header.data_type == FLANN_INT8 ? CV_8S :
@ -824,7 +827,6 @@ bool Index::load_(const String& filename)
{
fprintf(stderr, "Reading FLANN index error: the saved data size (%d, %d) or type (%d) is different from the passed one (%d, %d), %d\n",
(int)header.rows, (int)header.cols, featureType, data.rows, data.cols, data.type());
fclose(fin);
return false;
}
@ -837,7 +839,6 @@ bool Index::load_(const String& filename)
(distType != FLANN_DIST_HAMMING && featureType == CV_32F)) )
{
fprintf(stderr, "Reading FLANN index error: unsupported feature type %d for the index type %d\n", featureType, algo);
fclose(fin);
return false;
}
@ -877,8 +878,6 @@ bool Index::load_(const String& filename)
ok = false;
}
if( fin )
fclose(fin);
return ok;
}

View File

@ -626,7 +626,7 @@ CV_IMPL int cvWaitKey (int maxWait)
inMode:NSDefaultRunLoopMode
dequeue:YES];
if([event type] == NSKeyDown) {
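// Skip key events whose characters string is empty; characterAtIndex:0 would raise on it.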
if([event type] == NSKeyDown && [[event characters] length]) {
returnCode = [[event characters] characterAtIndex:0];
break;
}

View File

@ -2420,21 +2420,6 @@ std::shared_ptr<CvTrackbar> createTrackbar_(CvWindow& window, const std::string&
/* Retrieve current buttons count */
int bcount = (int)SendMessage(window.toolbar.toolbar, TB_BUTTONCOUNT, 0, 0);
if (bcount > 0)
{
/* If this is not the first button then we need to
separate it from the previous one */
tbs.iBitmap = 0;
tbs.idCommand = bcount; // Set button id to it's number
tbs.iString = 0;
tbs.fsStyle = TBSTYLE_SEP;
tbs.fsState = TBSTATE_ENABLED;
SendMessage(window.toolbar.toolbar, TB_ADDBUTTONS, 1, (LPARAM)&tbs);
// Retrieve current buttons count
bcount = (int)SendMessage(window.toolbar.toolbar, TB_BUTTONCOUNT, 0, 0);
}
/* Add a button which we're going to cover with the slider */
tbs.iBitmap = 0;
tbs.idCommand = bcount; // Set button id to its number

View File

@ -256,18 +256,26 @@ CV_EXPORTS_W size_t imcount(const String& filename, int flags = IMREAD_ANYCOLOR)
/** @brief Saves an image to a specified file.
The function imwrite saves the image to the specified file. The image format is chosen based on the
filename extension (see cv::imread for the list of extensions). In general, only 8-bit
filename extension (see cv::imread for the list of extensions). In general, only 8-bit unsigned (CV_8U)
single-channel or 3-channel (with 'BGR' channel order) images
can be saved using this function, with these exceptions:
- 16-bit unsigned (CV_16U) images can be saved in the case of PNG, JPEG 2000, and TIFF formats
- 32-bit float (CV_32F) images can be saved in PFM, TIFF, OpenEXR, and Radiance HDR formats;
3-channel (CV_32FC3) TIFF images will be saved using the LogLuv high dynamic range encoding
(4 bytes per pixel)
- PNG images with an alpha channel can be saved using this function. To do this, create
8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels
should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535 (see the code sample below).
- Multiple images (vector of Mat) can be saved in TIFF format (see the code sample below).
- With OpenEXR encoder, only 32-bit float (CV_32F) images can be saved.
  - 8-bit unsigned (CV_8U) images are not supported.
- With Radiance HDR encoder, all image types except 64-bit float (CV_64F) can be saved.
  - All images will be converted to 32-bit float (CV_32F).
- With JPEG 2000 encoder, 8-bit unsigned (CV_8U) and 16-bit unsigned (CV_16U) images can be saved.
- With PAM encoder, 8-bit unsigned (CV_8U) and 16-bit unsigned (CV_16U) images can be saved.
- With PNG encoder, 8-bit unsigned (CV_8U) and 16-bit unsigned (CV_16U) images can be saved.
  - PNG images with an alpha channel can be saved using this function. To do this, create
    an 8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels
    should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535 (see the code sample below).
- With PGM/PPM encoder, 8-bit unsigned (CV_8U) and 16-bit unsigned (CV_16U) images can be saved.
- With TIFF encoder, 8-bit unsigned (CV_8U), 16-bit unsigned (CV_16U),
  32-bit float (CV_32F) and 64-bit float (CV_64F) images can be saved.
  - Multiple images (vector of Mat) can be saved in TIFF format (see the code sample below).
  - 32-bit float 3-channel (CV_32FC3) TIFF images will be saved
    using the LogLuv high dynamic range encoding (4 bytes per pixel)
If the image format is not supported, the image will be converted to 8-bit unsigned (CV_8U) and saved that way.
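As a quick illustration of the BGRA/alpha rules above, a minimal sketch (not part of the patch; assumes a build with PNG support and a hypothetical output file name):
```
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // 8-bit 4-channel BGRA image; the alpha channel goes last.
    cv::Mat img(240, 320, CV_8UC4, cv::Scalar(255, 0, 0, 255)); // opaque blue
    img(cv::Rect(0, 0, 160, 240)).setTo(cv::Scalar(255, 0, 0, 0)); // left half fully transparent
    return cv::imwrite("alpha.png", img) ? 0 : 1; // PNG keeps the alpha channel
}
```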

View File

@ -420,6 +420,14 @@ inline void HSV2RGB_simd(const v_float32& h, const v_float32& s, const v_float32
}
#endif
// Compute the sector and the new H for HSV and HLS 2 RGB conversions.
inline void ComputeSectorAndClampedH(float& h, int &sector) {
sector = cvFloor(h);
h -= sector;
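// Wrap sector into [0, 6); this also clamps negative hues (e.g. h == -1e-40) correctly.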
sector %= 6;
sector += sector < 0 ? 6 : 0;
}
inline void HSV2RGB_native(float h, float s, float v,
float& b, float& g, float& r,
@ -434,14 +442,7 @@ inline void HSV2RGB_native(float h, float s, float v,
float tab[4];
int sector;
h *= hscale;
h = fmod(h, 6.f);
sector = cvFloor(h);
h -= sector;
if( (unsigned)sector >= 6u )
{
sector = 0;
h = 0.f;
}
ComputeSectorAndClampedH(h, sector);
tab[0] = v;
tab[1] = v*(1.f - s);
@ -1058,13 +1059,7 @@ struct HLS2RGB_f
float p1 = 2*l - p2;
h *= hscale;
// We need both loops to clamp (e.g. for h == -1e-40).
while( h < 0 ) h += 6;
while( h >= 6 ) h -= 6;
CV_DbgAssert( 0 <= h && h < 6 );
sector = cvFloor(h);
h -= sector;
ComputeSectorAndClampedH(h, sector);
tab[0] = p2;
tab[1] = p1;

View File

@ -592,8 +592,8 @@ void LineSegmentDetectorImpl::ll_angle(const double& threshold,
}
}
// Sort
std::sort(ordered_points.begin(), ordered_points.end(), compare_norm);
// Use stable sort to ensure deterministic region growing and thus overall LSD result determinism.
std::stable_sort(ordered_points.begin(), ordered_points.end(), compare_norm);
}
void LineSegmentDetectorImpl::region_grow(const Point2i& s, std::vector<RegionPoint>& reg,

View File

@ -904,7 +904,7 @@ public:
if( s )
{
j = s[i];
CV_Assert( 0 <= j && j < nsamples );
CV_Assert( 0 <= j && j < ((layout == ROW_SAMPLE) ? samples.rows : samples.cols) );
}
values[i] = src[j*sstep];
if( values[i] == MISSED_VAL )

View File

@ -74,7 +74,11 @@ void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, boo
d2 = (h[7] - h[6]) * (h[7] + h[6]);
v1 = -(h[0] * h[1] + h[3] * h[4]) / d1;
v2 = (h[0] * h[0] + h[3] * h[3] - h[1] * h[1] - h[4] * h[4]) / d2;
if (v1 < v2) std::swap(v1, v2);
if (v1 < v2)
{
std::swap(v1, v2);
std::swap(d1, d2);
}
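// Swap d1/d2 together with v1/v2 so the |d1| > |d2| test below still refers to the matching denominators.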
if (v1 > 0 && v2 > 0) f1 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
else if (v1 > 0) f1 = std::sqrt(v1);
else f1_ok = false;
@ -84,7 +88,11 @@ void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, boo
d2 = h[0] * h[0] + h[1] * h[1] - h[3] * h[3] - h[4] * h[4];
v1 = -h[2] * h[5] / d1;
v2 = (h[5] * h[5] - h[2] * h[2]) / d2;
if (v1 < v2) std::swap(v1, v2);
if (v1 < v2)
{
std::swap(v1, v2);
std::swap(d1, d2);
}
if (v1 > 0 && v2 > 0) f0 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
else if (v1 > 0) f0 = std::sqrt(v1);
else f0_ok = false;

View File

@ -218,6 +218,11 @@ int main( int argc, char** argv )
return 0;
}
inline static bool isGoodBox(const RotatedRect& box) {
// size.height >= size.width always; size.width == 0 only if the points are collinear or coincident
return (box.size.height <= box.size.width * 30) && (box.size.width > 0);
}
// Define trackbar callback function. This function finds contours,
// draws them, and approximates by ellipses.
void processImage(int /*h*/, void*)
@ -276,39 +281,30 @@ void processImage(int /*h*/, void*)
{
vector<Point2f> pts = points[i];
if (pts.size()<=5) {
// Fitting an ellipse requires at least 5 points
if (pts.size()<5) {
continue;
}
if (fitEllipseQ) {
box = fitEllipse(pts);
if( MAX(box.size.width, box.size.height) > MIN(box.size.width, box.size.height)*30 ||
MAX(box.size.width, box.size.height) <= 0 ||
MIN(box.size.width, box.size.height) <= 0){continue;};
if (isGoodBox(box)) {
paper.drawEllipseWithBox(box, fitEllipseColor, 3);
}
}
if (fitEllipseAMSQ) {
boxAMS = fitEllipseAMS(pts);
if( MAX(boxAMS.size.width, boxAMS.size.height) > MIN(boxAMS.size.width, boxAMS.size.height)*30 ||
MAX(box.size.width, box.size.height) <= 0 ||
MIN(box.size.width, box.size.height) <= 0){continue;};
if (isGoodBox(boxAMS)) {
paper.drawEllipseWithBox(boxAMS, fitEllipseAMSColor, 2);
}
}
if (fitEllipseDirectQ) {
boxDirect = fitEllipseDirect(pts);
if( MAX(boxDirect.size.width, boxDirect.size.height) > MIN(boxDirect.size.width, boxDirect.size.height)*30 ||
MAX(box.size.width, box.size.height) <= 0 ||
MIN(box.size.width, box.size.height) <= 0 ){continue;};
if (isGoodBox(boxDirect)){
paper.drawEllipseWithBox(boxDirect, fitEllipseDirectColor, 1);
}
}
if (fitEllipseQ) {
paper.drawEllipseWithBox(box, fitEllipseColor, 3);
}
if (fitEllipseAMSQ) {
paper.drawEllipseWithBox(boxAMS, fitEllipseAMSColor, 2);
}
if (fitEllipseDirectQ) {
paper.drawEllipseWithBox(boxDirect, fitEllipseDirectColor, 1);
}
paper.drawPoints(pts, cv::Scalar(255,255,255));
paper.drawPoints(pts, fitEllipseTrueColor);
}
imshow("result", paper.img);

View File

@ -92,7 +92,7 @@ void Sharpen(const Mat& myImage,Mat& Result)
for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
{
*output++ = saturate_cast<uchar>(5*current[i]
output[i] = saturate_cast<uchar>(5*current[i]
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
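// Indexing with output[i] (instead of *output++) keeps the result aligned with the source column.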
}
}