Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Alexander Alekhin 2020-12-09 18:09:00 +00:00
commit de385009ae
15 changed files with 172 additions and 64 deletions

View File

@@ -122,7 +122,6 @@ if(CV_GCC OR CV_CLANG)
endif()
add_extra_compiler_option(-Wsign-promo)
add_extra_compiler_option(-Wuninitialized)
add_extra_compiler_option(-Winit-self)
if(CV_GCC AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 6.0) AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0))
add_extra_compiler_option(-Wno-psabi)
endif()

View File

@@ -201,19 +201,6 @@ macro(ocv_add_module _name)
set(OPENCV_MODULES_DISABLED_USER ${OPENCV_MODULES_DISABLED_USER} "${the_module}" CACHE INTERNAL "List of OpenCV modules explicitly disabled by user")
endif()
# add reverse wrapper dependencies (BINDINGS)
foreach (wrapper ${OPENCV_MODULE_${the_module}_WRAPPERS})
if(wrapper STREQUAL "python") # hack for python (BINDINGS)
ocv_add_dependencies(opencv_python2 OPTIONAL ${the_module})
ocv_add_dependencies(opencv_python3 OPTIONAL ${the_module})
else()
ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
endif()
if(DEFINED OPENCV_MODULE_opencv_${wrapper}_bindings_generator_CLASS)
ocv_add_dependencies(opencv_${wrapper}_bindings_generator OPTIONAL ${the_module})
endif()
endforeach()
# stop processing of current file
ocv_cmake_hook(POST_ADD_MODULE)
ocv_cmake_hook(POST_ADD_MODULE_${the_module})
@@ -500,6 +487,21 @@ function(__ocv_resolve_dependencies)
endforeach()
endif()
# add reverse wrapper dependencies (BINDINGS)
foreach(the_module ${OPENCV_MODULES_BUILD})
foreach (wrapper ${OPENCV_MODULE_${the_module}_WRAPPERS})
if(wrapper STREQUAL "python") # hack for python (BINDINGS)
ocv_add_dependencies(opencv_python2 OPTIONAL ${the_module})
ocv_add_dependencies(opencv_python3 OPTIONAL ${the_module})
else()
ocv_add_dependencies(opencv_${wrapper} OPTIONAL ${the_module})
endif()
if(DEFINED OPENCV_MODULE_opencv_${wrapper}_bindings_generator_CLASS)
ocv_add_dependencies(opencv_${wrapper}_bindings_generator OPTIONAL ${the_module})
endif()
endforeach()
endforeach()
# disable MODULES with unresolved dependencies
set(has_changes ON)
while(has_changes)

View File

@@ -8,7 +8,20 @@ include(CMakeParseArguments)
function(ocv_cmake_dump_vars)
set(OPENCV_SUPPRESS_DEPRECATIONS 1) # suppress deprecation warnings from variable_watch() guards
get_cmake_property(__variableNames VARIABLES)
cmake_parse_arguments(DUMP "" "TOFILE" "" ${ARGN})
cmake_parse_arguments(DUMP "FORCE" "TOFILE" "" ${ARGN})
# avoid generation of excessive logs with "--trace" or "--trace-expand" parameters
# Note: `-DCMAKE_TRACE_MODE=1` should be passed to CMake through the command line. It is not a CMake built-in variable for now (2020-12)
# Use `cmake . -UCMAKE_TRACE_MODE` to remove this variable from cache
if(CMAKE_TRACE_MODE AND NOT DUMP_FORCE)
if(DUMP_TOFILE)
file(WRITE ${CMAKE_BINARY_DIR}/${DUMP_TOFILE} "Skipped due to enabled CMAKE_TRACE_MODE")
else()
message(AUTHOR_WARNING "ocv_cmake_dump_vars() is skipped due to enabled CMAKE_TRACE_MODE")
endif()
return()
endif()
set(regex "${DUMP_UNPARSED_ARGUMENTS}")
string(TOLOWER "${regex}" regex_lower)
set(__VARS "")

View File

@@ -88,16 +88,24 @@ option(OPENCV_ENABLE_ALLOCATOR_STATS "Enable Allocator metrics" ON)
if(NOT OPENCV_ENABLE_ALLOCATOR_STATS)
add_definitions(-DOPENCV_DISABLE_ALLOCATOR_STATS=1)
else()
elseif(HAVE_CXX11 OR DEFINED OPENCV_ALLOCATOR_STATS_COUNTER_TYPE)
if(NOT DEFINED OPENCV_ALLOCATOR_STATS_COUNTER_TYPE)
if(HAVE_ATOMIC_LONG_LONG AND OPENCV_ENABLE_ATOMIC_LONG_LONG)
set(OPENCV_ALLOCATOR_STATS_COUNTER_TYPE "long long")
if(MINGW)
# command-line generation issue due to the space in the value; int/int64_t should be used instead
# https://github.com/opencv/opencv/issues/16990
message(STATUS "Consider adding OPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int/int64_t according to your build configuration")
else()
set(OPENCV_ALLOCATOR_STATS_COUNTER_TYPE "long long")
endif()
else()
set(OPENCV_ALLOCATOR_STATS_COUNTER_TYPE "int")
endif()
endif()
message(STATUS "Allocator metrics storage type: '${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}'")
add_definitions("-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}")
if(DEFINED OPENCV_ALLOCATOR_STATS_COUNTER_TYPE)
message(STATUS "Allocator metrics storage type: '${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}'")
add_definitions("-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=${OPENCV_ALLOCATOR_STATS_COUNTER_TYPE}")
endif()
endif()

View File

@@ -2554,6 +2554,14 @@ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
return _Tpvec(intrin(a.val, b.val)); \
}
#define OPENCV_HAL_IMPL_WASM_BIN_FUNC_FALLBACK(_Tpvec, func, intrin) \
inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
{ \
fallback::_Tpvec a_(a); \
fallback::_Tpvec b_(b); \
return _Tpvec(fallback::func(a_, b_)); \
}
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min)
@@ -2644,8 +2652,14 @@ OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_sub_wrap, wasm_i8x16_sub)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_sub_wrap, wasm_i8x16_sub)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_sub_wrap, wasm_i16x8_sub)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_sub_wrap, wasm_i16x8_sub)
#if (__EMSCRIPTEN_major__ * 1000000 + __EMSCRIPTEN_minor__ * 1000 + __EMSCRIPTEN_tiny__) >= (2000000)
// details: https://github.com/opencv/opencv/issues/18097 ( https://github.com/emscripten-core/emscripten/issues/12018 )
OPENCV_HAL_IMPL_WASM_BIN_FUNC_FALLBACK(v_uint8x16, v_mul_wrap, wasm_i8x16_mul)
OPENCV_HAL_IMPL_WASM_BIN_FUNC_FALLBACK(v_int8x16, v_mul_wrap, wasm_i8x16_mul)
#else
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_mul_wrap, wasm_i8x16_mul)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_mul_wrap, wasm_i8x16_mul)
#endif
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_mul_wrap, wasm_i16x8_mul)
OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_mul_wrap, wasm_i16x8_mul)
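Background for the fallback above: Emscripten 2.0 dropped the non-standard wasm_i8x16_mul intrinsic (see the linked issues), so the 8-bit v_mul_wrap variants are routed through the scalar fallback implementation. v_mul_wrap keeps only the low bits of each lane's product, i.e. multiplication modulo 256 for 8-bit lanes. A minimal standalone sketch of that semantic (helper name hypothetical, not the OpenCV code itself):

    #include <cstdint>
    #include <cstddef>

    // Wrap-around (modulo-256) byte multiply: the behavior v_mul_wrap
    // guarantees for 8-bit lanes regardless of the SIMD backend.
    static inline void mul_wrap_u8(const uint8_t* a, const uint8_t* b,
                                   uint8_t* dst, size_t n)
    {
        for (size_t i = 0; i < n; ++i)
            dst[i] = (uint8_t)(a[i] * b[i]); // keep the low 8 bits of the product
    }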

View File

@@ -7,13 +7,11 @@
#include "./allocator_stats.hpp"
#ifdef CV_CXX11
#include <atomic>
#endif
//#define OPENCV_DISABLE_ALLOCATOR_STATS
namespace cv { namespace utils {
#ifdef CV_CXX11
#include <atomic>
#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE
#if defined(__GNUC__) && (\
@@ -28,6 +26,16 @@ namespace cv { namespace utils {
#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE long long
#endif
#else // CV_CXX11
#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE
#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE int // CV_XADD supports int only
#endif
#endif // CV_CXX11
namespace cv { namespace utils {
#ifdef CV__ALLOCATOR_STATS_LOG
namespace {
#endif
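The counter type picked by this #if chain (or injected as OPENCV_ALLOCATOR_STATS_COUNTER_TYPE from CMake, as in the earlier hunk) parameterizes the allocator statistics counters; under C++11 the updates go through std::atomic, while the pre-C++11 path is limited to int because CV_XADD supports int only. A minimal sketch of the C++11 pattern, with hypothetical names:

    #include <atomic>
    #include <cstddef>

    typedef long long counter_t; // stands in for OPENCV_ALLOCATOR_STATS_COUNTER_TYPE

    struct AllocatorStatsSketch
    {
        std::atomic<counter_t> current{0}, total{0}, peak{0};
        void onAllocate(size_t sz)
        {
            counter_t c = (current += (counter_t)sz); // new current value
            total += (counter_t)sz;
            counter_t p = peak.load();
            while (c > p && !peak.compare_exchange_weak(p, c)) {} // lock-free max
        }
        void onFree(size_t sz) { current -= (counter_t)sz; }
    };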

View File

@@ -181,6 +181,8 @@ message DetectionOutputParameter {
optional float confidence_threshold = 9;
// If prior boxes are normalized to [0, 1] or not.
optional bool normalized_bbox = 10 [default = true];
// OpenCV custom parameter
optional bool clip = 1000 [default = false];
}
message Datum {
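Context for the custom field: when clip is true, OpenCV's DetectionOutput implementation clamps each decoded, normalized box coordinate to the [0, 1] range (the normalized_bbox field above documents that boxes live in that range). A one-line sketch of the clamp, helper name hypothetical:

    // Clamp a normalized coordinate to [0, 1], as the `clip` option implies.
    static inline float clampUnit(float v)
    {
        return v < 0.f ? 0.f : (v > 1.f ? 1.f : v);
    }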

View File

@@ -1597,15 +1597,15 @@ public:
v_float32x4 r2 = v_load_aligned(rptr + vsz_a*2);
v_float32x4 r3 = v_load_aligned(rptr + vsz_a*3);
vs00 += w0*r0;
vs01 += w0*r1;
vs02 += w0*r2;
vs03 += w0*r3;
vs00 = v_fma(w0, r0, vs00);
vs01 = v_fma(w0, r1, vs01);
vs02 = v_fma(w0, r2, vs02);
vs03 = v_fma(w0, r3, vs03);
vs10 += w1*r0;
vs11 += w1*r1;
vs12 += w1*r2;
vs13 += w1*r3;
vs10 = v_fma(w1, r0, vs10);
vs11 = v_fma(w1, r1, vs11);
vs12 = v_fma(w1, r2, vs12);
vs13 = v_fma(w1, r3, vs13);
}
s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
@@ -2365,20 +2365,21 @@ public:
for( ; n <= nmax - 4; n += 4 )
{
v_float32x4 d0 = v_load(dst0 + n);
v_float32x4 d1 = v_load(dst1 + n);
v_float32x4 b0 = v_load(bptr0 + n);
v_float32x4 b1 = v_load(bptr1 + n);
v_float32x4 b2 = v_load(bptr2 + n);
v_float32x4 b3 = v_load(bptr3 + n);
v_float32x4 d0 = v_load(dst0 + n);
v_float32x4 d1 = v_load(dst1 + n);
d0 += b0*a00;
d1 += b0*a01;
d0 += b1*a10;
d1 += b1*a11;
d0 += b2*a20;
d1 += b2*a21;
d0 += b3*a30;
d1 += b3*a31;
// TODO try to improve pipeline width
d0 = v_fma(b0, a00, d0);
d1 = v_fma(b0, a01, d1);
d0 = v_fma(b1, a10, d0);
d1 = v_fma(b1, a11, d1);
d0 = v_fma(b2, a20, d0);
d1 = v_fma(b2, a21, d1);
d0 = v_fma(b3, a30, d0);
d1 = v_fma(b3, a31, d1);
v_store(dst0 + n, d0);
v_store(dst1 + n, d1);
}
@@ -2386,8 +2387,10 @@ public:
for( ; n < nmax; n++ )
{
float b0 = bptr0[n], b1 = bptr1[n];
float b2 = bptr2[n], b3 = bptr3[n];
float b0 = bptr0[n];
float b1 = bptr1[n];
float b2 = bptr2[n];
float b3 = bptr3[n];
float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
dst0[n] = d0;
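The pattern of this rewrite: each accumulation of the form d += a*b becomes d = v_fma(a, b, d), which computes a*b + d per lane and lets the universal-intrinsics backend emit a fused multiply-add where the hardware provides one (same result up to rounding, usually fewer instructions). A minimal sketch with the same API, assuming a length divisible by 4:

    #include <opencv2/core/hal/intrin.hpp>

    // Dot product accumulated with v_fma(a, b, s), i.e. a*b + s per lane.
    static float dot4(const float* a, const float* b, int n) // assumes n % 4 == 0
    {
        cv::v_float32x4 s = cv::v_setall_f32(0.f);
        for (int i = 0; i < n; i += 4)
            s = cv::v_fma(cv::v_load(a + i), cv::v_load(b + i), s);
        return cv::v_reduce_sum(s);
    }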

View File

@@ -245,16 +245,18 @@ public:
#if CV_SIMD128
for( ; i <= nw - 4; i += 4, wptr += 4*wstep )
{
v_float32x4 vs0 = v_setall_f32(0.f), vs1 = v_setall_f32(0.f);
v_float32x4 vs2 = v_setall_f32(0.f), vs3 = v_setall_f32(0.f);
v_float32x4 vs0 = v_setall_f32(0.f);
v_float32x4 vs1 = v_setall_f32(0.f);
v_float32x4 vs2 = v_setall_f32(0.f);
v_float32x4 vs3 = v_setall_f32(0.f);
for( k = 0; k < vecsize; k += 4 )
{
v_float32x4 v = v_load_aligned(sptr + k);
vs0 += v*v_load_aligned(wptr + k);
vs1 += v*v_load_aligned(wptr + wstep + k);
vs2 += v*v_load_aligned(wptr + wstep*2 + k);
vs3 += v*v_load_aligned(wptr + wstep*3 + k);
vs0 = v_fma(v, v_load_aligned(wptr + k), vs0);
vs1 = v_fma(v, v_load_aligned(wptr + wstep + k), vs1);
vs2 = v_fma(v, v_load_aligned(wptr + wstep*2 + k), vs2);
vs3 = v_fma(v, v_load_aligned(wptr + wstep*3 + k), vs3);
}
v_float32x4 s = v_reduce_sum4(vs0, vs1, vs2, vs3);

View File

@@ -48,6 +48,7 @@ public:
CV_Check(interpolation, interpolation == "nearest" || interpolation == "opencv_linear" || interpolation == "bilinear", "");
alignCorners = params.get<bool>("align_corners", false);
halfPixelCenters = params.get<bool>("half_pixel_centers", false);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -123,7 +124,7 @@
Mat& inp = inputs[0];
Mat& out = outputs[0];
if (interpolation == "nearest" || interpolation == "opencv_linear")
if (interpolation == "nearest" || interpolation == "opencv_linear" || (interpolation == "bilinear" && halfPixelCenters))
{
InterpolationFlags mode = interpolation == "nearest" ? INTER_NEAREST : INTER_LINEAR;
for (size_t n = 0; n < inputs[0].size[0]; ++n)
@@ -269,6 +270,7 @@ protected:
String interpolation;
float scaleWidth, scaleHeight;
bool alignCorners;
bool halfPixelCenters;
};
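For reference, half_pixel_centers selects TensorFlow's half-pixel coordinate mapping, src = (dst + 0.5) * scale - 0.5, which matches the mapping cv::resize uses for INTER_LINEAR; that is why the bilinear case with half-pixel centers can be routed through the opencv_linear branch above. A sketch of the two mappings (helper name hypothetical):

    // scale = inputSize / (float)outputSize
    static inline float mapCoord(int dst, float scale, bool halfPixelCenters)
    {
        return halfPixelCenters ? (dst + 0.5f) * scale - 0.5f // TF half_pixel_centers mapping
                                : dst * scale;                // TF legacy mapping
    }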

View File

@@ -1962,6 +1962,9 @@ void TFImporter::populateNet(Net dstNet)
if (hasLayerAttr(layer, "align_corners"))
layerParams.set("align_corners", getLayerAttr(layer, "align_corners").b());
if (hasLayerAttr(layer, "half_pixel_centers"))
layerParams.set("half_pixel_centers", getLayerAttr(layer, "half_pixel_centers").b());
int id = dstNet.addLayer(name, "Resize", layerParams);
layer_id[name] = id;

View File

@@ -81,12 +81,12 @@ class Test_TensorFlow_layers : public DNNTestLayer
{
public:
void runTensorFlowNet(const std::string& prefix, bool hasText = false,
double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false)
double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false, const std::string& groupPrefix = "")
{
std::string netPath = path(prefix + "_net.pb");
std::string netConfig = (hasText ? path(prefix + "_net.pbtxt") : "");
std::string netPath = path(prefix + groupPrefix + "_net.pb");
std::string netConfig = (hasText ? path(prefix + groupPrefix + "_net.pbtxt") : "");
std::string inpPath = path(prefix + "_in.npy");
std::string outPath = path(prefix + "_out.npy");
std::string outPath = path(prefix + groupPrefix + "_out.npy");
cv::Mat input = blobFromNPY(inpPath);
cv::Mat ref = blobFromNPY(outPath);
@@ -1056,10 +1056,53 @@ TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
runTensorFlowNet("keras_learning_phase");
}
// TF case: align_corners=False, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear)
{
runTensorFlowNet("resize_bilinear");
}
// TF case: align_corners=True, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear_align_corners)
{
runTensorFlowNet("resize_bilinear",
false, 0.0, 0.0, false, // default parameters
"_align_corners");
}
// TF case: align_corners=False, half_pixel_centers=True
TEST_P(Test_TensorFlow_layers, resize_bilinear_half_pixel)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
runTensorFlowNet("resize_bilinear", false, 0.0, 0.0, false, "_half_pixel");
}
// TF case: align_corners=False, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear_factor)
{
runTensorFlowNet("resize_bilinear_factor");
}
// TF case: align_corners=False, half_pixel_centers=True
TEST_P(Test_TensorFlow_layers, resize_bilinear_factor_half_pixel)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
runTensorFlowNet("resize_bilinear_factor", false, 0.0, 0.0, false, "_half_pixel");
}
// TF case: align_corners=True, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear_factor_align_corners)
{
runTensorFlowNet("resize_bilinear_factor", false, 0.0, 0.0, false, "_align_corners");
}
// TF case: align_corners=False, half_pixel_centers=False
TEST_P(Test_TensorFlow_layers, resize_bilinear_down)
{
runTensorFlowNet("resize_bilinear_down");
}

View File

@@ -43,17 +43,17 @@ public class JavaCamera2View extends CameraBridgeViewBase {
private static final String LOGTAG = "JavaCamera2View";
private ImageReader mImageReader;
private int mPreviewFormat = ImageFormat.YUV_420_888;
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
private CameraDevice mCameraDevice;
private CameraCaptureSession mCaptureSession;
private CaptureRequest.Builder mPreviewRequestBuilder;
private String mCameraID;
private android.util.Size mPreviewSize = new android.util.Size(-1, -1);
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
private HandlerThread mBackgroundThread;
private Handler mBackgroundHandler;
protected Handler mBackgroundHandler;
public JavaCamera2View(Context context, int cameraId) {
super(context, cameraId);

View File

@@ -991,6 +991,8 @@ double GStreamerCapture::getProperty(int propId) const
switch(propId)
{
case CV_CAP_PROP_POS_MSEC:
CV_LOG_ONCE_WARNING(NULL, "OpenCV | GStreamer: CAP_PROP_POS_MSEC property result may be unreliable: "
"https://github.com/opencv/opencv/issues/19025");
format = GST_FORMAT_TIME;
status = gst_element_query_position(sink.get(), CV_GST_FORMAT(format), &value);
if(!status) {
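The property remains queryable; the warning only flags that the reported value can be off depending on the pipeline and codec (issue 19025). A hedged usage sketch, assuming a local test file:

    #include <opencv2/videoio.hpp>
    #include <iostream>

    int main()
    {
        cv::VideoCapture cap("video.mp4", cv::CAP_GSTREAMER); // hypothetical input file
        cv::Mat frame;
        while (cap.read(frame))
        {
            double posMsec = cap.get(cv::CAP_PROP_POS_MSEC); // may be unreliable, see issue 19025
            std::cout << "pos = " << posMsec << " ms" << std::endl;
        }
        return 0;
    }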

View File

@@ -239,6 +239,11 @@ public:
if (!isBackendAvailable(apiPref, cv::videoio_registry::getStreamBackends()))
throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
// GStreamer: https://github.com/opencv/opencv/issues/19025
if (apiPref == CAP_GSTREAMER)
throw SkipTestException(cv::String("Backend ") + cv::videoio_registry::getBackendName(apiPref) +
cv::String(" does not return reliable values for CAP_PROP_POS_MSEC property"));
if (((apiPref == CAP_FFMPEG) && ((ext == "h264") || (ext == "h265"))))
throw SkipTestException(cv::String("Backend ") + cv::videoio_registry::getBackendName(apiPref) +
cv::String(" does not support CAP_PROP_POS_MSEC option"));
@@ -255,10 +260,12 @@ public:
double timestamp = 0;
ASSERT_NO_THROW(cap >> img);
EXPECT_NO_THROW(timestamp = cap.get(CAP_PROP_POS_MSEC));
if (cvtest::debugLevel > 0)
std::cout << "i = " << i << ": timestamp = " << timestamp << std::endl;
const double frame_period = 1000.f/bunny_param.getFps();
// NOTE: eps == frame_period, because VideoCapture returns either the frame beginning timestamp or the frame end
// timestamp, depending on codec and backend. So the first frame has timestamp 0 or frame_period.
EXPECT_NEAR(timestamp, i*frame_period, frame_period);
EXPECT_NEAR(timestamp, i*frame_period, frame_period) << "i=" << i;
}
}
};
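A worked example of the tolerance choice: at 30 fps, frame_period = 1000/30 ~= 33.33 ms. A backend that stamps frame i at its start reports i*frame_period, one that stamps at its end reports (i+1)*frame_period; both stay within frame_period of the expected i*frame_period, hence eps == frame_period. The same check in standalone form:

    #include <cassert>
    #include <cmath>

    int main()
    {
        const double frame_period = 1000.0 / 30.0; // ~33.33 ms at 30 fps
        for (int i = 0; i < 10; ++i)
        {
            const double t_begin = i * frame_period;       // frame-start stamping
            const double t_end   = (i + 1) * frame_period; // frame-end stamping
            assert(std::fabs(t_begin - i * frame_period) <= frame_period + 1e-9); // slack for FP rounding
            assert(std::fabs(t_end   - i * frame_period) <= frame_period + 1e-9);
        }
        return 0;
    }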