Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Commit: 57ee14d62d
3rdparty/libjpeg-turbo/CMakeLists.txt (vendored, 4 changed lines)
@@ -4,9 +4,9 @@ ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter -Wsign-compare -Wshorten-6
 set(VERSION_MAJOR 2)
 set(VERSION_MINOR 1)
-set(VERSION_REVISION 0)
+set(VERSION_REVISION 2)
 set(VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION})
-set(LIBJPEG_TURBO_VERSION_NUMBER 2001000)
+set(LIBJPEG_TURBO_VERSION_NUMBER 2001002)

 string(TIMESTAMP BUILD "opencv-${OPENCV_VERSION}-libjpeg-turbo")
 if(CMAKE_BUILD_TYPE STREQUAL "Debug")
3rdparty/libjpeg-turbo/jconfigint.h.in (vendored, 10 changed lines)
@@ -40,3 +40,13 @@
 #define HAVE_BITSCANFORWARD
 #endif
 #endif
+
+#if defined(__has_attribute)
+#if __has_attribute(fallthrough)
+#define FALLTHROUGH __attribute__((fallthrough));
+#else
+#define FALLTHROUGH
+#endif
+#else
+#define FALLTHROUGH
+#endif
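The new FALLTHROUGH macro is what the /*FALLTHROUGH*/ call sites further down are converted to use. A minimal stand-alone sketch (not part of the patch) of how such a guard is consumed in a switch, so that GCC/Clang's -Wimplicit-fallthrough stays quiet while other compilers see an empty token:

    /* Illustration only: the same macro definition as above, consumed before a
     * deliberately non-terminated case label. */
    #if defined(__has_attribute)
    #if __has_attribute(fallthrough)
    #define FALLTHROUGH __attribute__((fallthrough));
    #else
    #define FALLTHROUGH
    #endif
    #else
    #define FALLTHROUGH
    #endif

    static int classify(int v)
    {
      int score = 0;
      switch (v) {
      case 2:
        score += 10;
        FALLTHROUGH /* deliberate fall-through into case 1 */
      case 1:
        score += 1;
        break;
      default:
        break;
      }
      return score;
    }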
3rdparty/libjpeg-turbo/src/jchuff.c (vendored, 5 changed lines)
@@ -44,8 +44,9 @@
  * flags (this defines __thumb__).
  */

-#if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || \
-    defined(_M_ARM64)
+/* NOTE: Both GCC and Clang define __GNUC__ */
+#if (defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__))) || \
+    defined(_M_ARM) || defined(_M_ARM64)
 #if !defined(__thumb__) || defined(__thumb2__)
 #define USE_CLZ_INTRINSIC
 #endif
3rdparty/libjpeg-turbo/src/jcmaster.c (vendored, 2 changed lines)
@@ -493,7 +493,7 @@ prepare_for_pass(j_compress_ptr cinfo)
     master->pass_type = output_pass;
     master->pass_number++;
 #endif
-    /*FALLTHROUGH*/
+    FALLTHROUGH /*FALLTHROUGH*/
   case output_pass:
     /* Do a data-output pass. */
     /* We need not repeat per-scan setup if prior optimization pass did it. */
3rdparty/libjpeg-turbo/src/jcphuff.c (vendored, 10 changed lines)
@@ -7,6 +7,7 @@
  * Copyright (C) 2011, 2015, 2018, 2021, D. R. Commander.
  * Copyright (C) 2016, 2018, Matthieu Darbois.
  * Copyright (C) 2020, Arm Limited.
+ * Copyright (C) 2021, Alex Richardson.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -52,8 +53,9 @@
  * flags (this defines __thumb__).
  */

-#if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || \
-    defined(_M_ARM64)
+/* NOTE: Both GCC and Clang define __GNUC__ */
+#if (defined(__GNUC__) && (defined(__arm__) || defined(__aarch64__))) || \
+    defined(_M_ARM) || defined(_M_ARM64)
 #if !defined(__thumb__) || defined(__thumb2__)
 #define USE_CLZ_INTRINSIC
 #endif
@@ -679,7 +681,7 @@ encode_mcu_AC_first(j_compress_ptr cinfo, JBLOCKROW *MCU_data)
       emit_restart(entropy, entropy->next_restart_num);

 #ifdef WITH_SIMD
-  cvalue = values = (JCOEF *)PAD((size_t)values_unaligned, 16);
+  cvalue = values = (JCOEF *)PAD((JUINTPTR)values_unaligned, 16);
 #else
   /* Not using SIMD, so alignment is not needed */
   cvalue = values = values_unaligned;
@@ -944,7 +946,7 @@ encode_mcu_AC_refine(j_compress_ptr cinfo, JBLOCKROW *MCU_data)
       emit_restart(entropy, entropy->next_restart_num);

 #ifdef WITH_SIMD
-  cabsvalue = absvalues = (JCOEF *)PAD((size_t)absvalues_unaligned, 16);
+  cabsvalue = absvalues = (JCOEF *)PAD((JUINTPTR)absvalues_unaligned, 16);
 #else
   /* Not using SIMD, so alignment is not needed */
   cabsvalue = absvalues = absvalues_unaligned;
3rdparty/libjpeg-turbo/src/jdapimin.c (vendored, 3 changed lines)
@@ -23,6 +23,7 @@
 #include "jinclude.h"
 #include "jpeglib.h"
 #include "jdmaster.h"
+#include "jconfigint.h"


 /*
@@ -308,7 +309,7 @@ jpeg_consume_input(j_decompress_ptr cinfo)
     /* Initialize application's data source module */
     (*cinfo->src->init_source) (cinfo);
     cinfo->global_state = DSTATE_INHEADER;
-    /*FALLTHROUGH*/
+    FALLTHROUGH /*FALLTHROUGH*/
   case DSTATE_INHEADER:
     retcode = (*cinfo->inputctl->consume_input) (cinfo);
     if (retcode == JPEG_REACHED_SOS) { /* Found SOS, prepare to decompress */
3rdparty/libjpeg-turbo/src/jdhuff.c (vendored, 11 changed lines)
@@ -584,7 +584,7 @@ decode_mcu_slow(j_decompress_ptr cinfo, JBLOCKROW *MCU_data)
        * behavior is, to the best of our understanding, innocuous, and it is
        * unclear how to work around it without potentially affecting
        * performance. Thus, we (hopefully temporarily) suppress UBSan integer
-       * overflow errors for this function.
+       * overflow errors for this function and decode_mcu_fast().
        */
       s += state.last_dc_val[ci];
       state.last_dc_val[ci] = s;
@@ -651,6 +651,12 @@ decode_mcu_slow(j_decompress_ptr cinfo, JBLOCKROW *MCU_data)
 }


+#if defined(__has_feature)
+#if __has_feature(undefined_behavior_sanitizer)
+__attribute__((no_sanitize("signed-integer-overflow"),
+               no_sanitize("unsigned-integer-overflow")))
+#endif
+#endif
 LOCAL(boolean)
 decode_mcu_fast(j_decompress_ptr cinfo, JBLOCKROW *MCU_data)
 {
@@ -681,6 +687,9 @@ decode_mcu_fast(j_decompress_ptr cinfo, JBLOCKROW *MCU_data)

       if (entropy->dc_needed[blkn]) {
         int ci = cinfo->MCU_membership[blkn];
+        /* Refer to the comment in decode_mcu_slow() regarding the supression of
+         * a UBSan integer overflow error in this line of code.
+         */
         s += state.last_dc_val[ci];
         state.last_dc_val[ci] = s;
         if (block)
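The attribute block added before decode_mcu_fast() mirrors the one already applied to decode_mcu_slow(): when Clang's UBSan is active, integer-overflow checks are switched off for just that function. A generic, hedged sketch of the same pattern outside libjpeg-turbo:

    /* Illustration only: disable UBSan integer-overflow checks for one
     * function when building with -fsanitize=undefined under Clang. */
    #if defined(__has_feature)
    #if __has_feature(undefined_behavior_sanitizer)
    __attribute__((no_sanitize("signed-integer-overflow"),
                   no_sanitize("unsigned-integer-overflow")))
    #endif
    #endif
    static int wrapping_add(int a, int b)
    {
      return a + b;  /* may overflow by design; not reported inside this function */
    }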
3rdparty/libjpeg-turbo/src/jdmainct.c (vendored, 5 changed lines)
@@ -18,6 +18,7 @@

 #include "jinclude.h"
 #include "jdmainct.h"
+#include "jconfigint.h"


 /*
@@ -360,7 +361,7 @@ process_data_context_main(j_decompress_ptr cinfo, JSAMPARRAY output_buf,
     main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
     if (*out_row_ctr >= out_rows_avail)
       return; /* Postprocessor exactly filled output buf */
-    /*FALLTHROUGH*/
+    FALLTHROUGH /*FALLTHROUGH*/
   case CTX_PREPARE_FOR_IMCU:
     /* Prepare to process first M-1 row groups of this iMCU row */
     main_ptr->rowgroup_ctr = 0;
@@ -371,7 +372,7 @@ process_data_context_main(j_decompress_ptr cinfo, JSAMPARRAY output_buf,
     if (main_ptr->iMCU_row_ctr == cinfo->total_iMCU_rows)
       set_bottom_pointers(cinfo);
     main_ptr->context_state = CTX_PROCESS_IMCU;
-    /*FALLTHROUGH*/
+    FALLTHROUGH /*FALLTHROUGH*/
   case CTX_PROCESS_IMCU:
     /* Call postprocessor using previously set pointers */
     (*cinfo->post->post_process_data) (cinfo,
3rdparty/libjpeg-turbo/src/jmemmgr.c (vendored, 6 changed lines)
@@ -4,7 +4,7 @@
  * This file was part of the Independent JPEG Group's software:
  * Copyright (C) 1991-1997, Thomas G. Lane.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2016, D. R. Commander.
+ * Copyright (C) 2016, 2021, D. R. Commander.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -1032,7 +1032,7 @@ free_pool(j_common_ptr cinfo, int pool_id)
       large_pool_ptr next_lhdr_ptr = lhdr_ptr->next;
       space_freed = lhdr_ptr->bytes_used +
                     lhdr_ptr->bytes_left +
-                    sizeof(large_pool_hdr);
+                    sizeof(large_pool_hdr) + ALIGN_SIZE - 1;
       jpeg_free_large(cinfo, (void *)lhdr_ptr, space_freed);
       mem->total_space_allocated -= space_freed;
       lhdr_ptr = next_lhdr_ptr;
@@ -1045,7 +1045,7 @@ free_pool(j_common_ptr cinfo, int pool_id)
     while (shdr_ptr != NULL) {
       small_pool_ptr next_shdr_ptr = shdr_ptr->next;
       space_freed = shdr_ptr->bytes_used + shdr_ptr->bytes_left +
-                    sizeof(small_pool_hdr);
+                    sizeof(small_pool_hdr) + ALIGN_SIZE - 1;
       jpeg_free_small(cinfo, (void *)shdr_ptr, space_freed);
       mem->total_space_allocated -= space_freed;
       shdr_ptr = next_shdr_ptr;
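These two accounting changes keep total_space_allocated symmetric with the allocation side, which (an assumption drawn from the allocator's pool layout) reserves ALIGN_SIZE - 1 extra bytes per pool so the payload can be aligned. A compilable sketch of the bookkeeping with generic names:

    #include <stddef.h>

    /* Illustration only, not libjpeg-turbo code: what is charged at allocation
     * time should equal what is released in free_pool(), including the header
     * and the alignment padding, or the running total drifts. */
    #define ALIGN_SIZE 16
    typedef struct { size_t bytes_used, bytes_left; } pool_hdr;

    static size_t charged(size_t payload)
    { return payload + sizeof(pool_hdr) + ALIGN_SIZE - 1; }

    static size_t released(const pool_hdr *h)
    { return h->bytes_used + h->bytes_left + sizeof(pool_hdr) + ALIGN_SIZE - 1; }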
3rdparty/libjpeg-turbo/src/jpegint.h (vendored, 15 changed lines)
@@ -5,8 +5,9 @@
  * Copyright (C) 1991-1997, Thomas G. Lane.
  * Modified 1997-2009 by Guido Vollbeding.
  * libjpeg-turbo Modifications:
- * Copyright (C) 2015-2016, 2019, D. R. Commander.
+ * Copyright (C) 2015-2016, 2019, 2021, D. R. Commander.
  * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2021, Alex Richardson.
  * For conditions of distribution and use, see the accompanying README.ijg
  * file.
  *
@@ -47,6 +48,18 @@ typedef enum { /* Operating modes for buffer controllers */
 /* JLONG must hold at least signed 32-bit values. */
 typedef long JLONG;

+/* JUINTPTR must hold pointer values. */
+#ifdef __UINTPTR_TYPE__
+/*
+ * __UINTPTR_TYPE__ is GNU-specific and available in GCC 4.6+ and Clang 3.0+.
+ * Fortunately, that is sufficient to support the few architectures for which
+ * sizeof(void *) != sizeof(size_t). The only other options would require C99
+ * or Clang-specific builtins.
+ */
+typedef __UINTPTR_TYPE__ JUINTPTR;
+#else
+typedef size_t JUINTPTR;
+#endif

 /*
  * Left shift macro that handles a negative operand without causing any
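JUINTPTR exists so the PAD() call sites shown above in jcphuff.c can cast a pointer to an integer that is guaranteed to be wide enough before doing alignment arithmetic; size_t is only a safe intermediate where sizeof(void *) == sizeof(size_t). A small sketch of the same idea written against the standard uintptr_t:

    #include <stdint.h>

    /* Illustration only: round a pointer up to a 16-byte boundary by doing the
     * arithmetic on a pointer-sized unsigned integer, as PAD((JUINTPTR)p, 16)
     * does in the patched code. */
    static void *align_up_16(void *p)
    {
      uintptr_t v = (uintptr_t)p;
      v = (v + 15u) & ~(uintptr_t)15u;
      return (void *)v;
    }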
@@ -106,7 +106,7 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ${ENABLE_PIC})

 ocv_cmake_hook(PRE_CMAKE_BOOTSTRAP)

-# Bootstap CMake system: setup CMAKE_SYSTEM_NAME and other vars
+# Bootstrap CMake system: setup CMAKE_SYSTEM_NAME and other vars
 if(OPENCV_WORKAROUND_CMAKE_20989)
   set(CMAKE_SYSTEM_PROCESSOR_BACKUP ${CMAKE_SYSTEM_PROCESSOR})
 endif()
@@ -77,7 +77,7 @@ cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
 // delete useless mats.
 roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();

-// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
+// Setup the termination criteria, either 10 iteration or move by at least 1 pt
 let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);

 let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
@@ -77,7 +77,7 @@ cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
 // delete useless mats.
 roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();

-// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
+// Setup the termination criteria, either 10 iteration or move by at least 1 pt
 let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);

 let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
@@ -181,17 +181,33 @@ LogLevel getLogLevel()

 namespace internal {

+static int getShowTimestampMode()
+{
+    static bool param_timestamp_enable = utils::getConfigurationParameterBool("OPENCV_LOG_TIMESTAMP", true);
+    static bool param_timestamp_ns_enable = utils::getConfigurationParameterBool("OPENCV_LOG_TIMESTAMP_NS", false);
+    return (param_timestamp_enable ? 1 : 0) + (param_timestamp_ns_enable ? 2 : 0);
+}
+
 void writeLogMessage(LogLevel logLevel, const char* message)
 {
     const int threadID = cv::utils::getThreadID();
+
+    std::string message_id;
+    switch (getShowTimestampMode())
+    {
+        case 1: message_id = cv::format("%d@%0.3f", threadID, getTimestampNS() * 1e-9); break;
+        case 1+2: message_id = cv::format("%d@%llu", threadID, (long long unsigned int)getTimestampNS()); break;
+        default: message_id = cv::format("%d", threadID); break;
+    }
+
     std::ostringstream ss;
     switch (logLevel)
     {
-    case LOG_LEVEL_FATAL: ss << "[FATAL:" << threadID << "] " << message << std::endl; break;
-    case LOG_LEVEL_ERROR: ss << "[ERROR:" << threadID << "] " << message << std::endl; break;
-    case LOG_LEVEL_WARNING: ss << "[ WARN:" << threadID << "] " << message << std::endl; break;
-    case LOG_LEVEL_INFO: ss << "[ INFO:" << threadID << "] " << message << std::endl; break;
-    case LOG_LEVEL_DEBUG: ss << "[DEBUG:" << threadID << "] " << message << std::endl; break;
+    case LOG_LEVEL_FATAL: ss << "[FATAL:" << message_id << "] " << message << std::endl; break;
+    case LOG_LEVEL_ERROR: ss << "[ERROR:" << message_id << "] " << message << std::endl; break;
+    case LOG_LEVEL_WARNING: ss << "[ WARN:" << message_id << "] " << message << std::endl; break;
+    case LOG_LEVEL_INFO: ss << "[ INFO:" << message_id << "] " << message << std::endl; break;
+    case LOG_LEVEL_DEBUG: ss << "[DEBUG:" << message_id << "] " << message << std::endl; break;
     case LOG_LEVEL_VERBOSE: ss << message << std::endl; break;
     case LOG_LEVEL_SILENT: return; // avoid compiler warning about incomplete switch
     case ENUM_LOG_LEVEL_FORCE_INT: return; // avoid compiler warning about incomplete switch
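With this change the log prefix is driven by two configuration parameters read once per process: OPENCV_LOG_TIMESTAMP (enabled by default) appends seconds since launch to the thread id, and OPENCV_LOG_TIMESTAMP_NS switches to a raw nanosecond counter. A hedged usage sketch; the exact prefix values are illustrative:

    #include <opencv2/core/utils/logger.hpp>

    int main()
    {
        // With OPENCV_LOG_TIMESTAMP=1 (the new default) a message is prefixed
        // like "[ INFO:0@1.234] ..."; with OPENCV_LOG_TIMESTAMP=0 it falls back
        // to the old "[ INFO:0] ..." thread-id-only form.
        CV_LOG_INFO(NULL, "reading input");
        return 0;
    }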
@@ -368,6 +368,10 @@ bool __termination; // skip some cleanups, because process is terminating

 cv::Mutex& getInitializationMutex();

+/// @brief Returns timestamp in nanoseconds since program launch
+int64 getTimestampNS();
+
+
 #define CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, RET_VALUE) \
     static TYPE* const instance = INITIALIZER; \
     return RET_VALUE;
@@ -944,6 +944,51 @@ int64 getCPUTickCount(void)

 #endif

+
+namespace internal {
+
+class Timestamp
+{
+public:
+    const int64 zeroTickCount;
+    const double ns_in_ticks;
+
+    Timestamp()
+        : zeroTickCount(getTickCount())
+        , ns_in_ticks(1e9 / getTickFrequency())
+    {
+        // nothing
+    }
+
+    int64 getTimestamp()
+    {
+        int64 t = getTickCount();
+        return (int64)((t - zeroTickCount) * ns_in_ticks);
+    }
+
+    static Timestamp& getInstance()
+    {
+        static Timestamp g_timestamp;
+        return g_timestamp;
+    }
+};
+
+class InitTimestamp {
+public:
+    InitTimestamp() {
+        Timestamp::getInstance();
+    }
+};
+static InitTimestamp g_initialize_timestamp; // force zero timestamp initialization
+
+} // namespace
+
+int64 getTimestampNS()
+{
+    return internal::Timestamp::getInstance().getTimestamp();
+}
+
+

 const String& getBuildInformation()
 {
     static String build_info =
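The new internal Timestamp helper pins its zero point during static initialization and converts ticks to nanoseconds with getTickFrequency(). The same conversion can be written against the public API when a comparable measurement is needed in user code; a hedged sketch:

    #include <opencv2/core.hpp>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        // Same arithmetic as the internal Timestamp class above: ticks scaled by
        // 1e9 / frequency give nanoseconds relative to a chosen zero point.
        int64_t zero = cv::getTickCount();
        double ns_in_ticks = 1e9 / cv::getTickFrequency();
        // ... measured work would go here ...
        int64_t elapsed_ns = (int64_t)((cv::getTickCount() - zero) * ns_in_ticks);
        std::cout << "elapsed: " << elapsed_ns << " ns" << std::endl;
        return 0;
    }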
@@ -63,15 +63,6 @@ namespace details {
 #pragma warning(disable:4065) // switch statement contains 'default' but no 'case' labels
 #endif

-static int64 g_zero_timestamp = 0;
-
-static int64 getTimestamp()
-{
-    int64 t = getTickCount();
-    static double tick_to_ns = 1e9 / getTickFrequency();
-    return (int64)((t - g_zero_timestamp) * tick_to_ns);
-}
-
 static bool getParameterTraceEnable()
 {
     static bool param_traceEnable = utils::getConfigurationParameterBool("OPENCV_TRACE", false);
@@ -485,7 +476,7 @@ Region::Region(const LocationStaticStorage& location) :
         }
     }

-    int64 beginTimestamp = getTimestamp();
+    int64 beginTimestamp = getTimestampNS();

     int currentDepth = ctx.getCurrentDepth() + 1;
     switch (location.flags & REGION_FLAG_IMPL_MASK)
@@ -635,7 +626,7 @@ void Region::destroy()
         }
     }

-    int64 endTimestamp = getTimestamp();
+    int64 endTimestamp = getTimestampNS();
     int64 duration = endTimestamp - ctx.stackTopBeginTimestamp();

     bool active = isActive();
@@ -844,7 +835,7 @@ static bool isInitialized = false;

 TraceManager::TraceManager()
 {
-    g_zero_timestamp = cv::getTickCount();
+    (void)cv::getTimestampNS();

     isInitialized = true;
     CV_LOG("TraceManager ctor: " << (void*)this);
@@ -990,7 +981,7 @@ void parallelForFinalize(const Region& rootRegion)
 {
     TraceManagerThreadLocal& ctx = getTraceManager().tls.getRef();

-    int64 endTimestamp = getTimestamp();
+    int64 endTimestamp = getTimestampNS();
     int64 duration = endTimestamp - ctx.stackTopBeginTimestamp();
     CV_LOG_PARALLEL(NULL, "parallel_for duration: " << duration << " " << &rootRegion);

@@ -217,8 +217,16 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
 #if defined(INF_ENGINE_RELEASE)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name conv15_2_mbox_conf_perm has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif

     Mat sample = imread(findDataFile("dnn/street.png"));
     Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
     float scoreDiff = 0.0, iouDiff = 0.0;
@@ -324,12 +332,14 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
                  CV_TEST_TAG_DEBUG_VERYLONG);
     if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE); // TODO HALIDE_CPU

     Mat sample = imread(findDataFile("dnn/street.png"));
     Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);

     float scoreDiff = 0.0, iouDiff = 0.0;
     if (target == DNN_TARGET_OPENCL_FP16)
     {
-        scoreDiff = 0.0325;
+        scoreDiff = 0.04;
     }
     else if (target == DNN_TARGET_MYRIAD)
     {
@@ -517,10 +517,12 @@ TEST_P(Test_Caffe_nets, Colorization)
         l1 = 0.21;
         lInf = 4.5;
     }
+#if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
     {
-        l1 = 0.26; lInf = 6.5;
+        l1 = 0.3; lInf = 10;
     }
+#endif

     normAssert(out, ref, "", l1, lInf);
     expectNoFallbacksFromIE(net);
@@ -713,6 +715,13 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
 #endif
         CV_TEST_TAG_DEBUG_LONG
     );
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Reshape with name rpn_cls_score_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -734,6 +743,11 @@ TEST_P(Test_Caffe_nets, RFCN)
         CV_TEST_TAG_LONG,
         CV_TEST_TAG_DEBUG_VERYLONG
     );
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -172,16 +172,19 @@ public:

     static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
     {
+        CV_UNUSED(backend); CV_UNUSED(target); CV_UNUSED(inp); CV_UNUSED(ref);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021000000)
         if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             && target == DNN_TARGET_MYRIAD)
         {
             if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
                 inp->size[0] != 1 && inp->size[0] != ref->size[0])
             {
                 std::cout << "Inconsistent batch size of input and output blobs for Myriad plugin" << std::endl;
                 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
                 throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
             }
         }
+#endif
     }

     void expectNoFallbacks(Net& net, bool raiseError = true)
@@ -245,13 +245,13 @@ public:
                 nms_boxes.push_back(box);
                 nms_confidences.push_back(conf);
                 nms_classIds.push_back(class_id);
-#if 0 // use to update test reference data
-                std::cout << b << ", " << class_id << ", " << conf << "f, "
-                          << box.x << "f, " << box.y << "f, "
-                          << box.x + box.width << "f, " << box.y + box.height << "f,"
-                          << std::endl;
-#endif
+
+                if (cvtest::debugLevel > 0)
+                {
+                    std::cout << b << ", " << class_id << ", " << conf << "f, "
+                              << box.x << "f, " << box.y << "f, "
+                              << box.x + box.width << "f, " << box.y + box.height << "f,"
+                              << std::endl;
+                }
             }

     if (cvIsNaN(iouDiff))
@@ -359,6 +359,13 @@ TEST_P(Test_Darknet_nets, YoloVoc)
         scoreDiff = 0.03;
         iouDiff = 0.018;
     }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        iouDiff = std::numeric_limits<double>::quiet_NaN();
+    }
+#endif

     std::string config_file = "yolo-voc.cfg";
     std::string weights_file = "yolo-voc.weights";
@@ -372,6 +379,12 @@ TEST_P(Test_Darknet_nets, YoloVoc)
         SCOPED_TRACE("batch size 2");
         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff, 0.24, nmsThreshold);
     }

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 }

 TEST_P(Test_Darknet_nets, TinyYoloVoc)
@@ -615,6 +628,14 @@ TEST_P(Test_Darknet_nets, YOLOv4)
     std::string config_file = "yolov4.cfg";
     std::string weights_file = "yolov4.weights";

+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy (batch 1)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        iouDiff = std::numeric_limits<double>::quiet_NaN();
+    }
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_MYRIAD &&
@@ -633,6 +654,13 @@ TEST_P(Test_Darknet_nets, YOLOv4)
     {
         SCOPED_TRACE("batch size 2");

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+        // accuracy (batch 1)
+        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        {
+            iouDiff = 0.45f;
+        }
+#endif
 #if defined(INF_ENGINE_RELEASE)
         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         {
@@ -648,6 +676,12 @@ TEST_P(Test_Darknet_nets, YOLOv4)

         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
     }

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 }

 TEST_P(Test_Darknet_nets, YOLOv4_tiny)
@@ -718,6 +752,13 @@ TEST_P(Test_Darknet_nets, YOLOv4x_mish)
 {
     applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name permute_168 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000) // nGraph compilation failure
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
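The DNN test changes above and below all follow one idiom: when a backend/target combination is known to fail on a specific Inference Engine release, the test is tagged so it is reported as skipped with a reason rather than failing or silently passing. A schematic fragment of the pattern, using tag names from the OpenCV DNN test suite; it assumes the usual Test_* fixture providing backend and target:

    // Sketch of the skip idiom used throughout these hunks (not a new test).
    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
        // Known issue on this specific IE release only.
        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                         CV_TEST_TAG_DNN_SKIP_IE_NGRAPH,
                         CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    #endif
        // ... the actual checks run here on unaffected configurations ...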
@@ -39,12 +39,13 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
         l1 = default_l1;
     if (lInf == 0.0)
         lInf = default_lInf;
-#if 0
-    std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
-    std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
-    std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
-#endif
     normAssert(outputDefault, outputHalide, "", l1, lInf);
+    if (cvtest::debugLevel > 0 || testing::Test::HasFailure())
+    {
+        std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
+        std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
+        std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
+    }
 }

 static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
@@ -802,6 +803,16 @@ TEST_P(Eltwise, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
+        inSize == Vec3i(1, 4, 5) && op == "sum" && numConv == 1 && !weighted)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
+        inSize == Vec3i(2, 8, 6) && op == "sum" && numConv == 1 && !weighted)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
         inSize == Vec3i(1, 4, 5))
@@ -378,7 +378,7 @@ TEST_P(Test_Caffe_layers, layer_prelu_fc)
     // Reference output values are in range [-0.0001, 10.3906]
     double l1 = (target == DNN_TARGET_MYRIAD) ? 0.005 : 0.0;
     double lInf = (target == DNN_TARGET_MYRIAD) ? 0.021 : 0.0;
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
     {
         l1 = 0.006f; lInf = 0.05f;
@@ -1475,6 +1475,14 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
         lInf = 0.3;
     }
     normAssert(out, ref, "", l1, lInf);
+    if (cvtest::debugLevel > 0 || HasFailure())
+    {
+        std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
+        std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
+        std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
+        std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
+        std::cout << "out: " << out.reshape(1, 1) << std::endl;
+    }
 }

 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
@@ -802,6 +802,14 @@ TEST_P(Test_ONNX_layers, Split_EltwiseMax)

 TEST_P(Test_ONNX_layers, LSTM_Activations)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name Block1237_Output_0_before_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
     testONNXModels("lstm_cntk_tanh", pb, 0, 0, false, false);
 }

@@ -946,6 +954,13 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)

 TEST_P(Test_ONNX_layers, GatherMultiOutput)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name 6 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
@@ -953,7 +968,7 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
 #endif

-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2021030000)
     if (target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);
 #endif
@@ -963,14 +978,25 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput)

 TEST_P(Test_ONNX_layers, DynamicAxes)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
+#if INF_ENGINE_VER_MAJOR_LT(2021000000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
 #endif
     testONNXModels("squeeze_and_conv_dynamic_axes");
     testONNXModels("unsqueeze_and_conv_dynamic_axes");
     testONNXModels("gather_dynamic_axes");
@@ -1050,6 +1076,13 @@ TEST_P(Test_ONNX_layers, PoolConv1d)

 TEST_P(Test_ONNX_layers, ConvResizePool1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name 15 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
@@ -1367,8 +1400,8 @@ TEST_P(Test_ONNX_nets, TinyYolov2)
     double l1 = default_l1, lInf = default_lInf;
     if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
     {
-        l1 = 0.017;
-        lInf = 0.14;
+        l1 = 0.02;
+        lInf = 0.2;
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
@@ -1465,10 +1498,10 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
         l1 = 2.4e-4;
         lInf = 6e-4;
     }
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
     {
-        l1 = 0.012f; lInf = 0.035f;
+        l1 = 0.013f; lInf = 0.035f;
     }
 #endif

@@ -84,6 +84,10 @@ public:
     void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                           double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false, const std::string& groupPrefix = "")
     {
+        if (cvtest::debugLevel > 0)
+        {
+            std::cout << prefix << groupPrefix << std::endl;
+        }
         std::string netPath = path(prefix + groupPrefix + "_net.pb");
         std::string netConfig = (hasText ? path(prefix + groupPrefix + "_net.pbtxt") : "");
         std::string inpPath = path(prefix + "_in.npy");
@@ -119,6 +123,16 @@ public:
         net.setInput(input);
         cv::Mat output = net.forward();
         normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
+
+        if (cvtest::debugLevel > 0 || HasFailure())
+        {
+            std::cout << "input: " << input.size << std::endl;
+            std::cout << input.reshape(1, 1) << std::endl;
+            std::cout << "ref " << ref.size << std::endl;
+            std::cout << ref.reshape(1, 1) << std::endl;
+            std::cout << "output: " << output.size << std::endl;
+            std::cout << output.reshape(1, 1) << std::endl;
+        }
     }
 };

@@ -133,7 +147,7 @@ TEST_P(Test_TensorFlow_layers, reduce_max)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
-    runTensorFlowNet("max_pool_by_axis");
+    runTensorFlowNet("max_pool_by_axis", false, 0.0f, 0.0f);
 }

 TEST_P(Test_TensorFlow_layers, reduce_sum)
@@ -145,7 +159,11 @@ TEST_P(Test_TensorFlow_layers, reduce_sum)

 TEST_P(Test_TensorFlow_layers, reduce_max_channel)
 {
-    runTensorFlowNet("reduce_max_channel");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // incorrect result
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    runTensorFlowNet("reduce_max_channel", false, 0.0f, 0.0f);
 }

 TEST_P(Test_TensorFlow_layers, reduce_sum_channel)
@@ -155,6 +173,10 @@ TEST_P(Test_TensorFlow_layers, reduce_sum_channel)

 TEST_P(Test_TensorFlow_layers, reduce_max_channel_keep_dims)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // incorrect result
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("reduce_max_channel", false, 0.0, 0.0, false, "_keep_dims");
 }

@@ -221,13 +243,49 @@ TEST_P(Test_TensorFlow_layers, padding)
     runTensorFlowNet("keras_pad_concat");
 }

-TEST_P(Test_TensorFlow_layers, padding_asymmetric)
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_1)
 {
     runTensorFlowNet("conv2d_asymmetric_pads_nchw");
 }
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_2)
+{
+    runTensorFlowNet("conv2d_asymmetric_pads_nhwc");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_3)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    runTensorFlowNet("max_pool2d_asymmetric_pads_nchw");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_4)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    runTensorFlowNet("max_pool2d_asymmetric_pads_nhwc");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_5)
+{
+    runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nchw");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_6)
+{
+    runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nhwc");
+}

@@ -268,6 +326,13 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat)

 TEST_P(Test_TensorFlow_layers, concat_axis_1)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Transpose with name Flatten_1/flatten/Reshape/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
@@ -423,19 +488,77 @@ TEST_P(Test_TensorFlow_layers, pooling_reduce_sum)
     runTensorFlowNet("reduce_sum"); // a SUM pooling over all spatial dimensions.
 }

-TEST_P(Test_TensorFlow_layers, pooling_reduce_sum2)
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_false)
 {
-    int axises[] = {0, 1, 2, 3};
-    for (int keepdims = 0; keepdims <= 1; ++keepdims)
-    {
-        for (int i = 0; i < sizeof(axises)/sizeof(axises[0]); ++i)
-        {
-            runTensorFlowNet(cv::format("reduce_sum_%d_%s", axises[i], (keepdims ? "True" : "False")));
-        }
-        runTensorFlowNet(cv::format("reduce_sum_1_2_%s", keepdims ? "True" : "False"));
-    }
+    runTensorFlowNet("reduce_sum_0_False");
 }
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_false)
+{
+    runTensorFlowNet("reduce_sum_1_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_false)
+{
+    runTensorFlowNet("reduce_sum_2_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_false)
+{
+    runTensorFlowNet("reduce_sum_3_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_false)
+{
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+    {
+        default_l1 = 0.01f;
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        default_l1 = 0.01f;
+    }
+#endif
+    runTensorFlowNet("reduce_sum_1_2_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_true)
+{
+    runTensorFlowNet("reduce_sum_0_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_true)
+{
+    runTensorFlowNet("reduce_sum_1_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_true)
+{
+    runTensorFlowNet("reduce_sum_2_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_true)
+{
+    runTensorFlowNet("reduce_sum_3_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_true)
+{
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+    {
+        default_l1 = 0.01f;
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        default_l1 = 0.01f;
+    }
+#endif
+    runTensorFlowNet("reduce_sum_1_2_True");
+}


 TEST_P(Test_TensorFlow_layers, max_pool_grad)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -715,13 +838,14 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
     double scoreDiff = default_l1, iouDiff = default_lInf;
     if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
     {
-        scoreDiff = 0.0043;
-        iouDiff = 0.037;
+        scoreDiff = 0.01;
+        iouDiff = 0.1;
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
+        iouDiff = 0.04;
     }

     normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
     expectNoFallbacksFromIE(net);
@@ -815,16 +939,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     expectNoFallbacksFromIE(net);
 }

-TEST_P(Test_TensorFlow_nets, Faster_RCNN)
+TEST_P(Test_TensorFlow_nets, Faster_RCNN_inception_v2_coco_2018_01_28)
 {
-    // FIXIT split test
     applyTestTag(
         (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
         CV_TEST_TAG_LONG,
         CV_TEST_TAG_DEBUG_VERYLONG
     );
-    static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
-                                  "faster_rcnn_resnet50_coco_2018_01_28"};

 #ifdef INF_ENGINE_RELEASE
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
@@ -835,13 +956,82 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
+    // Assertion `prior_height > 0' failed.
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+
+    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+    if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+
+    checkBackend();
+
+    double scoresDiff = 1e-5;
+    double iouDiff = 1e-4;
+
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        scoresDiff = 0.02;
+        iouDiff = 0.1;
+    }
+
+    std::string name = "faster_rcnn_inception_v2_coco_2018_01_28";
+    {
+        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+        std::string model = findDataFile("dnn/" + name + ".pb", false);
+
+        Net net = readNetFromTensorflow(model, proto);
+        net.setPreferableBackend(backend);
+        net.setPreferableTarget(target);
+        Mat img = imread(findDataFile("dnn/dog416.png"));
+        Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);
+
+        net.setInput(blob);
+        Mat out = net.forward();
+
+        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));
+
+        // accuracy (both OpenCV & IE)
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
+        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
+    }
+}
+
+TEST_P(Test_TensorFlow_nets, Faster_RCNN_resnet50_coco_2018_01_28)
+{
+    applyTestTag(
+        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
+        CV_TEST_TAG_LONG,
+        CV_TEST_TAG_DEBUG_VERYLONG
+    );
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name FirstStageBoxPredictor/ClassPredictor/reshape_1/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
+#ifdef INF_ENGINE_RELEASE
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+        (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+
+    if (INF_ENGINE_VER_MAJOR_GT(2019030000) &&
+        backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
+    // Assertion `prior_height > 0' failed.
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif

     if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
@@ -856,10 +1046,11 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         scoresDiff = 0.06;
         iouDiff = 0.08;
     }
-    for (int i = 0; i < 2; ++i)
+
+    std::string name = "faster_rcnn_resnet50_coco_2018_01_28";
     {
-        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
-        std::string model = findDataFile("dnn/" + names[i] + ".pb", false);
+        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+        std::string model = findDataFile("dnn/" + name + ".pb", false);

         Net net = readNetFromTensorflow(model, proto);
         net.setPreferableBackend(backend);
@@ -870,8 +1061,13 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         net.setInput(blob);
         Mat out = net.forward();

-        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
-        normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff, iouDiff);
+        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));
+
+        // accuracy
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
+        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
     }
 }

@@ -1282,6 +1478,10 @@ TEST_P(Test_TensorFlow_layers, resize_bilinear_down)

 TEST_P(Test_TensorFlow_layers, resize_concat_optimization)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU) // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("resize_concat_optimization");
 }

@@ -1406,7 +1606,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
     Mat outDetections = outs[0];
     Mat outMasks = outs[1];

-    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.019 : 2e-5;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.2 : 2e-5;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.018 : default_lInf;
     normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5, scoreDiff, iouDiff);

@@ -1440,7 +1640,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)

     double inter = cv::countNonZero(masks & refMasks);
     double area = cv::countNonZero(masks | refMasks);
-    EXPECT_GE(inter / area, 0.99);
+    EXPECT_GE(inter / area, (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.98 : 0.99);

     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         expectNoFallbacks(net);
@@ -186,7 +186,7 @@ TEST_P(Test_Torch_layers, run_concat)
 TEST_P(Test_Torch_layers, run_depth_concat)
 {
     double lInf = 0.0;
-    if (target == DNN_TARGET_OPENCL_FP16)
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
     {
         lInf = 0.032;
     }
@@ -66,7 +66,7 @@ if(HAVE_QT)
     QT5_ADD_RESOURCES(_RCC_OUTFILES ${CMAKE_CURRENT_LIST_DIR}/src/window_QT.qrc)
     QT5_WRAP_CPP(_MOC_OUTFILES ${CMAKE_CURRENT_LIST_DIR}/src/window_QT.h)
   else()
-    message(FATAL_ERROR "Unsuported QT version: ${QT_VERSION_MAJOR}")
+    message(FATAL_ERROR "Unsupported QT version: ${QT_VERSION_MAJOR}")
   endif()

   list(APPEND highgui_srcs
@@ -485,7 +485,7 @@ class JSWrapperGenerator(object):
             arg_types.append(arg_type)
             unwrapped_arg_types.append(arg_type)

-        # Function attribure
+        # Function attribute
         func_attribs = ''
         if '*' in ''.join(arg_types):
             func_attribs += ', allow_raw_pointers()'
@@ -680,7 +680,7 @@ class JSWrapperGenerator(object):
             def_args.append(arg.defval)
             arg_types.append(orig_arg_types[-1])

-        # Function attribure
+        # Function attribute
         func_attribs = ''
         if '*' in ''.join(orig_arg_types):
             func_attribs += ', allow_raw_pointers()'
@@ -1,5 +1,6 @@
 #!/usr/bin/env python

+from __future__ import print_function
 import testlog_parser, sys, os, xml, re
 from table_formatter import *
 from optparse import OptionParser
@@ -116,7 +117,7 @@ if __name__ == "__main__":
     (options, args) = parser.parse_args()

     if len(args) != 1:
-        print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml", file=sys.stderr)
         exit(1)

     options.generateHtml = detectHtmlOutputType(options.format)
@@ -136,7 +137,7 @@ if __name__ == "__main__":
     args[0] = os.path.basename(args[0])

     if not tests:
-        print >> sys.stderr, "Error - no tests matched"
+        print("Error - no tests matched", file=sys.stderr)
         exit(1)

     argsnum = len(tests[0][1])
@@ -156,26 +157,26 @@ if __name__ == "__main__":
             names1.add(sn)
         if sn == sname:
             if len(pair[1]) != argsnum:
-                print >> sys.stderr, "Error - unable to create chart tables for functions having different argument numbers"
+                print("Error - unable to create chart tables for functions having different argument numbers", file=sys.stderr)
                 sys.exit(1)
             for i in range(argsnum):
                 arglists[i][pair[1][i]] = 1

     if names1 or len(names) != 1:
-        print >> sys.stderr, "Error - unable to create tables for functions from different test suits:"
+        print("Error - unable to create tables for functions from different test suits:", file=sys.stderr)
         i = 1
         for name in sorted(names):
-            print >> sys.stderr, "%4s: %s" % (i, name)
+            print("%4s: %s" % (i, name), file=sys.stderr)
             i += 1
         if names1:
-            print >> sys.stderr, "Other suits in this log (can not be chosen):"
+            print("Other suits in this log (can not be chosen):", file=sys.stderr)
             for name in sorted(names1):
-                print >> sys.stderr, "%4s: %s" % (i, name)
+                print("%4s: %s" % (i, name), file=sys.stderr)
                 i += 1
         sys.exit(1)

     if argsnum < 2:
-        print >> sys.stderr, "Error - tests from %s have less than 2 parameters" % sname
+        print("Error - tests from %s have less than 2 parameters" % sname, file=sys.stderr)
         exit(1)

     for i in range(argsnum):
@@ -1,5 +1,6 @@
 #!/usr/bin/env python

+from __future__ import print_function
 import testlog_parser, sys, os, xml, re, glob
 from table_formatter import *
 from optparse import OptionParser
@@ -14,7 +15,7 @@ if __name__ == "__main__":
     (options, args) = parser.parse_args()

     if len(args) < 1:
-        print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml", file=sys.stderr)
        exit(0)

     options.generateHtml = detectHtmlOutputType(options.format)
@@ -105,7 +105,7 @@ if __name__ == "__main__":
     path = args.build_path
     try:
         if not os.path.isdir(path):
-            raise Err("Not a directory (should contain CMakeCache.txt ot test executables)")
+            raise Err("Not a directory (should contain CMakeCache.txt to test executables)")
         cache = CMakeCache(args.configuration)
         fname = os.path.join(path, "CMakeCache.txt")

@@ -1,5 +1,6 @@
 #!/usr/bin/env python

+from __future__ import print_function
 import testlog_parser, sys, os, xml, glob, re
 from table_formatter import *
 from optparse import OptionParser
@@ -26,7 +27,7 @@ def getSetName(tset, idx, columns, short = True):

 if __name__ == "__main__":
     if len(sys.argv) < 2:
-        print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]", file=sys.stderr)
         exit(0)

     parser = OptionParser()
@@ -163,7 +163,7 @@ class BuilderDLDT:
         self.config = config

         cpath = self.config.dldt_config
-        log.info('DLDT build configration: %s', cpath)
+        log.info('DLDT build configuration: %s', cpath)
         if not os.path.exists(cpath):
             cpath = os.path.join(SCRIPT_DIR, cpath)
             if not os.path.exists(cpath):
@@ -575,5 +575,5 @@ if __name__ == "__main__":
     try:
         main()
     except:
-        log.info('FATAL: Error occured. To investigate problem try to change logging level using LOGLEVEL=DEBUG environment variable.')
+        log.info('FATAL: Error occurred. To investigate problem try to change logging level using LOGLEVEL=DEBUG environment variable.')
         raise
@@ -300,7 +300,7 @@ class SiamRPNTracker:
         # clip boundary
         cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])

-        # udpate state
+        # update state
         self.center_pos = np.array([cx, cy])
         self.w = width
         self.h = height
@@ -270,12 +270,12 @@ def createSSDGraph(modelPath, configPath, outputPath):
     addConstNode('concat/axis_flatten', [-1], graph_def)
     addConstNode('PriorBox/concat/axis', [-2], graph_def)

-    for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor is 'convolutional' else 'BoxPredictor']:
+    for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor == 'convolutional' else 'BoxPredictor']:
         concatInputs = []
         for i in range(num_layers):
             # Flatten predictions
             flatten = NodeDef()
-            if box_predictor is 'convolutional':
+            if box_predictor == 'convolutional':
                 inpName = 'BoxPredictor_%d/%s/BiasAdd' % (i, label)
             else:
                 if i == 0:
@@ -308,7 +308,7 @@ def createSSDGraph(modelPath, configPath, outputPath):
         priorBox = NodeDef()
         priorBox.name = 'PriorBox_%d' % i
         priorBox.op = 'PriorBox'
-        if box_predictor is 'convolutional':
+        if box_predictor == 'convolutional':
             priorBox.input.append('BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % i)
         else:
             if i == 0:
@@ -24,7 +24,7 @@ mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
 roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
 cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)

-# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
+# Setup the termination criteria, either 10 iteration or move by at least 1 pt
 term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )

 while(1):
@@ -24,7 +24,7 @@ mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
 roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
 cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)

-# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
+# Setup the termination criteria, either 10 iteration or move by at least 1 pt
 term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )

 while(1):