Mirror of https://github.com/opencv/opencv.git (synced 2024-11-24 19:20:28 +08:00)

Commit b4c5b50a3e: Merge remote-tracking branch 'upstream/3.4' into merge-3.4
@@ -22,6 +22,9 @@ set(OPENCV_DOWNLOAD_PATH "${OpenCV_SOURCE_DIR}/.cache" CACHE PATH "${HELP_OPENCV
 set(OPENCV_DOWNLOAD_LOG "${OpenCV_BINARY_DIR}/CMakeDownloadLog.txt")
 set(OPENCV_DOWNLOAD_WITH_CURL "${OpenCV_BINARY_DIR}/download_with_curl.sh")
 set(OPENCV_DOWNLOAD_WITH_WGET "${OpenCV_BINARY_DIR}/download_with_wget.sh")
+set(OPENCV_DOWNLOAD_TRIES_LIST 1 CACHE STRING "List of download tries") # a list
+set(OPENCV_DOWNLOAD_PARAMS INACTIVITY_TIMEOUT 60 TIMEOUT 600 CACHE STRING "Download parameters to be passed to file(DOWNLOAD ...)")
+mark_as_advanced(OPENCV_DOWNLOAD_TRIES_LIST OPENCV_DOWNLOAD_PARAMS)

 # Init download cache directory and log file and helper scripts
 if(NOT EXISTS "${OPENCV_DOWNLOAD_PATH}")
@@ -154,11 +157,17 @@ function(ocv_download)
   # Download
   if(NOT EXISTS "${CACHE_CANDIDATE}")
     ocv_download_log("#cmake_download \"${CACHE_CANDIDATE}\" \"${DL_URL}\"")
-    file(DOWNLOAD "${DL_URL}" "${CACHE_CANDIDATE}"
-         INACTIVITY_TIMEOUT 60
-         TIMEOUT 600
-         STATUS status
-         LOG __log)
+    foreach(try ${OPENCV_DOWNLOAD_TRIES_LIST})
+      ocv_download_log("#try ${try}")
+      file(DOWNLOAD "${DL_URL}" "${CACHE_CANDIDATE}"
+           STATUS status
+           LOG __log
+           ${OPENCV_DOWNLOAD_PARAMS})
+      if(status EQUAL 0)
+        break()
+      endif()
+      message(STATUS "Try ${try} failed")
+    endforeach()
     if(NOT OPENCV_SKIP_FILE_DOWNLOAD_DUMP) # workaround problem with old CMake versions: "Invalid escape sequence"
       string(LENGTH "${__log}" __log_length)
       if(__log_length LESS 65536)
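The hunk above replaces the single `file(DOWNLOAD ...)` call with a bounded retry loop driven by `OPENCV_DOWNLOAD_TRIES_LIST` and `OPENCV_DOWNLOAD_PARAMS`. As a rough illustration of the same pattern outside CMake (not part of the commit), here is a minimal C++ sketch of a bounded-retry download; the `tryDownload()` helper, URL, and attempt list are hypothetical placeholders.

```
#include <cstdio>

// Hypothetical helper: returns 0 on success, non-zero on failure
// (stands in for the STATUS result of CMake's file(DOWNLOAD ...)).
static int tryDownload(const char* url, const char* dst)
{
    (void)url; (void)dst;
    return 1; // pretend the download failed so every attempt is exercised
}

int main()
{
    const char* url = "https://example.org/file.bin"; // placeholder URL
    const char* dst = "file.bin";
    const int tries[] = {1, 2, 3};                    // mirrors OPENCV_DOWNLOAD_TRIES_LIST
    int status = -1;

    for (int t : tries)
    {
        status = tryDownload(url, dst);
        if (status == 0)
            break;                                    // same role as break() in the CMake loop
        std::printf("Try %d failed\n", t);
    }
    return status == 0 ? 0 : 1;
}
```

Since `OPENCV_DOWNLOAD_TRIES_LIST` is a cache variable, a build could, for example, request three attempts by configuring with `-DOPENCV_DOWNLOAD_TRIES_LIST="1;2;3"`.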
@@ -195,8 +204,8 @@ For details please refer to the download log file:
 ${OPENCV_DOWNLOAD_LOG}
 ")
       # write helper scripts for failed downloads
-      file(APPEND "${OPENCV_DOWNLOAD_WITH_CURL}" "curl --output \"${CACHE_CANDIDATE}\" \"${DL_URL}\"\n")
-      file(APPEND "${OPENCV_DOWNLOAD_WITH_WGET}" "wget -O \"${CACHE_CANDIDATE}\" \"${DL_URL}\"\n")
+      file(APPEND "${OPENCV_DOWNLOAD_WITH_CURL}" "curl --create-dirs --output \"${CACHE_CANDIDATE}\" \"${DL_URL}\"\n")
+      file(APPEND "${OPENCV_DOWNLOAD_WITH_WGET}" "mkdir -p $(dirname ${CACHE_CANDIDATE}) && wget -O \"${CACHE_CANDIDATE}\" \"${DL_URL}\"\n")
       return()
     endif()
@@ -117,14 +117,41 @@ So, make sure [docker](https://www.docker.com/) is installed in your system and
 @code{.bash}
 git clone https://github.com/opencv/opencv.git
 cd opencv
-docker run --rm --workdir /code -v "$PWD":/code "trzeci/emscripten:latest" python ./platforms/js/build_js.py build_js
+docker run --rm --workdir /code -v "$PWD":/code "trzeci/emscripten:latest" python ./platforms/js/build_js.py build
 @endcode

 On Windows, use the following PowerShell command:

 @code{.bash}
-docker run --rm --workdir /code -v "$(get-location):/code" "trzeci/emscripten:latest" python ./platforms/js/build_js.py build_js
+docker run --rm --workdir /code -v "$(get-location):/code" "trzeci/emscripten:latest" python ./platforms/js/build_js.py build
 @endcode

-@note
-The example uses the latest version of the [trzeci/emscripten](https://hub.docker.com/r/trzeci/emscripten) docker container. At this time, the latest version works fine and is `trzeci/emscripten:sdk-tag-1.38.32-64bit`.
+@warning
+The example uses the latest version of emscripten. If the build fails, try a version that is known to work, `1.38.32`, using the following command:
+
+@code{.bash}
+docker run --rm --workdir /code -v "$PWD":/code "trzeci/emscripten:sdk-tag-1.38.32-64bit" python ./platforms/js/build_js.py build
+@endcode
+
+### Building the documentation with Docker
+
+To build the documentation, `doxygen` needs to be installed. Create a file named `Dockerfile` with the following content:
+
+```
+FROM trzeci/emscripten:sdk-tag-1.38.32-64bit
+
+RUN apt-get update -y
+RUN apt-get install -y doxygen
+```
+
+Then we build the docker image and name it `opencv-js-doc` with the following command (which needs to be run only once):
+
+@code{.bash}
+docker build . -t opencv-js-doc
+@endcode
+
+Now run the build command again, this time using the new image and passing `--build_doc`:
+
+@code{.bash}
+docker run --rm --workdir /code -v "$PWD":/code "opencv-js-doc" python ./platforms/js/build_js.py build --build_doc
+@endcode
@@ -7,7 +7,7 @@

 /**
 Helper header to support SIMD intrinsics (universal intrinsics) in user code.
-Intrinsics documentation: https://docs.opencv.org/3.4/df/d91/group__core__hal__intrin.html
+Intrinsics documentation: https://docs.opencv.org/master/df/d91/group__core__hal__intrin.html


 Checks of target CPU instruction set based on compiler definitions don't work well enough.
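For context, this header is the public entry point for OpenCV's universal intrinsics in user code. Below is a minimal usage sketch (not part of the commit), assuming a build where `CV_SIMD128` is available and the 3.4/4.x-era API where arithmetic operators are defined for the vector types; the function and array names are made up for illustration.

```
#include <opencv2/core/simd_intrinsics.hpp>

// Adds two float arrays; falls back to scalar code when SIMD is unavailable.
void add_arrays(const float* a, const float* b, float* dst, int n)
{
    int i = 0;
#if CV_SIMD128
    for (; i + 4 <= n; i += 4)
    {
        cv::v_float32x4 va = cv::v_load(a + i);
        cv::v_float32x4 vb = cv::v_load(b + i);
        cv::v_store(dst + i, va + vb);   // operator+ maps to a vector add
    }
#endif
    for (; i < n; ++i)                   // scalar tail (and scalar fallback)
        dst[i] = a[i] + b[i];
}
```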
@@ -124,6 +124,33 @@ VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \

 #define VSX_IMPL_2VRG(rt, rg, opc, fnm) VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%1,%2", fnm)

+#if __GNUG__ < 8
+
+// Support for int4 -> dword2 expanding multiply was added in GCC 8.
+#ifdef vec_mule
+#undef vec_mule
+#endif
+#ifdef vec_mulo
+#undef vec_mulo
+#endif
+
+VSX_REDIRECT_2RG(vec_ushort8, vec_uchar16, vec_mule, __builtin_vec_mule)
+VSX_REDIRECT_2RG(vec_short8, vec_char16, vec_mule, __builtin_vec_mule)
+VSX_REDIRECT_2RG(vec_int4, vec_short8, vec_mule, __builtin_vec_mule)
+VSX_REDIRECT_2RG(vec_uint4, vec_ushort8, vec_mule, __builtin_vec_mule)
+VSX_REDIRECT_2RG(vec_ushort8, vec_uchar16, vec_mulo, __builtin_vec_mulo)
+VSX_REDIRECT_2RG(vec_short8, vec_char16, vec_mulo, __builtin_vec_mulo)
+VSX_REDIRECT_2RG(vec_int4, vec_short8, vec_mulo, __builtin_vec_mulo)
+VSX_REDIRECT_2RG(vec_uint4, vec_ushort8, vec_mulo, __builtin_vec_mulo)
+
+// dword2 support arrived in ISA 2.07 and GCC 8+
+VSX_IMPL_2VRG(vec_dword2, vec_int4, vmulesw, vec_mule)
+VSX_IMPL_2VRG(vec_udword2, vec_uint4, vmuleuw, vec_mule)
+VSX_IMPL_2VRG(vec_dword2, vec_int4, vmulosw, vec_mulo)
+VSX_IMPL_2VRG(vec_udword2, vec_uint4, vmulouw, vec_mulo)
+
+#endif
+
 #if __GNUG__ < 7
 // up to GCC 6 vec_mul only supports precisions and llong
 # ifdef vec_mul
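The block above redirects `vec_mule`/`vec_mulo` to the GCC builtins and supplies the missing int4 -> dword2 forms for GCC < 8. As a plain standard-C++ illustration of what an even/odd widening multiply computes (not the actual VSX intrinsics, and ignoring the endian-dependent lane numbering on POWER), consider:

```
#include <cstdint>
#include <cstdio>

int main()
{
    // Two "vectors" of four 32-bit lanes each.
    int32_t a[4] = {100000, 2, 300000, 4};
    int32_t b[4] = {400000, 5, 600000, 7};

    // vec_mule-style result: products of the even lanes (0 and 2), widened to 64-bit
    // so large 32-bit products do not overflow.
    int64_t even[2] = { (int64_t)a[0] * b[0], (int64_t)a[2] * b[2] };
    // vec_mulo-style result: products of the odd lanes (1 and 3), widened to 64-bit.
    int64_t odd[2]  = { (int64_t)a[1] * b[1], (int64_t)a[3] * b[3] };

    std::printf("even: %lld %lld\n", (long long)even[0], (long long)even[1]);
    std::printf("odd:  %lld %lld\n", (long long)odd[0], (long long)odd[1]);
    return 0;
}
```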
@@ -1619,7 +1619,8 @@ struct Net::Impl
         Ptr<Layer> layer = ld.layerInstance;
         if (!fused && !layer->supportBackend(preferableBackend))
         {
-            bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1;
+            bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1 &&
+                                INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2);
             // TODO: there is a bug in Myriad plugin with custom layers shape infer.
             if (preferableTarget == DNN_TARGET_MYRIAD)
             {
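The new condition relies on the `INF_ENGINE_VER_MAJOR_GE` / `INF_ENGINE_VER_MAJOR_LT` helpers, which compare against packed Inference Engine release ids such as 2019020000 (used below for "2019 R2"). The sketch that follows only illustrates the idea of such a packed-version gate; the macro names, bodies, and encoding here are assumptions for illustration, not OpenCV's actual definitions.

```
#include <cstdio>

// Illustrative only: assume the build sets a packed release id like 2019020000
// (this constant and the macros below are hypothetical, not OpenCV's headers).
#define EXAMPLE_INF_ENGINE_RELEASE 2019020000

#define EXAMPLE_VER_GE(ver) (EXAMPLE_INF_ENGINE_RELEASE >= (ver))
#define EXAMPLE_VER_LT(ver) (EXAMPLE_INF_ENGINE_RELEASE <  (ver))

int main()
{
    // With the assumed release id, the "2019 R2 or newer" gate is enabled
    // and the "older than 2019 R2" skip condition is not.
    std::printf("GE 2019R2: %d\n", EXAMPLE_VER_GE(2019020000));
    std::printf("LT 2019R2: %d\n", EXAMPLE_VER_LT(2019020000));
    return 0;
}
```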
@@ -582,7 +582,6 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
     try
     {
         AutoLock lock(getInitializationMutex());
-        InferenceEngine::Core& ie = getCore();
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
         auto& sharedPlugins = getSharedPlugins();
         auto pluginIt = sharedPlugins.find(device_name);
@@ -591,6 +590,8 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
             enginePtr = pluginIt->second;
         }
         else
+#else
+        InferenceEngine::Core& ie = getCore();
 #endif
         {
 #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
@@ -334,6 +334,8 @@ static const std::chrono::milliseconds async_timeout(500);
 typedef testing::TestWithParam<tuple<std::string, Target> > Test_Darknet_nets_async;
 TEST_P(Test_Darknet_nets_async, Accuracy)
 {
+    if (INF_ENGINE_VER_MAJOR_LT(2019020000))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
     applyTestTag(CV_TEST_TAG_MEMORY_512MB);

     std::string prefix = get<0>(GetParam());
@@ -481,8 +481,11 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
                                   "faster_rcnn_resnet50_coco_2018_01_28"};

     checkBackend();
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
+#ifdef INF_ENGINE_RELEASE
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+        (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+#endif
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

@@ -83,12 +83,12 @@ CV_IMPL void cvSetWindowProperty(const char* name, int prop_id, double prop_valu
         break;

     case cv::WND_PROP_TOPMOST:
-    #if defined(HAVE_WIN32UI)
+    #if defined (HAVE_QT)
+        // nothing
+    #elif defined(HAVE_WIN32UI)
         cvSetPropTopmost_W32(name, (prop_value != 0 ? true : false));
     #elif defined(HAVE_COCOA)
         cvSetPropTopmost_COCOA(name, (prop_value != 0 ? true : false));
     #else
         CV_LOG_WARNING(NULL, "Property WND_PROP_TOPMOST is not supported on current GUI backend");
     #endif
         break;

@@ -171,12 +171,13 @@ CV_IMPL double cvGetWindowProperty(const char* name, int prop_id)
         break;

     case cv::WND_PROP_TOPMOST:
-    #if defined(HAVE_WIN32UI)
+    #if defined (HAVE_QT)
+        return -1;
+    #elif defined(HAVE_WIN32UI)
         return cvGetPropTopmost_W32(name);
     #elif defined(HAVE_COCOA)
         return cvGetPropTopmost_COCOA(name);
     #else
         CV_LOG_WARNING(NULL, "Property WND_PROP_TOPMOST is not supported on current GUI backend");
         return -1;
     #endif
         break;

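Both hunks dispatch the user-facing `WND_PROP_TOPMOST` window property to the active GUI backend (Qt, Win32 UI, Cocoa, or a logged warning otherwise). A minimal usage sketch of that public API is shown below (not part of the commit); the window name and image are placeholders.

```
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::Mat img(240, 320, CV_8UC3, cv::Scalar(40, 40, 40)); // placeholder image
    cv::namedWindow("demo", cv::WINDOW_NORMAL);
    cv::imshow("demo", img);

    // Ask the backend to keep the window on top; a backend without support
    // logs a warning (see the hunks above) instead of failing.
    cv::setWindowProperty("demo", cv::WND_PROP_TOPMOST, 1);

    // Query it back; -1 means the property is not supported by this backend.
    double topmost = cv::getWindowProperty("demo", cv::WND_PROP_TOPMOST);
    (void)topmost;

    cv::waitKey(0);
    return 0;
}
```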
@@ -2543,10 +2543,10 @@ namespace cv{
         //Array used to store info and labeled pixel by each thread.
         //Different threads affect different memory location of chunksSizeAndLabels
         const int chunksSizeAndLabelsSize = h + 1;
-        int *chunksSizeAndLabels = (int *)cv::fastMalloc(chunksSizeAndLabelsSize * sizeof(int));
+        cv::AutoBuffer<int, 0> chunksSizeAndLabels(chunksSizeAndLabelsSize);

         //Tree of labels
-        LabelT *P = (LabelT *)cv::fastMalloc(Plength * sizeof(LabelT));
+        cv::AutoBuffer<LabelT, 0> P(Plength);
         //First label is for background
         P[0] = 0;

@@ -2555,30 +2555,27 @@ namespace cv{

         //First scan, each thread works with chunk of img.rows/nThreads rows
         //e.g. 300 rows, 4 threads -> each chunks is composed of 75 rows
-        cv::parallel_for_(range, FirstScan(img, imgLabels, P, chunksSizeAndLabels), nParallelStripes);
+        cv::parallel_for_(range, FirstScan(img, imgLabels, P.data(), chunksSizeAndLabels.data()), nParallelStripes);

         //merge labels of different chunks
-        mergeLabels(img, imgLabels, P, chunksSizeAndLabels);
+        mergeLabels(img, imgLabels, P.data(), chunksSizeAndLabels.data());

         LabelT nLabels = 1;
         for (int i = 0; i < h; i = chunksSizeAndLabels[i]){
             CV_Assert(i + 1 < chunksSizeAndLabelsSize);
-            flattenL(P, LabelT((i + 1) / 2) * LabelT((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
+            flattenL(P.data(), LabelT((i + 1) / 2) * LabelT((w + 1) / 2) + 1, chunksSizeAndLabels[i + 1], nLabels);
         }

         //Array for statistics data
-        StatsOp *sopArray = new StatsOp[h];
+        cv::AutoBuffer<StatsOp, 0> sopArray(h);
         sop.init(nLabels);

         //Second scan
-        cv::parallel_for_(range, SecondScan(img, imgLabels, P, sop, sopArray, nLabels), nParallelStripes);
+        cv::parallel_for_(range, SecondScan(img, imgLabels, P.data(), sop, sopArray.data(), nLabels), nParallelStripes);

-        StatsOp::mergeStats(imgLabels, sopArray, sop, nLabels);
+        StatsOp::mergeStats(imgLabels, sopArray.data(), sop, nLabels);
         sop.finish();

-        delete[] sopArray;
-        cv::fastFree(chunksSizeAndLabels);
-        cv::fastFree(P);
         return nLabels;
     }
 };//End struct LabelingGranaParallel
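These hunks swap manual `cv::fastMalloc`/`cv::fastFree` and `new[]`/`delete[]` pairs for `cv::AutoBuffer`, which releases its storage automatically when it goes out of scope (including on early returns or exceptions). A minimal sketch of the pattern, with a made-up helper and size:

```
#include <opencv2/core/utility.hpp>

// Illustrative helper (not from OpenCV): fills and sums a temporary buffer.
int sum_of_squares(int n)
{
    // With a fixed-size parameter of 0 the buffer is always heap-allocated;
    // it is freed automatically when buf goes out of scope, so no explicit
    // cv::fastFree / delete[] is needed (this is what the hunks above rely on).
    cv::AutoBuffer<int, 0> buf(n);
    int* p = buf.data();

    int s = 0;
    for (int i = 0; i < n; ++i)
    {
        p[i] = i * i;
        s += p[i];
    }
    return s;
}
```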
@@ -2727,7 +2727,7 @@ static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<i

     if( _aperture_size < 0 )
     {
-        static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
+        static const int scharr[8] = { 3, 10, 3, -1, 0, 1, 0, 0 }; // extra elements to eliminate "-Warray-bounds" bogus warning
         assert( size == 3 );
         for( i = 0; i < size; i++ )
             kernel[i] = scharr[order*3 + i];