Mirror of https://github.com/opencv/opencv.git
Commit 295afd5882: Merge remote-tracking branch 'upstream/3.4' into merge-3.4
@@ -100,7 +100,7 @@ if(CUDA_FOUND)
   set(_arch_pascal "6.0;6.1")
   set(_arch_volta "7.0")
   set(_arch_turing "7.5")
-  set(_arch_ampere "8.0")
+  set(_arch_ampere "8.0;8.6")
   if(NOT CMAKE_CROSSCOMPILING)
     list(APPEND _generations "Auto")
   endif()
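Compute capability 8.6 covers the consumer Ampere parts (GA10x, e.g. the GeForce RTX 30 series), while 8.0 is the data-center GA100. If it is unclear which entry in the `_arch_*` lists matches a given machine, the CUDA runtime can report the device's compute capability directly; this is a small standalone sketch for checking that, not part of OpenCV's build logic:

    // cc_query.cpp -- build against the CUDA runtime (e.g. link -lcudart) and run on the target machine.
    #include <cuda_runtime.h>
    #include <cstdio>

    int main() {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
            std::printf("no CUDA device visible\n");
            return 1;
        }
        // prop.major / prop.minor form the compute capability, e.g. 8.6 for GA10x boards.
        std::printf("%s: compute capability %d.%d\n", prop.name, prop.major, prop.minor);
        return 0;
    }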
@@ -252,6 +252,7 @@ if(NOT DEFINED IPPROOT)
   else()
     ocv_install_3rdparty_licenses(ippicv "${ICV_PACKAGE_ROOT}/EULA.txt")
   endif()
+  ocv_install_3rdparty_licenses(ippicv "${ICV_PACKAGE_ROOT}/third-party-programs.txt")
 endif()
 
 file(TO_CMAKE_PATH "${IPPROOT}" __IPPROOT)
@@ -77,18 +77,18 @@ void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _normalizedIn
                               OutputArray _Ma, OutputArray _Mb)
 {
     //argument checking:
-    size_t n = static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()); //number of points
+    size_t n = static_cast<size_t>(_normalizedInputPoints.rows()) * static_cast<size_t>(_normalizedInputPoints.cols()); //number of points
     int objType = _objectPoints.type();
     int type_input = _normalizedInputPoints.type();
 
     CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
                  "Type of _objectPoints must be CV_32FC3 or CV_64FC3" );
     CV_CheckType(type_input, type_input == CV_32FC2 || type_input == CV_64FC2,
-                 "Type of _normalizedInputPoints must be CV_32FC3 or CV_64FC3" );
+                 "Type of _normalizedInputPoints must be CV_32FC2 or CV_64FC2" );
     CV_Assert(_objectPoints.rows() == 1 || _objectPoints.cols() == 1);
     CV_Assert(_objectPoints.rows() >= 4 || _objectPoints.cols() >= 4);
     CV_Assert(_normalizedInputPoints.rows() == 1 || _normalizedInputPoints.cols() == 1);
-    CV_Assert(static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()) == n);
+    CV_Assert(static_cast<size_t>(_objectPoints.rows()) * static_cast<size_t>(_objectPoints.cols()) == n);
 
     Mat normalizedInputPoints;
     if (type_input == CV_32FC2)
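Two things change in the argument checking above: the point count is now taken from `_normalizedInputPoints` (with the object-point count asserted to match it), and the multiplications are widened before, not after, the cast. The second part matters because `rows() * cols()` is an `int * int` product that can overflow before it is converted to `size_t`; casting each factor first makes the multiplication happen in `size_t`. A minimal self-contained illustration, with made-up dimensions chosen to exceed a 32-bit `int`:

    #include <cstddef>
    #include <iostream>

    int main() {
        int rows = 100000, cols = 50000;   // hypothetical sizes; the product exceeds INT_MAX
        // size_t bad = static_cast<std::size_t>(rows * cols);   // int overflow happens before the cast
        std::size_t good = static_cast<std::size_t>(rows) * static_cast<std::size_t>(cols);
        std::cout << good << "\n";         // 5000000000
        return 0;
    }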
@@ -101,7 +101,7 @@ void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _normalizedIn
     }
 
     Mat objectInputPoints;
-    if (type_input == CV_32FC3)
+    if (objType == CV_32FC3)
     {
         _objectPoints.getMat().convertTo(objectInputPoints, CV_64F);
     }
@@ -63,7 +63,7 @@ struct CheckContext {
 #define CV__CHECK_LOCATION_VARNAME(id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_check_, id), __LINE__)
 #define CV__DEFINE_CHECK_CONTEXT(id, message, testOp, p1_str, p2_str) \
     static const cv::detail::CheckContext CV__CHECK_LOCATION_VARNAME(id) = \
-        { CV__CHECK_FUNCTION, CV__CHECK_FILENAME, __LINE__, testOp, message, p1_str, p2_str }
+        { CV__CHECK_FUNCTION, CV__CHECK_FILENAME, __LINE__, testOp, "" message, "" p1_str, "" p2_str }
 
 CV_EXPORTS void CV_NORETURN check_failed_auto(const int v1, const int v2, const CheckContext& ctx);
 CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v1, const size_t v2, const CheckContext& ctx);
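Prefixing `message`, `p1_str` and `p2_str` with an empty string literal is the usual trick for enforcing, at compile time, that a macro argument is a string literal: adjacent literals are concatenated, whereas `""` followed by a runtime `const char*` expression does not compile. That guarantee matters here because the `static const CheckContext` aggregate stores the pointers for later use, so they must refer to data with static storage duration. A minimal sketch of the mechanism (the macro name below is made up for illustration):

    #include <cstdio>
    #include <string>

    // "" MSG only compiles when MSG expands to a string literal.
    #define REQUIRE_LITERAL(MSG) ("" MSG)

    int main() {
        std::puts(REQUIRE_LITERAL("static message"));        // OK: "" "static message" concatenates
        std::string runtime_msg = "built at runtime";
        (void)runtime_msg;                                   // silence unused-variable warning
        // std::puts(REQUIRE_LITERAL(runtime_msg.c_str()));  // rejected: "" runtime_msg.c_str() is ill-formed
        return 0;
    }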
@@ -607,6 +607,7 @@ void OCL4DNNConvSpatial<Dtype>::calculateBenchmark(const UMat &bottom, UMat &ver
 {
     options_.str(""); options_.clear(); // clear contents and state flags
     createBasicKernel(1, 1, 1);
+    CV_Assert(!kernelQueue.empty());  // basic kernel must be available
     kernel_index_ = kernelQueue.size() - 1;
     convolve(bottom, verifyTop, weight, bias, numImages, kernelQueue[kernel_index_]);
     CV_Assert(phash.find(kernelQueue[kernel_index_]->kernelName) != phash.end());
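The assertion added here (and in the two hunks below) guards the `kernelQueue.size() - 1` indexing that follows: `size()` is unsigned, so if kernel creation failed and the queue is empty, `size() - 1` wraps around to `SIZE_MAX` instead of producing `-1`, and the bug surfaces far from its cause. A small sketch of the failure mode with a plain `std::vector`:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> queue;                    // imagine the basic kernel failed to be enqueued
        if (queue.empty()) {
            std::cerr << "no kernel available\n";  // fail fast, as the added CV_Assert does
            return 1;
        }
        std::size_t idx = queue.size() - 1;        // only safe once the queue is known to be non-empty
        std::cout << queue[idx] << "\n";
        return 0;
    }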
@@ -1713,6 +1714,7 @@ void OCL4DNNConvSpatial<float>::useFirstAvailable(const UMat &bottom,
                              tunerItems[i]->blockHeight,
                              tunerItems[i]->blockDepth))
         {
+            CV_Assert(!kernelQueue.empty());  // basic kernel must be available
             int kernelIdx = kernelQueue.size() - 1;
             kernelConfig* config = kernelQueue[kernelIdx].get();
             bool failed = false;
@@ -1883,6 +1885,7 @@ void OCL4DNNConvSpatial<float>::setupConvolution(const UMat &bottom,
         CV_LOG_INFO(NULL, "fallback to basic kernel");
         options_.str(""); options_.clear(); // clear contents and state flags
         createBasicKernel(1, 1, 1);
+        CV_Assert(!kernelQueue.empty());  // basic kernel must be available
         kernel_index_ = kernelQueue.size() - 1;
     }
     this->bestKernelConfig = kernelQueue[kernel_index_];
@@ -446,8 +446,8 @@ void ONNXImporter::populateNet(Net dstNet)
         avgLp.set("pool", pool);
         if (axes.size() == 2)
         {
-            CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, ("Unsupported " + layer_type + " mode").c_str());
-            CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, ("Unsupported " + layer_type + " mode").c_str());
+            CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
+            CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
             avgLp.set("global_pooling", true);
         }
         else
@@ -489,7 +489,7 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         else if (!layerParams.has("axes") && (layer_type == "ReduceMean" || layer_type == "ReduceSum" || layer_type == "ReduceMax"))
         {
-            CV_CheckEQ(layerParams.get<int>("keepdims"), 0, (layer_type + " layer only supports keepdims = false").c_str());
+            CV_CheckEQ(layerParams.get<int>("keepdims"), 0, "layer only supports keepdims = false");
             LayerParams reshapeLp;
             reshapeLp.name = layerParams.name + "/reshape";
             reshapeLp.type = "Reshape";
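These two message changes appear to follow from the `CV__DEFINE_CHECK_CONTEXT` edit above: once the macro prefixes its message argument with `""`, only string literals are accepted, so the messages previously built at runtime from `layer_type` via `.c_str()` no longer compile and are replaced with plain literals, at the cost of a less specific error text.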
@@ -205,7 +205,7 @@ __kernel void ConvolveBasic(
 #if APPLY_BIAS
             ACTIVATION_FUNCTION(convolved_image, offset, sum[kern] + bias[biasIndex + kern], biasIndex + kern);
 #else
-            ACTIVATION_FUNCTION(convolved_image, offset, sum[kern], biasIndex + kern);
+            ACTIVATION_FUNCTION(convolved_image, offset, sum[kern], kernelNum + kern);
 #endif
         }
     }
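With APPLY_BIAS disabled there is no bias offset to reuse, so the last argument of ACTIVATION_FUNCTION (apparently the per-output-channel index used by activations such as per-channel PReLU, judging by the bias path above it) is now derived from `kernelNum`, the output-channel base, instead of `biasIndex`. This looks like the kernel-side counterpart of the test change further down that drops the ChannelsPReLU-without-bias OpenCL skip for https://github.com/opencv/opencv/issues/17953.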
@@ -83,7 +83,7 @@ __kernel void TEMPLATE(lrn_full_no_scale,Dtype)(const int nthreads, __global con
                   * in_off[(head - size) * step];
       }
       scale_val = k + accum_scale * alpha_over_size;
-      out_off[(head - post_pad) * step] = in_off[(head - post_pad) * step] * (Dtype)native_powr((Dtype)scale_val, (Dtype)negative_beta);
+      out_off[(head - post_pad) * step] = in_off[(head - post_pad) * step] * (Dtype)native_powr(scale_val, negative_beta);
       ++head;
     }
     // subtract only
@@ -93,7 +93,7 @@ __kernel void TEMPLATE(lrn_full_no_scale,Dtype)(const int nthreads, __global con
                   * in_off[(head - size) * step];
       }
       scale_val = k + accum_scale * alpha_over_size;
-      out_off[(head - post_pad) * step] = in_off[(head - post_pad) * step] * (Dtype)native_powr((Dtype)scale_val, (Dtype)negative_beta);
+      out_off[(head - post_pad) * step] = in_off[(head - post_pad) * step] * (Dtype)native_powr(scale_val, negative_beta);
       ++head;
     }
   }
@@ -2268,13 +2268,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
     if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
-    // bug: https://github.com/opencv/opencv/issues/17953
-    if (actType == "ChannelsPReLU" && bias_term == false &&
-        backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-    {
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-    }
 
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams);
@@ -116,7 +116,7 @@ TEST_P(Test_Torch_layers, run_convolution)
     if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
     {
         l1 = 0.08;
-        lInf = 0.42;
+        lInf = 0.43;
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
@@ -187,7 +187,7 @@ TEST_P(Test_Torch_layers, run_depth_concat)
     double lInf = 0.0;
     if (target == DNN_TARGET_OPENCL_FP16)
     {
-        lInf = 0.021;
+        lInf = 0.032;
     }
     else if (target == DNN_TARGET_CUDA_FP16)
     {
@@ -11,7 +11,7 @@ from tests_common import NewOpenCVTests
 class TestGoodFeaturesToTrack_test(NewOpenCVTests):
     def test_goodFeaturesToTrack(self):
         arr = self.get_sample('samples/data/lena.jpg', 0)
-        original = arr.copy(True)
+        original = arr.copy()
         threshes = [ x / 100. for x in range(1,10) ]
         numPoints = 20000
 
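A note on the Python fix above: `self.get_sample()` returns a NumPy array, and `numpy.ndarray.copy()` takes an optional memory-layout argument ('C', 'F' or 'K'), not a boolean, so `arr.copy(True)` relied on lenient argument handling; the parameterless `arr.copy()` is the portable spelling.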
@@ -183,7 +183,6 @@ class Builder:
             ] if self.debug_info else [])
 
         if len(self.exclude) > 0:
-            args += ["-DBUILD_opencv_world=OFF"] if not (self.dynamic and not self.build_objc_wrapper) else []
             args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
 
         if len(self.disable) > 0: