Mirror of https://github.com/opencv/opencv.git
Merge remote-tracking branch 'upstream/3.4' into merge-3.4
Commit: 6b474c4051
@@ -8,6 +8,8 @@
 #include <opencv2/core/async.hpp>
 #include <opencv2/core/detail/async_promise.hpp>
 
+#include <stdexcept>
+
 namespace cv { namespace utils {
 //! @addtogroup core_utils
 //! @{
@@ -113,6 +115,12 @@ String dumpRange(const Range& argument)
     }
 }
 
+CV_WRAP static inline
+void testRaiseGeneralException()
+{
+    throw std::runtime_error("exception text");
+}
+
 CV_WRAP static inline
 AsyncArray testAsyncArray(InputArray argument)
 {
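The new testRaiseGeneralException binding exists so the Python-bridge change further down (the added catch (const std::exception&) handler) can be exercised end to end. A minimal C++-side sketch, assuming the hunk above lands in opencv2/core/bindings_utils.hpp:

    #include <opencv2/core/bindings_utils.hpp>
    #include <cstdio>

    int main()
    {
        try
        {
            cv::utils::testRaiseGeneralException();   // always throws std::runtime_error
        }
        catch (const std::exception& e)
        {
            std::printf("caught: %s\n", e.what());    // prints "caught: exception text"
        }
        return 0;
    }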
@@ -205,21 +205,33 @@ static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape)
     return out;
 }
 
-inline int clamp(int ax, int dims)
+/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range.
+static inline
+int normalize_axis(int axis, int dims)
 {
-    return ax < 0 ? ax + dims : ax;
+    CV_Check(axis, axis >= -dims && axis < dims, "");
+    axis = (axis < 0) ? (dims + axis) : axis;
+    CV_DbgCheck(axis, axis >= 0 && axis < dims, "");
+    return axis;
 }
 
-inline int clamp(int ax, const MatShape& shape)
+static inline
+int normalize_axis(int axis, const MatShape& shape)
 {
-    return clamp(ax, (int)shape.size());
+    return normalize_axis(axis, (int)shape.size());
 }
 
-inline Range clamp(const Range& r, int axisSize)
+static inline
+Range normalize_axis_range(const Range& r, int axisSize)
 {
-    Range clamped(std::max(r.start, 0),
+    if (r == Range::all())
+        return Range(0, axisSize);
+    CV_CheckGE(r.start, 0, "");
+    Range clamped(r.start,
                   r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
-    CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize);
+    CV_DbgCheckGE(clamped.start, 0, "");
+    CV_CheckLT(clamped.start, clamped.end, "");
+    CV_CheckLE(clamped.end, axisSize, "");
     return clamped;
 }
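The rename from clamp to normalize_axis also tightens the contract: out-of-range axes now fail a CV_Check instead of being silently shifted. A standalone restatement of the new semantics (demo names are mine; it mirrors the logic above without linking against OpenCV):

    #include <cassert>

    // normalize_axis: maps axis in [-dims, dims) to [0, dims),
    // as with Python's negative indexing; anything else is rejected.
    static int normalize_axis_demo(int axis, int dims)
    {
        assert(axis >= -dims && axis < dims);     // out-of-range axes are errors now
        return (axis < 0) ? (dims + axis) : axis;
    }

    int main()
    {
        assert(normalize_axis_demo(-1, 4) == 3);  // last axis of a 4-D shape
        assert(normalize_axis_demo( 2, 4) == 2);  // non-negative axes pass through
        return 0;
    }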
@@ -6,7 +6,7 @@
 #define OPENCV_DNN_VERSION_HPP
 
 /// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20201117
+#define OPENCV_DNN_API_VERSION 20210205
 
 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
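Bumping OPENCV_DNN_API_VERSION changes the inline namespace the DNN symbols live in, so binaries built against different DNN API revisions cannot silently mix. A simplified sketch of the mechanism, with stand-in DEMO_ macros instead of __CV_CAT:

    // Simplified illustration of the inline-namespace versioning pattern.
    #define DEMO_CAT_IMPL(a, b) a##b
    #define DEMO_CAT(a, b) DEMO_CAT_IMPL(a, b)
    #define DEMO_API_VERSION 20210205
    #define DEMO_INLINE_NS DEMO_CAT(dnn4_v, DEMO_API_VERSION)

    namespace cv { namespace dnn {
    inline namespace DEMO_INLINE_NS {   // expands to dnn4_v20210205
        struct Net { /* ... */ };
    }
    }} // namespace cv::dnn

    // Callers still write cv::dnn::Net, but the mangled symbol embeds
    // dnn4_v20210205, so an ABI mismatch surfaces at link time.
    cv::dnn::Net makeNet() { return cv::dnn::Net(); }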
@@ -2972,7 +2972,7 @@ struct Net::Impl : public detail::NetImplBase
                 // the concatenation optimization is applied with batch_size > 1.
                 // so, for now, we only apply this optimization in the most popular
                 // case batch_size == 1.
-                int axis = clamp(concatLayer->axis, output.dims);
+                int axis = normalize_axis(concatLayer->axis, output.dims);
                 if( output.total(0, axis) == 1 )
                 {
                     size_t i, ninputs = ld.inputBlobsId.size();
@@ -79,7 +79,7 @@ public:
     {
         CV_Assert(inputs.size() > 0);
         outputs.resize(1, inputs[0]);
-        int cAxis = clamp(axis, inputs[0]);
+        int cAxis = normalize_axis(axis, inputs[0]);
 
         int axisSum = 0;
         for (size_t i = 0; i < inputs.size(); i++)
@@ -201,7 +201,7 @@ public:
         inps.getUMatVector(inputs);
         outs.getUMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         if (padding)
             return false;
 
@@ -255,7 +255,7 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
 
-        int cAxis = clamp(axis, inputs[0].dims);
+        int cAxis = normalize_axis(axis, inputs[0].dims);
         Mat& outMat = outputs[0];
 
         if (padding)
@@ -296,7 +296,7 @@ public:
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto concat_axis = clamp(axis, input_wrapper->getRank());
+        auto concat_axis = normalize_axis(axis, input_wrapper->getRank());
         return make_cuda_node<cuda4dnn::ConcatOp>(preferableTarget, std::move(context->stream), concat_axis, padding);
     }
 #endif
@@ -305,7 +305,7 @@ public:
     {
 #ifdef HAVE_VULKAN
         vkcom::Tensor in = VkComTensor(input[0]);
-        int cAxis = clamp(axis, in.dimNum());
+        int cAxis = normalize_axis(axis, in.dimNum());
         std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConcat(cAxis));
         return Ptr<BackendNode>(new VkComBackendNode(input, op));
 #endif // HAVE_VULKAN
@@ -341,7 +341,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axis, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -354,7 +354,7 @@ public:
     {
         InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
         const int numDims = data->getDims().size();
-        const int cAxis = clamp(axis, numDims);
+        const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);
 
         CV_Assert(inputs.size() == nodes.size());
@@ -89,8 +89,8 @@ public:
         }
 
         int numAxes = inputs[0].size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < (int)numAxes);
@@ -120,8 +120,8 @@ public:
         inputs_arr.getMatVector(inputs);
 
         int numAxes = inputs[0].dims;
-        _startAxis = clamp(_startAxis, numAxes);
-        _endAxis = clamp(_endAxis, numAxes);
+        _startAxis = normalize_axis(_startAxis, numAxes);
+        _endAxis = normalize_axis(_endAxis, numAxes);
     }
 
 #ifdef HAVE_OPENCL
@@ -195,8 +195,8 @@ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inp
         std::vector<size_t> dims = ieInpNode->get_shape();
 
         int numAxes = dims.size();
-        int startAxis = clamp(_startAxis, numAxes);
-        int endAxis = clamp(_endAxis, numAxes);
+        int startAxis = normalize_axis(_startAxis, numAxes);
+        int endAxis = normalize_axis(_endAxis, numAxes);
 
         CV_Assert(startAxis >= 0);
         CV_Assert(endAxis >= startAxis && endAxis < numAxes);
@@ -132,7 +132,7 @@ public:
             CV_CheckEQ(blobs[0].dims, 2, "");
             numOutput = blobs[0].size[0];
             CV_Assert(!bias || (size_t)numOutput == blobs[1].total());
-            cAxis = clamp(axis, inputs[0]);
+            cAxis = normalize_axis(axis, inputs[0]);
         }
 
         MatShape outShape(cAxis + 1);
@@ -356,7 +356,7 @@ public:
             return true;
         }
 
-        int axisCan = clamp(axis, inputs[0].dims);
+        int axisCan = normalize_axis(axis, inputs[0].dims);
         int numOutput = blobs[0].size[0];
         int innerSize = blobs[0].size[1];
         int outerSize = total(shape(inputs[0]), 0, axisCan);
@@ -477,7 +477,7 @@ public:
 
         if (!blobs.empty())
         {
-            int axisCan = clamp(axis, input[0].dims);
+            int axisCan = normalize_axis(axis, input[0].dims);
             int outerSize = input[0].total(0, axisCan);
 
             for (size_t i = 0; i < input.size(); i++)
@@ -525,7 +525,7 @@ public:
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
 
-        auto flatten_start_axis = clamp(axis, input_wrapper->getRank());
+        auto flatten_start_axis = normalize_axis(axis, input_wrapper->getRank());
 
         auto biasMat_ = bias ? biasMat : Mat();
         return make_cuda_node<cuda4dnn::InnerProductOp>(preferableTarget, std::move(context->stream), std::move(context->cublas_handle), flatten_start_axis, weightsMat, biasMat_);
@@ -126,8 +126,8 @@ public:
 
         const UMat& inp0 = inputs[0];
         UMat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         size_t num = total(shape(inp0.size), 0, startAxis);
         size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
@@ -211,8 +211,8 @@ public:
 
         const Mat& inp0 = inputs[0];
         Mat& buffer = internals[0];
-        startAxis = clamp(startAxis, inp0.dims);
-        endAxis = clamp(endAxis, inp0.dims);
+        startAxis = normalize_axis(startAxis, inp0.dims);
+        endAxis = normalize_axis(endAxis, inp0.dims);
 
         const float* inpData = inp0.ptr<float>();
         float* outData = outputs[0].ptr<float>();
@@ -378,8 +378,8 @@ public:
 
         NormalizeConfiguration<float> config;
         config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
-        config.axis_start = clamp(startAxis, input_shape.size());
-        config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
+        config.axis_start = normalize_axis(startAxis, input_shape.size());
+        config.axis_end = normalize_axis(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
         config.norm = pnorm;
         config.eps = epsilon;
 
@@ -66,14 +66,7 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     int srcShapeSize = (int)srcShape.size();
     int maskShapeSize = (int)maskShape.size();
 
-    if (srcRange == Range::all())
-        srcRange = Range(0, srcShapeSize);
-    else
-    {
-        int sz = srcRange.size();
-        srcRange.start = clamp(srcRange.start, srcShapeSize);
-        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
-    }
+    srcRange = normalize_axis_range(srcRange, srcShapeSize);
 
     bool explicitMask = !maskShape.empty();  // All mask values are positive.
     for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
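The eight-line branch collapses into one call because normalize_axis_range already handles the Range::all() case and negative end indices. A small standalone check of the common cases (demo types stand in for cv::Range, whose all() is Range(INT_MIN, INT_MAX)):

    #include <algorithm>
    #include <cassert>
    #include <climits>

    struct RangeDemo { int start, end; };   // stand-in for cv::Range

    static RangeDemo normalize_axis_range_demo(RangeDemo r, int axisSize)
    {
        if (r.start == INT_MIN && r.end == INT_MAX)      // Range::all() stand-in
            return RangeDemo{0, axisSize};
        assert(r.start >= 0);
        int end = r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1;
        assert(r.start < end && end <= axisSize);
        return RangeDemo{r.start, end};
    }

    int main()
    {
        RangeDemo all = normalize_axis_range_demo({INT_MIN, INT_MAX}, 4);
        assert(all.start == 0 && all.end == 4);          // Range::all() -> [0, 4)
        RangeDemo tail = normalize_axis_range_demo({1, -1}, 4);
        assert(tail.start == 1 && tail.end == 4);        // negative end wraps around
        return 0;
    }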
@@ -305,7 +305,7 @@ public:
             numChannels = blobs[0].total();
 
         std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
-        int cAxis = clamp(axis, shape.size());
+        int cAxis = normalize_axis(axis, shape.size());
         shape[cAxis] = numChannels;
 
         auto node = ieInpNode0;
@@ -153,7 +153,7 @@ public:
             for (int j = 0; j < sliceRanges[i].size(); ++j)
             {
                 if (shapesInitialized || inpShape[j] > 0)
-                    outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
+                    outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
             }
         }
     }
@@ -216,7 +216,7 @@ public:
             // Clamp.
             for (int j = 0; j < finalSliceRanges[i].size(); ++j)
             {
-                finalSliceRanges[i][j] = clamp(finalSliceRanges[i][j], inpShape[j]);
+                finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
             }
         }
 
@@ -634,7 +634,7 @@ public:
         CV_Assert(inputs.size() == 2);
 
         MatShape dstShape = inputs[0];
-        int start = clamp(axis, dstShape);
+        int start = normalize_axis(axis, dstShape);
         for (int i = start; i < dstShape.size(); i++)
         {
             dstShape[i] = inputs[1][i];
@@ -653,7 +653,7 @@ public:
         const Mat &inpSzBlob = inputs[1];
 
         int dims = inpBlob.dims;
-        int start_axis = clamp(axis, dims);
+        int start_axis = normalize_axis(axis, dims);
 
         std::vector<int> offset_final(dims, 0);
         if (offset.size() == 1)
@@ -89,7 +89,7 @@ public:
     {
         bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
         MatShape shape = inputs[0];
-        int cAxis = clamp(axisRaw, shape.size());
+        int cAxis = normalize_axis(axisRaw, shape.size());
         shape[cAxis] = 1;
         internals.assign(1, shape);
         return inplace;
@@ -124,7 +124,7 @@ public:
 
         UMat& src = inputs[0];
         UMat& dstMat = outputs[0];
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
 
         if (softmaxOp.empty())
         {
@@ -216,7 +216,7 @@ public:
         const Mat &src = inputs[0];
         Mat &dst = outputs[0];
 
-        int axis = clamp(axisRaw, src.dims);
+        int axis = normalize_axis(axisRaw, src.dims);
         size_t outerSize = src.total(0, axis), channels = src.size[axis],
                innerSize = src.total(axis + 1);
 
@@ -306,7 +306,7 @@ public:
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto channel_axis = clamp(axisRaw, input_wrapper->getRank());
+        auto channel_axis = normalize_axis(axisRaw, input_wrapper->getRank());
         return make_cuda_node<cuda4dnn::SoftmaxOp>(preferableTarget, std::move(context->cudnn_handle), channel_axis, logSoftMax);
     }
 #endif
@@ -315,7 +315,7 @@ public:
     {
 #ifdef HAVE_VULKAN
         vkcom::Tensor in = VkComTensor(inputs[0]);
-        int cAxis = clamp(axisRaw, in.dimNum());
+        int cAxis = normalize_axis(axisRaw, in.dimNum());
         std::shared_ptr<vkcom::OpBase> op(new vkcom::OpSoftmax(cAxis, logSoftMax));
         return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
 #endif // HAVE_VULKAN
@@ -354,7 +354,7 @@ public:
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(clamp(axisRaw, input->getDims().size()));
+        ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
     }
@@ -365,7 +365,7 @@ public:
                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = clamp(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
         auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
         if (logSoftMax)
             return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::v0::Log>(softmax)));
@@ -249,6 +249,40 @@ public:
     }
 };
 
+class NormalizeSubgraph4 : public NormalizeSubgraphBase
+{
+public:
+    NormalizeSubgraph4() : NormalizeSubgraphBase(1)
+    {
+        int input = addNodeToMatch("");
+        int mul = addNodeToMatch("Mul", input, input);
+        int sum = addNodeToMatch("ReduceSum", mul);
+        int eps = addNodeToMatch("");
+        int max = addNodeToMatch("Max", sum, eps);
+        int sqrt = addNodeToMatch("Sqrt", max);
+        int reciprocal = addNodeToMatch("Reciprocal", sqrt);
+        addNodeToMatch("Mul", input, reciprocal);
+        setFusedNode("Normalize", input);
+    }
+};
+
+class NormalizeSubgraph5 : public NormalizeSubgraphBase
+{
+public:
+    NormalizeSubgraph5() : NormalizeSubgraphBase(1)
+    {
+        int input = addNodeToMatch("");
+        int mul = addNodeToMatch("Mul", input, input);
+        int sum = addNodeToMatch("ReduceSum", mul);
+        int clip = addNodeToMatch("Clip", sum);
+        int sqrt = addNodeToMatch("Sqrt", clip);
+        int one = addNodeToMatch("Constant");
+        int div = addNodeToMatch("Div", one, sqrt);
+        addNodeToMatch("Mul", input, div);
+        setFusedNode("Normalize", input);
+    }
+};
+
 class GatherCastSubgraph : public Subgraph
 {
 public:
@@ -526,6 +560,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net)
     subgraphs.push_back(makePtr<BatchNormalizationSubgraph2>());
     subgraphs.push_back(makePtr<ExpandSubgraph>());
     subgraphs.push_back(makePtr<MishSubgraph>());
+    subgraphs.push_back(makePtr<NormalizeSubgraph4>());
+    subgraphs.push_back(makePtr<NormalizeSubgraph5>());
 
     simplifySubgraphs(Ptr<ImportGraphWrapper>(new ONNXGraphWrapper(net)), subgraphs);
 }
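Both new patterns recognize an exported L2 normalization, y = x / sqrt(max(sum(x*x), eps)), in its Max/Reciprocal and Clip/Div spellings, and collapse it into a single Normalize node. A standalone numeric restatement of what the fused node computes (scalar loop instead of ONNX graph matching):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // y[i] = x[i] / sqrt(max(sum_j x[j]^2, eps))
    std::vector<float> l2_normalize_demo(const std::vector<float>& x, float eps)
    {
        float sumsq = 0.f;
        for (float v : x)
            sumsq += v * v;                                 // Mul + ReduceSum
        float denom = std::sqrt(std::max(sumsq, eps));      // Max (or Clip) + Sqrt
        std::vector<float> y(x.size());
        for (size_t i = 0; i < x.size(); ++i)
            y[i] = x[i] / denom;                            // Reciprocal + Mul (or Div)
        return y;
    }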
@@ -503,7 +503,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             MatShape targetShape;
             std::vector<bool> shouldDelete(inpShape.size(), false);
             for (int i = 0; i < axes.size(); i++) {
-                int axis = clamp(axes.get<int>(i), inpShape.size());
+                int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                 shouldDelete[axis] = true;
             }
             for (int axis = 0; axis < inpShape.size(); ++axis){
@@ -515,7 +515,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
             if (inpShape.size() == 3 && axes.size() <= 2)
             {
-                int axis = clamp(axes.get<int>(0), inpShape.size());
+                int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                 CV_CheckNE(axis, 0, "");
 
                 LayerParams reshapeLp;
@@ -539,8 +539,8 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                 avgLp.set("pool", pool);
                 if (axes.size() == 2)
                 {
-                    CV_CheckEQ(clamp(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
-                    CV_CheckEQ(clamp(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
+                    CV_CheckEQ(normalize_axis(axes.get<int>(0), inpShape.size()), 1, "Unsupported mode");
+                    CV_CheckEQ(normalize_axis(axes.get<int>(1), inpShape.size()), 2, "Unsupported mode");
                     avgLp.set("global_pooling", true);
                 }
                 else
@@ -560,9 +560,9 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
 
             CV_Assert(axes.size() <= inpShape.size() - 2);
             std::vector<int> kernel_size(inpShape.size() - 2, 1);
-            if (axes.size() == 1 && (clamp(axes.get<int>(0), inpShape.size()) <= 1))
+            if (axes.size() == 1 && (normalize_axis(axes.get<int>(0), inpShape.size()) <= 1))
             {
-                int axis = clamp(axes.get<int>(0), inpShape.size());
+                int axis = normalize_axis(axes.get<int>(0), inpShape.size());
                 MatShape newShape = inpShape;
                 newShape[axis + 1] = total(newShape, axis + 1);
                 newShape.resize(axis + 2);
@@ -584,7 +584,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             else
             {
                 for (int i = 0; i < axes.size(); i++) {
                    int axis = normalize_axis(axes.get<int>(i), inpShape.size());
                     CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
                     kernel_size[axis - 2] = inpShape[axis];
                 }
@@ -1376,7 +1376,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
         if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
         {
             Mat input = getBlob(node_proto, 0);
-            int axis = clamp(layerParams.get<int>("axis", 1), input.dims);
+            int axis = normalize_axis(layerParams.get<int>("axis", 1), input.dims);
 
             std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
             out_size.push_back(input.total(axis));
@@ -2414,6 +2414,16 @@ void TFImporter::parseNode(const tensorflow::NodeDef& layer_)
 
         connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
     }
+    else if (type == "LeakyRelu")
+    {
+        CV_CheckGT(num_inputs, 0, "");
+        CV_Assert(hasLayerAttr(layer, "alpha"));
+        layerParams.set("negative_slope", getLayerAttr(layer, "alpha").f());
+
+        int id = dstNet.addLayer(name, "ReLU", layerParams);
+        layer_id[name] = id;
+        connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
+    }
     else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
              type == "Relu" || type == "Elu" ||
              type == "Identity" || type == "Relu6")
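The importer maps TensorFlow's LeakyRelu alpha attribute onto the negative_slope parameter of OpenCV's ReLU layer; both compute the same function. A standalone restatement (demo name is mine):

    #include <cassert>

    // LeakyReLU: identity for positive inputs, a small linear slope otherwise.
    // TensorFlow calls the slope "alpha"; OpenCV's ReLU layer calls it "negative_slope".
    static float leaky_relu_demo(float x, float negative_slope)
    {
        return x >= 0.f ? x : negative_slope * x;
    }

    int main()
    {
        assert(leaky_relu_demo(3.f, 0.1f) == 3.f);
        assert(leaky_relu_demo(-2.f, 0.1f) == -0.2f);
        return 0;
    }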
@@ -437,6 +437,11 @@ TEST_P(Test_ONNX_layers, BatchNormalizationSubgraph)
     testONNXModels("batch_norm_subgraph");
 }
 
+TEST_P(Test_ONNX_layers, NormalizeFusionSubgraph)
+{
+    testONNXModels("normalize_fusion");
+}
+
 TEST_P(Test_ONNX_layers, Transpose)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -484,6 +484,7 @@ TEST_P(Test_TensorFlow_layers, leaky_relu)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+    runTensorFlowNet("leaky_relu");
     runTensorFlowNet("leaky_relu_order1");
     runTensorFlowNet("leaky_relu_order2");
     runTensorFlowNet("leaky_relu_order3");
@@ -56,7 +56,10 @@ scaled to fit the 0 to 1 range.
 
 \f[V \leftarrow max(R,G,B)\f]
 \f[S \leftarrow \fork{\frac{V-min(R,G,B)}{V}}{if \(V \neq 0\)}{0}{otherwise}\f]
-\f[H \leftarrow \forkthree{{60(G - B)}/{(V-min(R,G,B))}}{if \(V=R\)}{{120+60(B - R)}/{(V-min(R,G,B))}}{if \(V=G\)}{{240+60(R - G)}/{(V-min(R,G,B))}}{if \(V=B\)}\f]
+\f[H \leftarrow \forkfour{{60(G - B)}/{(V-min(R,G,B))}}{if \(V=R\)}
+    {{120+60(B - R)}/{(V-min(R,G,B))}}{if \(V=G\)}
+    {{240+60(R - G)}/{(V-min(R,G,B))}}{if \(V=B\)}
+    {0}{if \(R=G=B\)}\f]
 If \f$H<0\f$ then \f$H \leftarrow H+360\f$ . On output \f$0 \leq V \leq 1\f$, \f$0 \leq S \leq 1\f$,
 \f$0 \leq H \leq 360\f$ .
 
@@ -78,9 +81,10 @@ scaled to fit the 0 to 1 range.
 \f[L \leftarrow \frac{V_{max} + V_{min}}{2}\f]
 \f[S \leftarrow \fork { \frac{V_{max} - V_{min}}{V_{max} + V_{min}} }{if \(L < 0.5\) }
     { \frac{V_{max} - V_{min}}{2 - (V_{max} + V_{min})} }{if \(L \ge 0.5\) }\f]
-\f[H \leftarrow \forkthree {{60(G - B)}/{(V_{max}-V_{min})}}{if \(V_{max}=R\) }
+\f[H \leftarrow \forkfour {{60(G - B)}/{(V_{max}-V_{min})}}{if \(V_{max}=R\) }
     {{120+60(B - R)}/{(V_{max}-V_{min})}}{if \(V_{max}=G\) }
-    {{240+60(R - G)}/{(V_{max}-V_{min})}}{if \(V_{max}=B\) }\f]
+    {{240+60(R - G)}/{(V_{max}-V_{min})}}{if \(V_{max}=B\) }
+    {0}{if \(R=G=B\) }\f]
 If \f$H<0\f$ then \f$H \leftarrow H+360\f$ . On output \f$0 \leq L \leq 1\f$, \f$0 \leq S \leq
 1\f$, \f$0 \leq H \leq 360\f$ .
 
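The documentation fix spells out the previously implicit fourth branch: when R=G=B the chroma V-min(R,G,B) is zero, and H is defined as 0 rather than a division by zero. A small sketch of the corrected hue rule for inputs in [0, 1]:

    #include <algorithm>

    // Hue in degrees per the corrected \forkfour formula.
    static float hue_demo(float r, float g, float b)
    {
        float v = std::max(r, std::max(g, b));
        float m = std::min(r, std::min(g, b));
        if (v == m)                        // R = G = B: the added fourth branch
            return 0.f;
        float h;
        if (v == r)      h = 60.f * (g - b) / (v - m);
        else if (v == g) h = 120.f + 60.f * (b - r) / (v - m);
        else             h = 240.f + 60.f * (r - g) / (v - m);
        return h < 0.f ? h + 360.f : h;    // wrap negative hues into [0, 360]
    }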
@@ -2469,12 +2469,13 @@ std::string QRCodeDetector::decode(InputArray in, InputArray points,
     bool ok = qrdec.straightDecodingProcess();
 
     std::string decoded_info = qrdec.getDecodeInformation();
-    if (ok && straight_qrcode.needed())
+    if (!ok && straight_qrcode.needed())
     {
-        qrdec.getStraightBarcode().convertTo(straight_qrcode,
-                                             straight_qrcode.fixedType() ?
-                                             straight_qrcode.type() : CV_32FC2);
+        straight_qrcode.release();
+    }
+    else if (straight_qrcode.needed())
+    {
+        qrdec.getStraightBarcode().convertTo(straight_qrcode, CV_8UC1);
     }
 
     return ok ? decoded_info : std::string();
@@ -2498,11 +2499,13 @@ cv::String QRCodeDetector::decodeCurved(InputArray in, InputArray points,
 
     std::string decoded_info = qrdec.getDecodeInformation();
 
-    if (ok && straight_qrcode.needed())
+    if (!ok && straight_qrcode.needed())
     {
-        qrdec.getStraightBarcode().convertTo(straight_qrcode,
-                                             straight_qrcode.fixedType() ?
-                                             straight_qrcode.type() : CV_32FC2);
+        straight_qrcode.release();
+    }
+    else if (straight_qrcode.needed())
+    {
+        qrdec.getStraightBarcode().convertTo(straight_qrcode, CV_8UC1);
     }
 
     return ok ? decoded_info : std::string();
@@ -3593,18 +3596,18 @@ bool QRCodeDetector::decodeMulti(
         for_copy.push_back(straight_barcode[i]);
     }
     straight_barcode = for_copy;
-    vector<Mat> tmp_straight_qrcodes;
-    if (straight_qrcode.needed())
+    if (straight_qrcode.needed() && straight_barcode.size() == 0)
     {
+        straight_qrcode.release();
+    }
+    else if (straight_qrcode.needed())
+    {
+        straight_qrcode.create(Size((int)straight_barcode.size(), 1), CV_8UC1);
+        vector<Mat> tmp_straight_qrcodes(straight_barcode.size());
         for (size_t i = 0; i < straight_barcode.size(); i++)
         {
-            Mat tmp_straight_qrcode;
-            tmp_straight_qrcodes.push_back(tmp_straight_qrcode);
-            straight_barcode[i].convertTo(((OutputArray)tmp_straight_qrcodes[i]),
-                                          ((OutputArray)tmp_straight_qrcodes[i]).fixedType() ?
-                                          ((OutputArray)tmp_straight_qrcodes[i]).type() : CV_32FC2);
+            straight_barcode[i].convertTo(tmp_straight_qrcodes[i], CV_8UC1);
         }
-        straight_qrcode.createSameSize(tmp_straight_qrcodes, CV_32FC2);
         straight_qrcode.assign(tmp_straight_qrcodes);
     }
     decoded_info.clear();
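After these changes a successfully decoded straight QR image always comes back as CV_8UC1, and the output array is released when nothing was decoded; the regression tests updated below pin exactly that. A usage sketch (hypothetical file name; assumes a build with quirc support):

    #include <opencv2/objdetect.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        cv::Mat img = cv::imread("qr.png");   // hypothetical input image
        cv::QRCodeDetector detector;
        std::vector<cv::Point> corners;
        cv::Mat straight;
        std::string text = detector.detectAndDecode(img, corners, straight);
        if (!text.empty())
            CV_Assert(straight.type() == CV_8UC1);  // fixed output type after this change
        return 0;
    }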
@@ -252,6 +252,8 @@ TEST_P(Objdetect_QRCode, regression)
     decoded_info = qrcode.detectAndDecode(src, corners, straight_barcode);
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
+    int expected_barcode_type = CV_8UC1;
+    EXPECT_EQ(expected_barcode_type, straight_barcode.type());
 #else
     ASSERT_TRUE(qrcode.detect(src, corners));
 #endif
@@ -317,6 +319,8 @@ TEST_P(Objdetect_QRCode_Close, regression)
     decoded_info = qrcode.detectAndDecode(barcode, corners, straight_barcode);
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
+    int expected_barcode_type = CV_8UC1;
+    EXPECT_EQ(expected_barcode_type, straight_barcode.type());
 #else
     ASSERT_TRUE(qrcode.detect(barcode, corners));
 #endif
@@ -382,6 +386,8 @@ TEST_P(Objdetect_QRCode_Monitor, regression)
     decoded_info = qrcode.detectAndDecode(barcode, corners, straight_barcode);
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
+    int expected_barcode_type = CV_8UC1;
+    EXPECT_EQ(expected_barcode_type, straight_barcode.type());
 #else
     ASSERT_TRUE(qrcode.detect(barcode, corners));
 #endif
@@ -442,6 +448,8 @@ TEST_P(Objdetect_QRCode_Curved, regression)
     decoded_info = qrcode.detectAndDecodeCurved(src, corners, straight_barcode);
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
+    int expected_barcode_type = CV_8UC1;
+    EXPECT_EQ(expected_barcode_type, straight_barcode.type());
 #else
     ASSERT_TRUE(qrcode.detect(src, corners));
 #endif
@@ -502,6 +510,9 @@ TEST_P(Objdetect_QRCode_Multi, regression)
     EXPECT_TRUE(qrcode.detectAndDecodeMulti(src, decoded_info, corners, straight_barcode));
     ASSERT_FALSE(corners.empty());
     ASSERT_FALSE(decoded_info.empty());
+    int expected_barcode_type = CV_8UC1;
+    for(size_t i = 0; i < straight_barcode.size(); i++)
+        EXPECT_EQ(expected_barcode_type, straight_barcode[i].type());
 #else
     ASSERT_TRUE(qrcode.detectMulti(src, corners));
 #endif
|
|||||||
EXPECT_EQ(corners.size(), expect_corners_size);
|
EXPECT_EQ(corners.size(), expect_corners_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(Objdetect_QRCode_decodeMulti, check_output_parameters_type_19363)
|
||||||
|
{
|
||||||
|
const std::string name_current_image = "9_qrcodes.jpg";
|
||||||
|
const std::string root = "qrcode/multiple/";
|
||||||
|
|
||||||
|
std::string image_path = findDataFile(root + name_current_image);
|
||||||
|
Mat src = imread(image_path);
|
||||||
|
ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path;
|
||||||
|
#ifdef HAVE_QUIRC
|
||||||
|
QRCodeDetector qrcode;
|
||||||
|
std::vector<Point> corners;
|
||||||
|
std::vector<cv::String> decoded_info;
|
||||||
|
#if 0 // FIXIT: OutputArray::create() type check
|
||||||
|
std::vector<Mat2b> straight_barcode_nchannels;
|
||||||
|
EXPECT_ANY_THROW(qrcode.detectAndDecodeMulti(src, decoded_info, corners, straight_barcode_nchannels));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
int expected_barcode_type = CV_8UC1;
|
||||||
|
std::vector<Mat1b> straight_barcode;
|
||||||
|
EXPECT_TRUE(qrcode.detectAndDecodeMulti(src, decoded_info, corners, straight_barcode));
|
||||||
|
ASSERT_FALSE(corners.empty());
|
||||||
|
for(size_t i = 0; i < straight_barcode.size(); i++)
|
||||||
|
EXPECT_EQ(expected_barcode_type, straight_barcode[i].type());
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
TEST(Objdetect_QRCode_basic, not_found_qrcode)
|
TEST(Objdetect_QRCode_basic, not_found_qrcode)
|
||||||
{
|
{
|
||||||
std::vector<Point> corners;
|
std::vector<Point> corners;
|
||||||
|
@@ -209,6 +209,11 @@ catch (const cv::Exception &e) \
 { \
     pyRaiseCVException(e); \
     return 0; \
+} \
+catch (const std::exception &e) \
+{ \
+    PyErr_SetString(opencv_error, e.what()); \
+    return 0; \
 }
 
 using namespace cv;
@@ -47,6 +47,12 @@ class Bindings(NewOpenCVTests):
         boost.getMaxDepth() # from ml::DTrees
         boost.isClassifier() # from ml::StatModel
 
+    def test_raiseGeneralException(self):
+        with self.assertRaises((cv.error,),
+                               msg='C++ exception is not propagated to Python in the right way') as cm:
+            cv.utils.testRaiseGeneralException()
+        self.assertEqual(str(cm.exception), 'exception text')
+
     def test_redirectError(self):
         try:
             cv.imshow("", None) # This causes an assert
@@ -43,7 +43,7 @@
 
 namespace opencv_test { namespace {
 
-#ifdef HAVE_OPENCV_XFEATURES2D
+#if defined(HAVE_OPENCV_XFEATURES2D) && defined(OPENCV_ENABLE_NONFREE)
 
 TEST(SurfFeaturesFinder, CanFindInROIs)
 {
@@ -80,7 +80,7 @@ TEST(SurfFeaturesFinder, CanFindInROIs)
     EXPECT_EQ(bad_count, 0);
 }
 
-#endif // HAVE_OPENCV_XFEATURES2D
+#endif // HAVE_OPENCV_XFEATURES2D && OPENCV_ENABLE_NONFREE
 
 TEST(ParallelFeaturesFinder, IsSameWithSerial)
 {