Merge remote-tracking branch 'upstream/3.4' into merge-3.4
3rdparty/libpng/mips/filter_msa_intrinsics.c (vendored): executable file → normal file, 0 lines changed
3rdparty/openexr/IlmImf/ImfAttribute.cpp (vendored): 2 lines changed

@@ -63,7 +63,7 @@ Attribute::~Attribute () {}

 namespace {

-struct NameCompare: std::binary_function <const char *, const char *, bool>
+struct NameCompare
 {
     bool
     operator () (const char *x, const char *y) const
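Note on the hunk above: std::binary_function was deprecated in C++11 and removed in C++17, and a comparator only needs operator(), so dropping the base class is sufficient. A minimal standalone sketch of an equivalent comparator; the strcmp body and the std::map usage are illustrative assumptions, not copied from the file:

#include <cstring>
#include <map>

// Strict-weak-ordering comparator for C strings; no std::binary_function needed.
struct NameCompare
{
    bool operator()(const char* x, const char* y) const
    {
        return std::strcmp(x, y) < 0;
    }
};

int main()
{
    std::map<const char*, int, NameCompare> byName;  // usable directly as a map comparator
    byName["beta"] = 2;
    byName["alpha"] = 1;
    return 0;
}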
3rdparty/protobuf/src/google/protobuf/arena.cc (vendored): executable file → normal file, 0 lines changed
3rdparty/protobuf/src/google/protobuf/arenastring.h (vendored): executable file → normal file, 0 lines changed
3rdparty/protobuf/src/google/protobuf/reflection.h (vendored): executable file → normal file, 0 lines changed
3rdparty/protobuf/src/google/protobuf/stubs/common.cc (vendored): executable file → normal file, 0 lines changed
@@ -5,10 +5,9 @@
 # VA_INTEL_IOCL_ROOT - root of Intel OCL installation

 if(UNIX AND NOT ANDROID)
-  if($ENV{VA_INTEL_IOCL_ROOT})
-    set(VA_INTEL_IOCL_ROOT $ENV{VA_INTEL_IOCL_ROOT})
-  else()
+  ocv_check_environment_variables(VA_INTEL_IOCL_ROOT)
+  if(NOT DEFINED VA_INTEL_IOCL_ROOT)
     set(VA_INTEL_IOCL_ROOT "/opt/intel/opencl")
   endif()

   find_path(
cmake/checks/cpu_msa.cpp: executable file → normal file, 0 lines changed
data/haarcascades/haarcascade_frontalcatface.xml: executable file → normal file, 0 lines changed
data/haarcascades/haarcascade_frontalcatface_extended.xml: executable file → normal file, 0 lines changed
data/lbpcascades/lbpcascade_profileface.xml: executable file → normal file, 0 lines changed
data/lbpcascades/lbpcascade_silverware.xml: executable file → normal file, 0 lines changed
data/vec_files/trainingfaces_24-24.vec: executable file → normal file, 0 lines changed
doc/tutorials/features2d/images/AKAZE_Match_Tutorial_Cover.png: executable file → normal file, binary image (63 KiB) unchanged
doc/tutorials/imgproc/anisotropic_image_segmentation/anisotropic_image_segmentation.markdown: executable file → normal file, 0 lines changed
doc/tutorials/imgproc/anisotropic_image_segmentation/images/gst_coherency.jpg: executable file → normal file, binary image (20 KiB) unchanged
doc/tutorials/imgproc/anisotropic_image_segmentation/images/gst_input.jpg: executable file → normal file, binary image (40 KiB) unchanged
doc/tutorials/imgproc/anisotropic_image_segmentation/images/gst_orientation.jpg: executable file → normal file, binary image (19 KiB) unchanged
doc/tutorials/imgproc/anisotropic_image_segmentation/images/gst_result.jpg: executable file → normal file, binary image (32 KiB) unchanged
doc/tutorials/imgproc/motion_deblur_filter/images/black_car.jpg: executable file → normal file, binary image (33 KiB) unchanged
doc/tutorials/imgproc/motion_deblur_filter/images/motion_original.jpg: executable file → normal file, binary image (51 KiB) unchanged
doc/tutorials/imgproc/motion_deblur_filter/images/motion_psf.png: executable file → normal file, binary image (556 B) unchanged
doc/tutorials/imgproc/motion_deblur_filter/images/white_car.jpg: executable file → normal file, binary image (20 KiB) unchanged
doc/tutorials/imgproc/motion_deblur_filter/motion_deblur_filter.markdown: executable file → normal file, 0 lines changed
doc/tutorials/imgproc/out_of_focus_deblur_filter/images/original.jpg: executable file → normal file, binary image (14 KiB) unchanged
doc/tutorials/imgproc/out_of_focus_deblur_filter/images/psf.png: executable file → normal file, binary image (630 B) unchanged
doc/tutorials/imgproc/out_of_focus_deblur_filter/images/recovered.jpg: executable file → normal file, binary image (42 KiB) unchanged
doc/tutorials/imgproc/out_of_focus_deblur_filter/out_of_focus_deblur_filter.markdown: executable file → normal file, 0 lines changed
doc/tutorials/imgproc/periodic_noise_removing_filter/images/period_filter.jpg: executable file → normal file, binary image (8.3 KiB) unchanged
doc/tutorials/imgproc/periodic_noise_removing_filter/images/period_input.jpg: executable file → normal file, binary image (68 KiB) unchanged
doc/tutorials/imgproc/periodic_noise_removing_filter/images/period_output.jpg: executable file → normal file, binary image (52 KiB) unchanged
doc/tutorials/imgproc/periodic_noise_removing_filter/images/period_psd.jpg: executable file → normal file, binary image (6.5 KiB) unchanged
doc/tutorials/imgproc/periodic_noise_removing_filter/periodic_noise_removing_filter.markdown: executable file → normal file, 0 lines changed
doc/tutorials/introduction/images/visual_studio_image_watch.png: executable file → normal file, binary image (18 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/breakpoint.png: executable file → normal file, binary image (4.0 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/edges_zoom.png: executable file → normal file, binary image (20 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/input_zoom.png: executable file → normal file, binary image (22 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/viewer_context_menu.png: executable file → normal file, binary image (29 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/visual_studio_image_watch.png: executable file → normal file, binary image (18 KiB) unchanged
doc/tutorials/introduction/windows_visual_studio_image_watch/images/vs_locals.png: executable file → normal file, binary image (4.8 KiB) unchanged
modules/core/include/opencv2/core/hal/intrin_msa.hpp: executable file → normal file, 0 lines changed
modules/core/include/opencv2/core/hal/msa_macros.h: executable file → normal file, 0 lines changed
@@ -3542,6 +3542,8 @@ public:
     Mat cross(const Mat& m) const;
     double dot(const Mat& m) const;

+    void swap(MatExpr& b);
+
     const MatOp* op;
     int flags;

@@ -144,9 +144,6 @@ _InputArray::_InputArray(const Mat_<_Tp>& m)
 inline _InputArray::_InputArray(const double& val)
 { init(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F + ACCESS_READ, &val, Size(1,1)); }

-inline _InputArray::_InputArray(const MatExpr& expr)
-{ init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr); }
-
 inline _InputArray::_InputArray(const cuda::GpuMat& d_mat)
 { init(CUDA_GPU_MAT + ACCESS_READ, &d_mat); }

@@ -4000,6 +3997,9 @@ inline void UMatData::markDeviceCopyObsolete(bool flag)

 //! @endcond

+static inline
+void swap(MatExpr& a, MatExpr& b) { a.swap(b); }
+
 } //cv

 #ifdef _MSC_VER
@@ -1821,4 +1821,37 @@ MatExpr Mat::eye(Size size, int type)
     return e;
 }

+void MatExpr::swap(MatExpr& other)
+{
+    using std::swap;
+
+    swap(op, other.op);
+    swap(flags, other.flags);
+
+    swap(a, other.a);
+    swap(b, other.b);
+    swap(c, other.c);
+
+    swap(alpha, other.alpha);
+    swap(beta, other.beta);
+
+    swap(s, other.s);
+}
+
+_InputArray::_InputArray(const MatExpr& expr)
+{
+#if 1
+    if (!isIdentity(expr))
+    {
+        Mat result = expr;  // TODO improve through refcount == 1 of expr.a (inplace operation is possible - except gemm?)
+        MatExpr result_expr(result);
+        swap(const_cast<MatExpr&>(expr), result_expr);
+    }
+    CV_Assert(isIdentity(expr));
+    init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_READ, &expr.a);
+#else
+    init(FIXED_TYPE + FIXED_SIZE + EXPR + ACCESS_READ, &expr);
+#endif
+}
+
 } // cv::
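A hedged usage sketch of the new entry points above: MatExpr::swap and the free cv::swap overload exchange the expression's operands without evaluating it, and _InputArray(const MatExpr&) now evaluates the expression once and caches the result back into expr.a. The concrete matrices and sizes below are illustrative only:

#include <opencv2/core.hpp>
using namespace cv;

int main()
{
    Mat a = Mat::ones(3, 3, CV_32F);
    Mat b = Mat::zeros(3, 3, CV_32F);

    MatExpr e1 = a + 1, e2 = b * 2;
    swap(e1, e2);               // free-function overload added above; no evaluation happens here

    Mat r1 = e1, r2 = e2;       // lazy expressions evaluate on assignment: r1 == b*2, r2 == a+1
    return 0;
}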
@@ -1959,6 +1959,21 @@ TEST(Core_InputArray, support_CustomType)
     }
 }

+
+TEST(Core_InputArray, fetch_MatExpr)
+{
+    Mat a(Size(10, 5), CV_32FC1, 5);
+    Mat b(Size(10, 5), CV_32FC1, 2);
+    MatExpr expr = a * b.t();  // gemm expression
+    Mat dst;
+    cv::add(expr, Scalar(1), dst);  // invoke gemm() here
+    void* expr_data = expr.a.data;
+    Mat result = expr;  // should not call gemm() here again
+    EXPECT_EQ(expr_data, result.data);  // expr data is reused
+    EXPECT_EQ(dst.size(), result.size());
+}
+

 TEST(Core_Vectors, issue_13078)
 {
     float floats_[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
@@ -352,7 +352,7 @@ public:
         CV_Assert(out.dims == numAxes && out.size == outputs[0].size);

         CV_Assert(inp.isContinuous() && out.isContinuous());
-        CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);
+        // CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);

         if( numAxes == 4 )
         {
@@ -154,16 +154,10 @@ private:
     int axis;
 };

-class NormalizeSubgraph1 : public Subgraph
+class NormalizeSubgraphBase : public Subgraph
 {
 public:
-    NormalizeSubgraph1() : axis(1)
-    {
-        input = addNodeToMatch("");
-        norm = addNodeToMatch("ReduceL2", input);
-        addNodeToMatch("Div", input, norm);
-        setFusedNode("Normalize", input);
-    }
+    NormalizeSubgraphBase(int _normNodeOrder = 0) : axis(1), normNodeOrder(_normNodeOrder) {}

     virtual bool match(const Ptr<ImportGraphWrapper>& net, int nodeId,
                        std::vector<int>& matchedNodesIds,
@@ -171,7 +165,7 @@ public:
     {
         if (Subgraph::match(net, nodeId, matchedNodesIds, targetNodesIds))
         {
-            Ptr<ImportNodeWrapper> norm = net->getNode(matchedNodesIds[0]);
+            Ptr<ImportNodeWrapper> norm = net->getNode(matchedNodesIds[normNodeOrder]);
             opencv_onnx::NodeProto* node = norm.dynamicCast<ONNXNodeWrapper>()->node;

             for (int i = 0; i < node->attribute_size(); i++)
@@ -204,20 +198,51 @@ public:
     }

 protected:
-    int input, norm;
-    int axis;
+    int axis, normNodeOrder;
 };

-class NormalizeSubgraph2 : public NormalizeSubgraph1
+
+class NormalizeSubgraph1 : public NormalizeSubgraphBase
 {
 public:
-    NormalizeSubgraph2() : NormalizeSubgraph1()
+    NormalizeSubgraph1()
     {
+        int input = addNodeToMatch("");
+        int norm = addNodeToMatch("ReduceL2", input);
+        addNodeToMatch("Div", input, norm);
+        setFusedNode("Normalize", input);
+    }
+};
+
+class NormalizeSubgraph2 : public NormalizeSubgraphBase
+{
+public:
+    NormalizeSubgraph2()
+    {
+        int input = addNodeToMatch("");
+        int norm = addNodeToMatch("ReduceL2", input);
         int clip = addNodeToMatch("Clip", norm);
         int shape = addNodeToMatch("Shape", input);
         int expand = addNodeToMatch("Expand", clip, shape);
         addNodeToMatch("Div", input, expand);
         setFusedNode("Normalize", input);
     }
 };

+class NormalizeSubgraph3 : public NormalizeSubgraphBase
+{
+public:
+    NormalizeSubgraph3() : NormalizeSubgraphBase(1)
+    {
+        int input = addNodeToMatch("");
+        int power = addNodeToMatch("Constant");
+        int squared = addNodeToMatch("Pow", input, power);
+        int sum = addNodeToMatch("ReduceSum", squared);
+        int sqrtNode = addNodeToMatch("Sqrt", sum);
+        int eps = addNodeToMatch("Constant");
+        int add = addNodeToMatch("Add", sqrtNode, eps);
+
+        addNodeToMatch("Div", input, add);
+        setFusedNode("Normalize", input);
+    }
+};
+
@@ -368,6 +393,7 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net)
     subgraphs.push_back(makePtr<SoftMaxSubgraph>());
     subgraphs.push_back(makePtr<NormalizeSubgraph1>());
     subgraphs.push_back(makePtr<NormalizeSubgraph2>());
+    subgraphs.push_back(makePtr<NormalizeSubgraph3>());

     simplifySubgraphs(Ptr<ImportGraphWrapper>(new ONNXGraphWrapper(net)), subgraphs);
 }
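For context on the three patterns registered above: each matches a different ONNX spelling of L2 normalization (ReduceL2/Div, ReduceL2/Clip/Expand/Div, or Pow/ReduceSum/Sqrt/Add/Div) and fuses it into a single Normalize layer. A hedged standalone sketch of the math the fused subgraphs reduce to, assuming a 2D CV_32F input normalized over all elements (the actual layer respects the axis attribute); the function name and sample values are illustrative:

#include <algorithm>
#include <opencv2/core.hpp>

// x / (||x||_2 + eps): the computation all three matched subgraphs express.
cv::Mat l2normalize(const cv::Mat& x, double eps = 1e-12)
{
    double n = cv::norm(x, cv::NORM_L2);   // ReduceL2: sqrt(sum(x^2))
    return x / std::max(n, eps);           // Div; eps stands in for the Add/Clip variants
}

int main()
{
    cv::Mat x = (cv::Mat_<float>(1, 3) << 3.f, 4.f, 0.f);
    cv::Mat y = l2normalize(x);            // [0.6, 0.8, 0]
    return 0;
}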
@@ -27,10 +27,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net);
 template<typename T1, typename T2>
 void convertInt64ToInt32(const T1& src, T2& dst, int size)
 {
-    for (int i = 0; i < size; i++) {
-        if (src[i] < std::numeric_limits<int32_t>::min() || src[i] > std::numeric_limits<int32_t>::max()) {
-            CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range");
-        }
+    for (int i = 0; i < size; i++)
+    {
         dst[i] = saturate_cast<int32_t>(src[i]);
     }
 }
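A hedged standalone sketch of the converter's behaviour after the change above: out-of-range int64 values no longer raise StsOutOfRange; they are clamped by cv::saturate_cast (assuming the int64 specialization available in this OpenCV version), so ONNX sentinels such as INT64_MAX land on INT32_MAX. The vectors and main() are illustrative only:

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>
#include <opencv2/core.hpp>

template<typename T1, typename T2>
void convertInt64ToInt32(const T1& src, T2& dst, int size)
{
    for (int i = 0; i < size; i++)
    {
        dst[i] = cv::saturate_cast<int32_t>(src[i]);   // clamp instead of erroring out
    }
}

int main()
{
    std::vector<int64_t> src = { 42, std::numeric_limits<int64_t>::max(), -1 };
    std::vector<int32_t> dst(src.size());
    convertInt64ToInt32(src, dst, (int)src.size());
    std::cout << dst[0] << " " << dst[1] << " " << dst[2] << std::endl;   // 42 2147483647 -1
    return 0;
}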
@@ -39,7 +39,7 @@ class ONNXImporter
     struct LayerInfo {
         int layerId;
         int outputId;
-        LayerInfo(int _layerId, int _outputId) : layerId(_layerId), outputId(_outputId) {}
+        LayerInfo(int _layerId = 0, int _outputId = 0) : layerId(_layerId), outputId(_outputId) {}
     };

     std::map<std::string, Mat> getGraphTensors(
@@ -300,6 +300,15 @@ void ONNXImporter::addLayer(Net& dstNet, LayerParams& layerParams,
     }
 }

+static void addConstant(const std::string& name,
+                        const Mat& blob,
+                        std::map<std::string, Mat>& constBlobs,
+                        std::map<std::string, MatShape>& outShapes)
+{
+    constBlobs.insert(std::make_pair(name, blob));
+    outShapes.insert(std::make_pair(name, shape(blob)));
+}
+
 void ONNXImporter::populateNet(Net dstNet)
 {
     CV_Assert(model_proto.has_graph());
@@ -533,6 +542,23 @@ void ONNXImporter::populateNet(Net dstNet)
             if (inp_size == 5) {
                 CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
                 Mat step_blob = getBlob(node_proto, constBlobs, 4);
+
+                // Very strange application for Slice op with tensor reversing.
+                // We just workaround it for 2d constants.
+                if (constBlobs.find(node_proto.input(0)) != constBlobs.end() &&
+                    axis == 0 &&
+                    start_blob.at<int>(0) == -1 && step_blob.at<int>(0) == -1 &&
+                    end_blob.at<int>(0) == std::numeric_limits<int32_t>::min())
+                {
+                    Mat inp = getBlob(node_proto, constBlobs, 0);
+                    if (inp.dims == 2)
+                    {
+                        Mat flipped;
+                        flip(inp, flipped, 0);
+                        addConstant(layerParams.name, flipped, constBlobs, outShapes);
+                        continue;
+                    }
+                }
                 CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
             }
         }
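A hedged illustration of the workaround above: an ONNX Slice with axis = 0, start = -1, step = -1 and an end value that saturates to INT32_MIN simply reverses the rows of a tensor, which cv::flip with flipCode = 0 reproduces for the 2D constant-folding case. The concrete matrix is illustrative:

#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    cv::Mat inp = (cv::Mat_<float>(3, 2) << 1, 2,
                                            3, 4,
                                            5, 6);
    cv::Mat flipped;
    cv::flip(inp, flipped, 0);                  // reverse row order: [5 6; 3 4; 1 2]
    std::cout << flipped << std::endl;
    return 0;
}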
@@ -547,8 +573,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 inputs.push_back(inp);
                 runLayer(layerParams, inputs, sliced);
                 CV_Assert(sliced.size() == 1);
-                constBlobs.insert(std::make_pair(layerParams.name, sliced[0]));
-                outShapes[layerParams.name] = shape(sliced[0]);
+                addConstant(layerParams.name, sliced[0], constBlobs, outShapes);
                 continue;
             }
         }
@@ -585,7 +610,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 Mat blob_1 = getBlob(node_proto, constBlobs, 1);
                 CV_Assert(blob_0.size == blob_1.size);
                 Mat output = isSub ? (blob_0 - blob_1) : (blob_0 + blob_1);
-                constBlobs.insert(std::make_pair(layerParams.name, output));
+                addConstant(layerParams.name, output, constBlobs, outShapes);
                 continue;
             }
             else if (is_const_0 || is_const_1)
@@ -670,7 +695,7 @@ void ONNXImporter::populateNet(Net dstNet)
         {
             CV_Assert(node_proto.input_size() == 0);
             CV_Assert(layerParams.blobs.size() == 1);
-            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
+            addConstant(layerParams.name, layerParams.blobs[0], constBlobs, outShapes);
             continue;
         }
         else if (layer_type == "LSTM")
@@ -965,7 +990,7 @@ void ONNXImporter::populateNet(Net dstNet)

                 out = out.reshape(1, inp0.dims, inp0.size);
                 out.dims = inp0.dims;  // to workaround dims == 1
-                constBlobs.insert(std::make_pair(layerParams.name, out));
+                addConstant(layerParams.name, out, constBlobs, outShapes);
                 continue;
             }
         }
@@ -1033,7 +1058,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), transposed;
                 runLayer(layerParams, inputs, transposed);
                 CV_Assert(transposed.size() == 1);
-                constBlobs.insert(std::make_pair(layerParams.name, transposed[0]));
+                addConstant(layerParams.name, transposed[0], constBlobs, outShapes);
                 continue;
             }
         }
@@ -1069,8 +1094,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 Mat inp = getBlob(node_proto, constBlobs, 0);
                 Mat out = inp.reshape(1, outShape);
                 out.dims = outShape.size();  // to workaround dims == 1
-                constBlobs.insert(std::make_pair(layerParams.name, out));
-                outShapes[layerParams.name] = shape(out);
+                addConstant(layerParams.name, out, constBlobs, outShapes);
                 continue;
             }
         }
@@ -1085,7 +1109,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 std::vector<int> out_size(&input.size[0], &input.size[0] + axis);
                 out_size.push_back(input.total(axis));
                 Mat output = input.reshape(1, out_size);
-                constBlobs.insert(std::make_pair(layerParams.name, output));
+                addConstant(layerParams.name, output, constBlobs, outShapes);
                 continue;
             }
         }
@@ -1108,7 +1132,7 @@ void ONNXImporter::populateNet(Net dstNet)
             }

             Mat out = input.reshape(0, dims);
-            constBlobs.insert(std::make_pair(layerParams.name, out));
+            addConstant(layerParams.name, out, constBlobs, outShapes);
             continue;
         }

@@ -1210,7 +1234,7 @@ void ONNXImporter::populateNet(Net dstNet)
             if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                 std::vector<Mat> inputs(1, getBlob(node_proto, constBlobs, 0)), outputs;
                 runLayer(layerParams, inputs, outputs);
-                constBlobs.insert(std::make_pair(layerParams.name, outputs[0]));
+                addConstant(layerParams.name, outputs[0], constBlobs, outShapes);
                 continue;
             }
         }
@@ -1224,7 +1248,7 @@ void ONNXImporter::populateNet(Net dstNet)
             if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                 Mat input = getBlob(node_proto, constBlobs, 0);
                 Mat out = input.reshape(0, dim);
-                constBlobs.insert(std::make_pair(layerParams.name, out));
+                addConstant(layerParams.name, out, constBlobs, outShapes);
                 continue;
             }
             replaceLayerParam(layerParams, "shape", "dim");
@@ -1233,6 +1257,21 @@ void ONNXImporter::populateNet(Net dstNet)
         else if (layer_type == "Pad")
         {
             layerParams.type = "Padding";
             replaceLayerParam(layerParams, "mode", "type");
+            if (node_proto.input_size() == 3 || node_proto.input_size() == 2)
+            {
+                // Paddings are in order begin0, begin1, .. beginN, end0, end1, ..., endN.
+                // We need to shuffle it to begin0, end0, begin1, end1, ...
+                Mat paddings = getBlob(node_proto, constBlobs, 1).reshape(1, 2);
+                paddings = paddings.t();
+                layerParams.set("paddings", DictValue::arrayInt(paddings.ptr<int>(), paddings.total()));
+
+                if (node_proto.input_size() == 3)
+                {
+                    Mat value = getBlob(node_proto, constBlobs, 2);
+                    layerParams.set("value", value.at<float>(0));
+                }
+            }
         }
         else if (layer_type == "Shape")
         {
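A hedged sketch of the padding reorder above: ONNX stores pads as [begin0, ..., beginN, end0, ..., endN], while the Padding layer expects them interleaved as begin0, end0, begin1, end1, ...; reshaping to two rows and transposing produces exactly that order. The sample values are illustrative:

#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    int pads[] = { 1, 2, 3, 10, 20, 30 };              // begins then ends, N = 3
    cv::Mat paddings(1, 6, CV_32S, pads);
    paddings = paddings.reshape(1, 2);                  // 2 x N: row 0 = begins, row 1 = ends
    paddings = paddings.t();                            // N x 2; row-major read is b0,e0,b1,e1,...
    std::cout << paddings.reshape(1, 1) << std::endl;   // [1, 10, 2, 20, 3, 30]
    return 0;
}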
@@ -1246,7 +1285,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 shapeMat.at<int>(j) = inpShape[j];
             shapeMat.dims = 1;

-            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
+            addConstant(layerParams.name, shapeMat, constBlobs, outShapes);
             continue;
         }
         else if (layer_type == "Cast")
@@ -1268,7 +1307,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 default: type = blob.type();
             }
             blob.convertTo(blob, type);
-            constBlobs.insert(std::make_pair(layerParams.name, blob));
+            addConstant(layerParams.name, blob, constBlobs, outShapes);
             continue;
         }
         else
@@ -1276,11 +1315,15 @@ void ONNXImporter::populateNet(Net dstNet)
         }
         else if (layer_type == "ConstantOfShape" || layer_type == "ConstantFill")
         {
+            int depth = CV_32F;
             float fill_value;
             if (!layerParams.blobs.empty())
             {
                 CV_Assert(!layerParams.has("value"));
-                fill_value = layerParams.blobs[0].at<float>(0, 0);
+                depth = layerParams.blobs[0].depth();
+                Mat floats;
+                layerParams.blobs[0].convertTo(floats, CV_32F);
+                fill_value = floats.at<float>(0, 0);
             }
             else
                 fill_value = layerParams.get("value", 0);
@@ -1288,9 +1331,8 @@ void ONNXImporter::populateNet(Net dstNet)
             MatShape inpShape = getBlob(node_proto, constBlobs, 0);
             for (int i = 0; i < inpShape.size(); i++)
                 CV_CheckGT(inpShape[i], 0, "");
-            Mat tensor(inpShape.size(), &inpShape[0], CV_32F, Scalar(fill_value));
-            constBlobs.insert(std::make_pair(layerParams.name, tensor));
-            outShapes[node_proto.output(0)] = shape(tensor);
+            Mat tensor(inpShape.size(), &inpShape[0], depth, Scalar(fill_value));
+            addConstant(layerParams.name, tensor, constBlobs, outShapes);
             continue;
         }
         else if (layer_type == "Gather")
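A hedged sketch of the ConstantOfShape handling after the two hunks above: the output tensor now keeps the depth of the supplied value blob instead of being forced to CV_32F, while the scalar fill value is still read through a CV_32F view. The example blob and shape are illustrative:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat value = (cv::Mat_<int>(1, 1) << 7);        // value blob, e.g. already converted to 32S
    int depth = value.depth();                          // CV_32S is preserved

    cv::Mat floats;
    value.convertTo(floats, CV_32F);
    float fill_value = floats.at<float>(0, 0);

    int shape_[] = { 2, 3 };
    cv::Mat tensor(2, shape_, depth, cv::Scalar(fill_value));   // 2x3 CV_32S tensor filled with 7
    return 0;
}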
@@ -1320,7 +1362,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 out = input.reshape(1, 1).colRange(index, index + 1);
                 out.dims = dims;
             }
-            constBlobs.insert(std::make_pair(layerParams.name, out));
+            addConstant(layerParams.name, out, constBlobs, outShapes);
             continue;
         }
         else if (layer_type == "Concat")
@@ -1345,7 +1387,7 @@ void ONNXImporter::populateNet(Net dstNet)
                 runLayer(layerParams, inputs, concatenated);

                 CV_Assert(concatenated.size() == 1);
-                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
+                addConstant(layerParams.name, concatenated[0], constBlobs, outShapes);
                 continue;
             }
         }
@@ -1415,6 +1457,25 @@ void ONNXImporter::populateNet(Net dstNet)
             layerParams.type = "Softmax";
             layerParams.set("log_softmax", layer_type == "LogSoftmax");
         }
+        else if (layer_type == "DetectionOutput")
+        {
+            CV_CheckEQ(node_proto.input_size(), 3, "");
+            if (constBlobs.find(node_proto.input(2)) != constBlobs.end())
+            {
+                Mat priors = getBlob(node_proto, constBlobs, 2);
+
+                LayerParams constParams;
+                constParams.name = layerParams.name + "/priors";
+                constParams.type = "Const";
+                constParams.blobs.push_back(priors);
+
+                opencv_onnx::NodeProto priorsProto;
+                priorsProto.add_output(constParams.name);
+                addLayer(dstNet, constParams, priorsProto, layer_id, outShapes);
+
+                node_proto.set_input(2, constParams.name);
+            }
+        }
         else
         {
             for (int j = 0; j < node_proto.input_size(); j++) {
@@ -465,6 +465,7 @@ TEST_P(Test_ONNX_layers, ReduceL2)
 {
     testONNXModels("reduceL2");
     testONNXModels("reduceL2_subgraph");
+    testONNXModels("reduceL2_subgraph_2");
 }

 TEST_P(Test_ONNX_layers, Split)
@@ -515,6 +516,12 @@ TEST_P(Test_ONNX_layers, LSTM_bidirectional)
     testONNXModels("lstm_bidirectional", npy, 0, 0, false, false);
 }

+TEST_P(Test_ONNX_layers, Pad2d_Unfused)
+{
+    testONNXModels("ReflectionPad2d");
+    testONNXModels("ZeroPad2d");
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());

 class Test_ONNX_nets : public Test_ONNX_layers
modules/features2d/src/mser.cpp: executable file → normal file, 0 lines changed
@@ -466,11 +466,9 @@ bool PxMEncoder::write(const Mat& img, const std::vector<int>& params)
     // write header;
     const int code = ((mode == PXM_TYPE_PBM) ? 1 : (mode == PXM_TYPE_PGM) ? 2 : 3)
         + (isBinary ? 3 : 0);
-    const char* comment = "# Generated by OpenCV " CV_VERSION "\n";

-    int header_sz = sprintf(buffer, "P%c\n%s%d %d\n",
-            (char)('0' + code), comment,
-            width, height);
+    int header_sz = sprintf(buffer, "P%c\n%d %d\n",
+            (char)('0' + code), width, height);
     CV_Assert(header_sz > 0);
     if (mode != PXM_TYPE_PBM)
     {
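A hedged illustration of the header change above: the "# Generated by OpenCV ..." comment line is no longer emitted between the magic number and the dimensions, so a binary PGM header reduces to "P5", the width/height line, and the maxval written later. The buffer and dimensions below are illustrative:

#include <cstdio>

int main()
{
    char buffer[64];
    int width = 640, height = 480;
    const int code = 2 + 3;   // PGM (2) written in binary form (+3) -> "P5"

    int header_sz = std::sprintf(buffer, "P%c\n%d %d\n", (char)('0' + code), width, height);
    std::printf("%.*s", header_sz, buffer);   // prints "P5\n640 480\n"
    return 0;
}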
modules/imgproc/src/opencl/filter2DSmall.cl: executable file → normal file, 0 lines changed
modules/imgproc/src/opencl/filterSmall.cl: executable file → normal file, 0 lines changed
modules/imgproc/src/sumpixels.dispatch.cpp: executable file → normal file, 0 lines changed
@@ -82,7 +82,7 @@ TEST(CUDA_BruteForceNonLocalMeans, Regression)
     cv::resize(gray_gold, gray_gold, cv::Size(256, 256));

     EXPECT_MAT_NEAR(bgr_gold, dbgr, 1);
-    EXPECT_MAT_NEAR(gray_gold, dgray, 1e-4);
+    EXPECT_MAT_NEAR(gray_gold, dgray, 1);
 }

 ////////////////////////////////////////////////////////
modules/photo/test/test_npr.cpp: executable file → normal file, 0 lines changed
modules/video/src/bgfg_KNN.cpp: executable file → normal file, 0 lines changed
platforms/linux/mips.toolchain.cmake: executable file → normal file, 0 lines changed
platforms/linux/mips32r5el-gnu.toolchain.cmake: executable file → normal file, 0 lines changed
platforms/linux/mips64r6el-gnu.toolchain.cmake: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/ImgProc/motion_deblur_filter/motion_deblur_filter.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/ImgProc/out_of_focus_deblur_filter/out_of_focus_deblur_filter.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/ImgProc/periodic_noise_removing_filter/periodic_noise_removing_filter.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/AKAZE_match.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp: executable file → normal file, 0 lines changed
samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp: executable file → normal file, 0 lines changed
samples/data/H1to3p.xml: executable file → normal file, 0 lines changed
samples/data/LinuxLogo.jpg: executable file → normal file, binary image (6.8 KiB) unchanged
samples/data/WindowsLogo.jpg: executable file → normal file, binary image (11 KiB) unchanged
samples/data/graf1.png: executable file → normal file, binary image (929 KiB) unchanged
samples/data/graf3.png: executable file → normal file, binary image (953 KiB) unchanged