add cv::flipND; support ONNX Slice with negative steps via cv::flipND
commit 34a0897f90
parent 91ac790249
@@ -1102,6 +1102,13 @@ around both axes.
  */
 CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
 
+/** @brief Flips an n-dimensional array around the given axis
+ * @param src input array
+ * @param dst output array that has the same shape as src
+ * @param axis axis to perform the flip on; 0 <= axis < src.dims
+ */
+CV_EXPORTS_W void flipND(InputArray src, OutputArray dst, int axis);
+
 enum RotateFlags {
     ROTATE_90_CLOCKWISE = 0, //!< Rotate 90 degrees clockwise
     ROTATE_180          = 1, //!< Rotate 180 degrees clockwise
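A minimal usage sketch of the new function (illustrative only; the shape, values, and axis below are arbitrary choices for this writeup, not taken from the patch):

#include <opencv2/core.hpp>

int main()
{
    // A 2 x 3 x 4 single-channel volume with random values.
    const int shape[] = {2, 3, 4};
    cv::Mat src(3, shape, CV_32FC1);
    cv::randu(src, 0, 1);

    // Reverse the element order along axis 1; dst keeps the same shape,
    // and dst(i, j, k) == src(i, shape[1] - 1 - j, k).
    cv::Mat dst;
    cv::flipND(src, dst, 1);

    // The implementation also normalizes negative axes (-1 refers to the
    // last dimension), as exercised by the new FlipND test.
    cv::flipND(src, dst, -1);
    return 0;
}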
@@ -6,6 +6,8 @@
 #include "opencl_kernels_core.hpp"
 #include "opencv2/core/detail/dispatch_helper.impl.hpp"
 
+#include <algorithm> // std::swap_ranges
+
 namespace cv {
 
 ////////////////////////////////////// transpose /////////////////////////////////////////
@@ -812,6 +814,49 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode )
         flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
 }
 
+static void
+flipNDImpl(uchar* data, const int* shape, const size_t* step, int axis)
+{
+    int total = 1;
+    for (int i = 0; i < axis; ++i)
+        total *= shape[i];
+
+    int shape_at_axis = shape[axis];
+    size_t step_at_axis = step[axis];
+    size_t offset = 0;
+    size_t offset_increment = axis == 0 ? 0 : step[axis - 1];
+    for (int i = 0; i < total; ++i, offset += offset_increment)
+        for (int j = 0, k = shape_at_axis - 1; j < shape_at_axis / 2; ++j, --k)
+            std::swap_ranges(data + offset + j * step_at_axis,
+                             data + offset + j * step_at_axis + step_at_axis,
+                             data + offset + k * step_at_axis);
+}
+
+void flipND(InputArray _src, OutputArray _dst, int _axis)
+{
+    CV_INSTRUMENT_REGION();
+
+    Mat src = _src.getMat();
+
+    // verify axis
+    int ndim = src.dims;
+    CV_CheckLT(_axis, ndim, "flipND: given axis is out of range");
+    CV_CheckGE(_axis, -ndim, "flipND: given axis is out of range");
+    int axis = (_axis + ndim) % ndim;
+
+    // in-place flip
+    _src.copyTo(_dst);
+
+    // return the src if it has only one element on the flip axis
+    const auto shape = src.size.p;
+    if (shape[axis] == 1)
+        return;
+
+    // call impl
+    Mat dst = _dst.getMat();
+    flipNDImpl(dst.ptr(), dst.size.p, dst.step.p, axis);
+}
+
 void rotate(InputArray _src, OutputArray _dst, int rotateMode)
 {
     CV_Assert(_src.dims() <= 2);
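For intuition, flipND along axis 0 of a 2-D matrix should agree with the existing cv::flip with flipCode = 0 (flip around the x-axis). A small hedged consistency check, written for this writeup rather than taken from the commit's test suite:

#include <opencv2/core.hpp>

static void checkFlipNDAgainstFlip()
{
    cv::Mat m = (cv::Mat_<int>(2, 3) << 1, 2, 3,
                                        4, 5, 6);
    cv::Mat a, b;
    cv::flipND(m, a, 0);  // reverse order along axis 0 (rows)
    cv::flip(m, b, 0);    // flipCode 0: flip around the x-axis, i.e. reverse rows
    CV_Assert(cv::countNonZero(a != b) == 0);  // both yield {{4,5,6},{1,2,3}}
}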
@@ -2201,6 +2201,72 @@ INSTANTIATE_TEST_CASE_P(Arithm, TransposeND, testing::Combine(
     testing::Values(perf::MatType(CV_8UC1), CV_32FC1)
 ));
 
+class FlipND : public testing::TestWithParam< tuple<std::vector<int>, perf::MatType> >
+{
+public:
+    std::vector<int> m_shape;
+    int m_type;
+
+    void SetUp()
+    {
+        std::tie(m_shape, m_type) = GetParam();
+    }
+};
+
+TEST_P(FlipND, basic)
+{
+    Mat inp(m_shape, m_type);
+    randu(inp, 0, 255);
+
+    int ndim = static_cast<int>(m_shape.size());
+    std::vector<int> axes(ndim * 2); // covers [-ndim, ndim)
+    std::iota(axes.begin(), axes.end(), -ndim);
+    auto get_flipped_indices = [&inp, ndim] (size_t total, std::vector<int>& indices, int axis)
+    {
+        const int* shape = inp.size.p;
+        size_t t = total, idx;
+        for (int i = ndim - 1; i >= 0; --i)
+        {
+            idx = t / shape[i];
+            indices[i] = int(t - idx * shape[i]);
+            t = idx;
+        }
+
+        int _axis = (axis + ndim) % ndim;
+        std::vector<int> flipped_indices = indices;
+        flipped_indices[_axis] = shape[_axis] - 1 - indices[_axis];
+        return flipped_indices;
+    };
+
+    for (size_t i = 0; i < axes.size(); ++i)
+    {
+        int axis = axes[i];
+        Mat out;
+        cv::flipND(inp, out, axis);
+        // check values
+        std::vector<int> indices(ndim, 0);
+        for (size_t j = 0; j < inp.total(); ++j)
+        {
+            auto flipped_indices = get_flipped_indices(j, indices, axis);
+            switch (inp.type())
+            {
+                case CV_8UC1:
+                    ASSERT_EQ(inp.at<uint8_t>(indices.data()), out.at<uint8_t>(flipped_indices.data()));
+                    break;
+                case CV_32FC1:
+                    ASSERT_EQ(inp.at<float>(indices.data()), out.at<float>(flipped_indices.data()));
+                    break;
+                default:
+                    FAIL() << "Unsupported type: " << inp.type();
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(Arithm, FlipND, testing::Combine(
+    testing::Values(std::vector<int>{5, 10}, std::vector<int>{2, 3, 4}),
+    testing::Values(perf::MatType(CV_8UC1), CV_32FC1)
+));
+
 TEST(Core_minMaxIdx, regression_9207_2)
 {
@@ -84,6 +84,34 @@ Range normalizeRange(const Range& input_range, int n)
     return range;
 }
 
+// TODO: support cv::Range with steps and negative steps to get rid of this transformation
+void tranformForNegSteps(const MatShape& inpShape, std::vector<std::vector<Range> >& sliceRanges, std::vector<std::vector<int> >& sliceSteps)
+{
+    // In case of negative steps, e.g. for x of shape [5, 10]:
+    //     x[5:0:-1, 10:1:-3] <=> np.flip(x[1:5:1, 3:10:3], axis=(0, 1))
+    // new_end_i   = start_i + 1 > dim_i ? dim_i : start_i + 1
+    // new_start_i = end_i + 1
+    // new_start_i = new_end_i - 1 - ((new_end_i - 1 - new_start_i) / abs(step_i)) * abs(step_i)
+    int start, end, new_start, new_end, step;
+    for (int i = 0; i < sliceSteps[0].size(); ++i)
+    {
+        step = sliceSteps[0][i];
+        if (step > 0)
+            continue;
+
+        step = -step;
+        start = sliceRanges[0][i].start;
+        end = sliceRanges[0][i].end;
+        new_end = start >= inpShape[i] ? inpShape[i] : start + 1;
+        new_start = end + 1;
+        new_start = new_end - 1 - ((new_end - 1 - new_start) / step) * step;
+
+        sliceSteps[0][i] = step;
+        sliceRanges[0][i].start = new_start;
+        sliceRanges[0][i].end = new_end;
+    }
+}
+
 std::vector<std::vector<cv::Range> > finalizeSliceRange(const MatShape& inpShape, int& axis,
                                                         const std::vector<std::vector<cv::Range> >& inputSliceRanges)
 {
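To make the transformation concrete, here is a worked instance of those formulas, using the length-10 axis from the comment's [5, 10] example (a standalone sketch written for this writeup, not code from the commit):

// Hypothetical worked check of tranformForNegSteps' arithmetic.
#include <cassert>
#include <cstdlib>

int main()
{
    // The ONNX slice x[10:1:-3] on a length-10 axis selects indices 9, 6, 3.
    int dim = 10, start = 10, end = 1, step = -3;

    step = std::abs(step);
    int new_end   = start >= dim ? dim : start + 1;         // 10
    int new_start = end + 1;                                 // 2
    new_start = new_end - 1
              - ((new_end - 1 - new_start) / step) * step;   // 9 - (7 / 3) * 3 = 3

    // The positive-step slice [3, 10) with step 3 selects 3, 6, 9;
    // flipping that axis afterwards restores the original order 9, 6, 3.
    assert(new_start == 3 && new_end == 10 && step == 3);
    return 0;
}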
@@ -149,6 +177,24 @@ public:
             const DictValue &sizesOrEnds = params.has("size") ? params.get("size") : params.get("end");
             CV_Assert(begins.size() == sizesOrEnds.size());
 
+            if (params.has("steps"))
+            {
+                const DictValue &steps = params.get("steps");
+                sliceSteps.resize(1);
+                sliceSteps[0].resize(steps.size());
+
+                for (int i = 0; i < steps.size(); ++i)
+                {
+                    int step = steps.get<int>(i);
+                    CV_Assert(step != 0);
+                    if (step < 0)
+                        neg_step_dims.push_back(i);
+                    if (std::abs(step) > 1)
+                        hasSteps = true;
+                    sliceSteps[0][i] = step;
+                }
+            }
+
             sliceRanges.resize(1);
             sliceRanges[0].resize(begins.size(), Range::all());
             for (int i = 0; i < begins.size(); ++i)
@@ -166,26 +212,13 @@ public:
                 else
                 {
                     int end = sizeOrEnd;
-                    CV_Assert(end < 0 || end > start); // End index is excluded.
+                    if (hasSteps && !neg_step_dims.empty() && sliceSteps[0][i] < 0)
+                        CV_Assert(end < 0 || end != start); // if current step is negative, end < start is allowed.
+                    else
+                        CV_Assert(end < 0 || end > start); // End index is excluded.
                     sliceRanges[0][i].end = end; // We'll finalize a negative value later.
                 }
             }
-
-            if (params.has("steps"))
-            {
-                const DictValue &steps = params.get("steps");
-                sliceSteps.resize(1);
-                sliceSteps[0].resize(steps.size());
-
-                for (int i = 0; i < steps.size(); ++i)
-                {
-                    int step = steps.get<int>(i);
-                    CV_Assert(step >= 1);
-                    if (step > 1)
-                        hasSteps = true;
-                    sliceSteps[0][i] = step;
-                }
-            }
         }
     }
@@ -193,11 +226,11 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return sliceRanges.size() == 1 && !hasSteps;
+            return sliceRanges.size() == 1 && !hasSteps && neg_step_dims.empty();
 #endif
 #ifdef HAVE_CUDA
         if (backendId == DNN_BACKEND_CUDA)
-            return !hasSteps;
+            return !hasSteps && neg_step_dims.empty();
 #endif
         return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CANN;
     }
@@ -210,8 +243,13 @@ public:
         CV_Assert(inputs.size() == 1);
         MatShape inpShape = inputs[0];
 
+        std::vector<std::vector<int> > sliceSteps_ = sliceSteps;
+        std::vector<std::vector<cv::Range> > sliceRanges_ = sliceRanges;
+        if (hasSteps && !neg_step_dims.empty())
+            tranformForNegSteps(inpShape, sliceRanges_, sliceSteps_);
+
         int axis_rw = axis;
-        std::vector<std::vector<cv::Range> > sliceRanges_rw = finalizeSliceRange(inpShape, axis_rw, sliceRanges);
+        std::vector<std::vector<cv::Range> > sliceRanges_rw = finalizeSliceRange(inpShape, axis_rw, sliceRanges_);
 
         if (!sliceRanges_rw.empty())
         {
@@ -224,8 +262,8 @@ public:
                 if (shapesInitialized || inpShape[j] > 0)
                     outputs[i][j] = normalizeRange(sliceRanges_rw[i][j], inpShape[j]).size();
 
-                if (!sliceSteps.empty() && (i < sliceSteps.size()) && (j < sliceSteps[i].size()) && (sliceSteps[i][j] > 1))
-                    outputs[i][j] = (outputs[i][j] + sliceSteps[i][j] - 1) / sliceSteps[i][j];
+                if (!sliceSteps_.empty() && (i < sliceSteps_.size()) && (j < sliceSteps_[i].size()) && (sliceSteps_[i][j] > 1))
+                    outputs[i][j] = (outputs[i][j] + sliceSteps_[i][j] - 1) / sliceSteps_[i][j];
             }
         }
     }
@@ -257,7 +295,10 @@ public:
         outputs_arr.getMatVector(outputs);
 
         CV_Assert(inputs.size() == 1);
-        const MatSize& inpShape = inputs[0].size;
+        MatShape inpShape = shape(inputs[0]);
+
+        if (hasSteps && !neg_step_dims.empty())
+            tranformForNegSteps(inpShape, sliceRanges, sliceSteps);
 
         finalSliceRanges = finalizeSliceRange(shape(inputs[0]), axis, sliceRanges);
 
@@ -280,9 +321,9 @@ public:
 
         for (int i = 0; i < outputs.size(); ++i)
         {
-            CV_Assert(finalSliceRanges[i].size() <= inpShape.dims());
+            CV_Assert(finalSliceRanges[i].size() <= inpShape.size());
             // Fill the rest of ranges.
-            for (int j = finalSliceRanges[i].size(); j < inpShape.dims(); ++j)
+            for (int j = finalSliceRanges[i].size(); j < inpShape.size(); ++j)
             {
                 finalSliceRanges[i].push_back(Range::all());
             }
@@ -586,6 +627,8 @@ public:
                     getSliceRecursive<int8_t>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
                 else
                     getSliceRecursive<float>(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
+                // flip for negative steps
+                flip(outputs[i]);
             }
         }
     }
@@ -650,7 +693,6 @@ public:
     }
 #endif
 
-
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -739,9 +781,15 @@ private:
         }
     }
 
+    void flip(Mat& output) // break if 1d tensor?
+    {
+        for (int i = 0; i < neg_step_dims.size(); ++i)
+            cv::flipND(output, output, neg_step_dims[i]);
+    }
 protected:
     // The actual non-negative values determined from @p sliceRanges depends on input size.
     std::vector<std::vector<Range> > finalSliceRanges;
+    std::vector<int> neg_step_dims;
     bool hasDynamicShapes;
     bool shapesInitialized;
     bool hasSteps;
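Putting the slice-layer pieces together: a negative-step ONNX Slice is executed as the equivalent positive-step slice followed by cv::flipND on each axis that had a negative step (the flip() helper above applied in forward()). A hedged end-to-end illustration of that two-phase idea, written as a standalone sketch for this writeup and not code from the patch:

// Hypothetical illustration of the two-phase strategy used by the slice layer.
#include <opencv2/core.hpp>

int main()
{
    // x = [0, 1, ..., 9] as a 1x10 row; emulate the ONNX slice x[10:1:-3] -> 9, 6, 3.
    cv::Mat x(1, 10, CV_32F);
    for (int c = 0; c < 10; ++c)
        x.at<float>(0, c) = (float)c;

    // Phase 1: the equivalent positive-step slice [3, 10) with step 3 -> 3, 6, 9.
    cv::Mat strided(1, 3, CV_32F);
    for (int c = 3, k = 0; c < 10; c += 3, ++k)
        strided.at<float>(0, k) = x.at<float>(0, c);

    // Phase 2: flip the axis that had a negative step -> 9, 6, 3.
    cv::Mat out;
    cv::flipND(strided, out, 1);

    CV_Assert(out.at<float>(0, 0) == 9 && out.at<float>(0, 2) == 3);
    return 0;
}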
|
@ -1145,6 +1145,7 @@ TEST_P(Test_ONNX_layers, Slice)
|
|||||||
testONNXModels("slice");
|
testONNXModels("slice");
|
||||||
testONNXModels("slice_neg_starts");
|
testONNXModels("slice_neg_starts");
|
||||||
testONNXModels("slice_opset_11");
|
testONNXModels("slice_opset_11");
|
||||||
|
testONNXModels("slice_neg_steps", pb);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||