Mirror of https://github.com/opencv/opencv.git
Merge pull request #19546 from LupusSanctus:am/slice_steps
* Added Steps support in DNN Slice layer
* Added code corrections
* dnn(slice): fix OCL and OCL_FP16 processing
commit 3e48a91d97
parent bf9f67e93f
@@ -364,6 +364,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          * Inner vector has slice ranges for the first number of input dimensions.
          */
         std::vector<std::vector<Range> > sliceRanges;
+        std::vector<std::vector<int> > sliceSteps;
         int axis;
         int num_split;
 
@@ -64,6 +64,7 @@ public:
     SliceLayerImpl(const LayerParams& params)
     {
         setParamsFrom(params);
+        hasSteps = false;
         axis = params.get<int>("axis", 1);
         num_split = params.get<int>("num_split", 0);
         hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
@@ -112,6 +113,22 @@ public:
                     sliceRanges[0][i].end = end;  // We'll finalize a negative value later.
                 }
             }
         }
+
+        if (params.has("steps"))
+        {
+            const DictValue &steps = params.get("steps");
+            sliceSteps.resize(1);
+            sliceSteps[0].resize(steps.size());
+
+            for (int i = 0; i < steps.size(); ++i)
+            {
+                int step = steps.get<int>(i);
+                CV_Assert(step >= 1);
+                if (step > 1)
+                    hasSteps = true;
+                sliceSteps[0][i] = step;
+            }
+        }
     }
 
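
Note (not part of the patch): with the constructor change above, a Slice layer can be configured with a "steps" list through LayerParams, which is how the ONNX importer below hands steps over. The following is a minimal sketch under the assumption that the public SliceLayer::create factory and DictValue::arrayInt (both used elsewhere in this patch) are available; the begin/end/step values are invented for illustration.

#include <opencv2/dnn.hpp>
#include <opencv2/dnn/all_layers.hpp>

int main()
{
    using namespace cv::dnn;

    // For a hypothetical 4x6 input: keep all rows, but only every second column.
    int begin[] = {0, 0};
    int end[]   = {4, 6};
    int steps[] = {1, 2};

    LayerParams params;
    params.set("begin", DictValue::arrayInt(begin, 2));
    params.set("end",   DictValue::arrayInt(end, 2));
    params.set("steps", DictValue::arrayInt(steps, 2));  // parsed by the constructor above

    cv::Ptr<SliceLayer> layer = SliceLayer::create(params);  // hasSteps becomes true (step of 2)
    CV_Assert(!layer.empty());
    return 0;
}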
@@ -120,11 +137,11 @@ public:
 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
             return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                   sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
+                   sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
 #endif
 #ifdef HAVE_DNN_NGRAPH
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return sliceRanges.size() == 1;
+            return sliceRanges.size() == 1 && !hasSteps;
 #endif
         return backendId == DNN_BACKEND_OPENCV;
     }
@@ -147,6 +164,9 @@ public:
                 {
                     if (shapesInitialized || inpShape[j] > 0)
                         outputs[i][j] = normalize_axis_range(sliceRanges[i][j], inpShape[j]).size();
+
+                    if (!sliceSteps.empty() && (i < sliceSteps.size()) && (j < sliceSteps[i].size()) && (sliceSteps[i][j] > 1))
+                        outputs[i][j] = (outputs[i][j] + sliceSteps[i][j] - 1) / sliceSteps[i][j];
                 }
             }
         }
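
The two added lines compute the output extent of a stepped slice: the plain slice length (end - begin, what Range::size() returns) is divided by the step and rounded up. A self-contained sketch of that arithmetic, with invented numbers, just to make the rounding explicit:

#include <cassert>

// Output extent of a strided slice over [begin, end) with the given step,
// mirroring the rounding-up integer division in the shape computation above.
static int slicedSize(int begin, int end, int step)
{
    int len = end - begin;           // plain slice size
    return (len + step - 1) / step;  // ceil(len / step)
}

int main()
{
    assert(slicedSize(1, 10, 3) == 3);  // keeps indices 1, 4, 7
    assert(slicedSize(0, 8, 2) == 4);   // keeps indices 0, 2, 4, 6
    assert(slicedSize(0, 8, 1) == 8);   // step 1 reduces to the old behaviour
    return 0;
}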
@@ -181,6 +201,7 @@ public:
         const MatSize& inpShape = inputs[0].size;
 
         finalSliceRanges = sliceRanges;
+
         if (sliceRanges.empty())
         {
             // Divide input blob on equal parts by axis.
@@ -213,6 +234,9 @@ public:
             }
         }
 
+        if (!sliceSteps.empty() && sliceSteps[0].size() != inputs[0].dims)
+            sliceSteps[0].resize(inputs[0].dims, 1);
+
 #if 0
         std::cout << "DEBUG: DNN/Slice: " << outputs.size() << " inpShape=" << inpShape << std::endl;
         for (int i = 0; i < outputs.size(); ++i)
@@ -420,6 +444,9 @@ public:
     {
         CV_TRACE_FUNCTION();
 
+        if (hasSteps)
+            return false;  // TODO not implemented yet: https://github.com/opencv/opencv/pull/19546
+
         std::vector<UMat> inputs;
         std::vector<UMat> outputs;
 
@@ -478,9 +505,24 @@ public:
 
         const Mat& inpMat = inputs[0];
         CV_Assert(outputs.size() == finalSliceRanges.size());
-        for (size_t i = 0; i < outputs.size(); i++)
+
+        if (!hasSteps)
         {
-            inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+            for (size_t i = 0; i < outputs.size(); i++)
+            {
+                inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
+            }
+        }
+        else
+        {
+            int dimsNum = inpMat.dims;
+
+            for (size_t i = 0; i < outputs.size(); i++)
+            {
+                std::vector<int> inpIdx(dimsNum, 0);
+                std::vector<int> outIdx(dimsNum, 0);
+                getSliceRecursive(inpMat, inpIdx, finalSliceRanges[i], sliceSteps[i], 0, dimsNum, outputs[i], outIdx);
+            }
         }
     }
 
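
The split above keeps the old fast path for unit steps: a contiguous slice can be expressed with cv::Range alone, which carries no stride, so only the stepped case needs the element-wise recursion. A small sketch of what the !hasSteps branch relies on, with invented blob sizes (illustrative only, not part of the patch):

#include <opencv2/core.hpp>

int main()
{
    // A 3-D blob with made-up sizes; a step-1 slice is just Mat's multi-range operator().
    int sz[] = {2, 4, 6};
    cv::Mat blob(3, sz, CV_32F, cv::Scalar(0));

    cv::Range ranges[] = { cv::Range::all(), cv::Range(1, 3), cv::Range(0, 6) };
    cv::Mat out;
    blob(ranges).copyTo(out);  // out is 2 x 2 x 6; no stride is possible here

    return 0;
}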
@@ -570,11 +612,42 @@ public:
     }
 #endif  // HAVE_DNN_NGRAPH
 
+private:
+    void getSliceRecursive(const Mat &inpMat, std::vector<int> &inpIdx,
+                           const std::vector<Range> &sliceRanges,
+                           const std::vector<int> &sliceSteps, int dim, int dimsNum,
+                           Mat &outputs, std::vector<int> &outIdx)
+    {
+        int begin = sliceRanges[dim].start;
+        int end = sliceRanges[dim].end;
+        int step = !sliceSteps.empty() ? sliceSteps[dim] : 1;
+
+        const bool is32F = inpMat.depth() == CV_32F;
+
+        // TODO optimization is required (for 2D tail case at least)
+        for (int k = begin, j = 0; k < end; k += step, j++)
+        {
+            inpIdx[dim] = k;
+            outIdx[dim] = j;
+
+            if (dim + 1 < dimsNum)
+                getSliceRecursive(inpMat, inpIdx, sliceRanges, sliceSteps, dim + 1, dimsNum, outputs, outIdx);
+            else
+            {
+                if (is32F)
+                    outputs.at<float>(outIdx.data()) = inpMat.at<float>(inpIdx.data());
+                else
+                    outputs.at<short>(outIdx.data()) = inpMat.at<short>(inpIdx.data());  // 16F emulation
+            }
+        }
+    }
+
 protected:
     // The actual non-negative values determined from @p sliceRanges depends on input size.
     std::vector<std::vector<Range> > finalSliceRanges;
     bool hasDynamicShapes;
     bool shapesInitialized;
+    bool hasSteps;
 };
 
 class CropLayerImpl CV_FINAL : public SliceLayerImpl
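
getSliceRecursive above visits one dimension per recursion level: the input index advances from the range start by the step while the output index is packed densely, and a single element is copied only at the innermost dimension. For a 2-D input the recursion unrolls to the two nested loops sketched below (illustrative only; a plain std::vector stands in for cv::Mat, and the ranges and steps are invented):

#include <vector>

int main()
{
    const int rows = 4, cols = 6;
    std::vector<float> inp(rows * cols);
    for (int i = 0; i < rows * cols; ++i)
        inp[i] = float(i);

    // Slice rows [0, 4) with step 2 and columns [1, 6) with step 2.
    const int rBegin = 0, rEnd = 4, rStep = 2;
    const int cBegin = 1, cEnd = 6, cStep = 2;

    std::vector<float> out;
    for (int r = rBegin; r < rEnd; r += rStep)      // outer recursion level (dim = 0)
        for (int c = cBegin; c < cEnd; c += cStep)  // innermost level (dim = 1) copies one element
            out.push_back(inp[r * cols + c]);

    // out now holds the elements at (0,1), (0,3), (0,5), (2,1), (2,3), (2,5).
    return 0;
}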
@@ -641,20 +641,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
         int axis = 0;
         std::vector<int> begin;
         std::vector<int> end;
+        std::vector<int> steps;
         int inp_size = node_proto.input_size();
 
         if (inp_size == 1)
         {
-            if (layerParams.has("steps"))
-            {
-                DictValue steps = layerParams.get("steps");
-                for (int i = 0; i < steps.size(); ++i)
-                {
-                    if (steps.get<int>(i) != 1)
-                        CV_Error(Error::StsNotImplemented,
-                                 "Slice layer only supports steps = 1");
-                }
-            }
             if (layerParams.has("axes")) {
                 DictValue axes = layerParams.get("axes");
                 for (int i = 1; i < axes.size(); ++i) {
@@ -677,7 +668,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                     int finish = ends.get<int>(i);
                     end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
                 }
-        } else {
+        } else { // inp_size > 1
             CV_Assert(inp_size >= 3);
             for (int i = 1; i < inp_size; i++) {
                 CV_Assert(constBlobs.find(node_proto.input(i)) != constBlobs.end());
@@ -711,6 +702,12 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             if (inp_size == 5) {
                 CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
                 Mat step_blob = getBlob(node_proto, 4);
+                const int* steps_ptr = step_blob.ptr<int>();
+
+                if (axis > 0)
+                    steps.resize(axis, 1);
+
+                std::copy(steps_ptr, steps_ptr + step_blob.total(), std::back_inserter(steps));
 
                 // Very strange application for Slice op with tensor reversing.
                 // We just workaround it for 2d constants.
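
A note on the added importer lines: the ONNX "steps" input lines up with the axes that are actually sliced, so when the first sliced axis (axis) is positive the importer front-pads the vector with unit steps for the untouched leading dimensions. A tiny standalone sketch of that padding with made-up values (illustrative only):

#include <algorithm>
#include <iterator>
#include <vector>

int main()
{
    std::vector<int> steps;
    const int axis = 2;        // first sliced axis (invented for the example)
    const int raw[] = {2, 3};  // stands in for the contents of step_blob

    if (axis > 0)
        steps.resize(axis, 1);                           // unit steps for the leading axes
    std::copy(raw, raw + 2, std::back_inserter(steps));  // append the real steps

    // steps == {1, 1, 2, 3}: dimensions 0 and 1 are untouched, 2 and 3 are strided.
    return 0;
}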
@@ -728,13 +725,15 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
                         return;
                     }
                 }
-                CV_CheckEQ(countNonZero(step_blob != 1), 0, "Slice layer only supports steps = 1");
             }
         }
         layerParams.set("begin", DictValue::arrayInt(&begin[0], begin.size()));
         layerParams.set("end", DictValue::arrayInt(&end[0], end.size()));
         layerParams.set("axis", axis);
 
+        if (!steps.empty())
+            layerParams.set("steps", DictValue::arrayInt(&steps[0], steps.size()));
+
         if (constBlobs.find(node_proto.input(0)) != constBlobs.end())
         {
             Mat inp = getBlob(node_proto, 0);
@@ -627,6 +627,26 @@ TEST_P(Test_ONNX_layers, Slice)
 #endif
 }
 
+TEST_P(Test_ONNX_layers, Slice_Steps_2DInput)
+{
+    testONNXModels("slice_opset_11_steps_2d");
+}
+
+TEST_P(Test_ONNX_layers, Slice_Steps_3DInput)
+{
+    testONNXModels("slice_opset_11_steps_3d");
+}
+
+TEST_P(Test_ONNX_layers, Slice_Steps_4DInput)
+{
+    testONNXModels("slice_opset_11_steps_4d");
+}
+
+TEST_P(Test_ONNX_layers, Slice_Steps_5DInput)
+{
+    testONNXModels("slice_opset_11_steps_5d");
+}
+
 TEST_P(Test_ONNX_layers, Softmax)
 {
     testONNXModels("softmax");