Merge pull request #12264 from dkurt:dnn_remove_forward_method

* Remove a forward method in dnn::Layer

* Add a test

* Fix tests

* Mark multiple dnn::Layer::finalize methods as deprecated

* Change dnn's inputBlobs back to a vector of pointers

* Remove Layer::forward_fallback from CV_OCL_RUN scopes
Authored by Dmitry Kurtaev on 2018-09-06 13:26:47 +03:00; committed by Alexander Alekhin
parent 8e8315abfd
commit d486204a0d
40 changed files with 820 additions and 487 deletions
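
For context, the practical effect of this patch on user code: custom layers now only have to override the InputArrayOfArrays-based finalize/forward virtuals, while the old std::vector<Mat*> overloads survive as deprecated, non-pure stubs. Below is a minimal sketch written against the post-patch 3.4 headers; the class, the layer type name "MyPassthrough", and the registration in main() are illustrative assumptions, not part of this commit.

// Minimal custom layer against the post-patch headers (OpenCV 3.4.x dnn).
// The class, the type name "MyPassthrough" and the registration below are
// hypothetical examples, not part of this commit.
#include <opencv2/dnn.hpp>

class MyPassthroughLayer : public cv::dnn::Layer
{
public:
    MyPassthroughLayer(const cv::dnn::LayerParams& params) : cv::dnn::Layer(params) {}

    static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
    {
        return cv::Ptr<cv::dnn::Layer>(new MyPassthroughLayer(params));
    }

    // New-style finalize: blob shapes are inspected through InputArrayOfArrays.
    virtual void finalize(cv::InputArrayOfArrays inputs_arr,
                          cv::OutputArrayOfArrays /*outputs_arr*/) CV_OVERRIDE
    {
        std::vector<cv::Mat> inputs;
        inputs_arr.getMatVector(inputs);
        CV_Assert(!inputs.empty());
    }

    // New-style forward: overriding the std::vector<Mat*> variant is no longer required.
    virtual void forward(cv::InputArrayOfArrays inputs_arr,
                         cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        // Half-precision blobs arrive as CV_16S; defer them to the generic fallback
        // (this mirrors the pattern added to the built-in layers in this patch).
        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        for (size_t i = 0; i < inputs.size(); i++)
            if (inputs[i].data != outputs[i].data)
                inputs[i].copyTo(outputs[i]);
    }
};

int main()
{
    // Hypothetical registration; after this, nets loaded through cv::dnn can
    // instantiate layers of type "MyPassthrough".
    cv::dnn::LayerFactory::registerLayer("MyPassthrough", MyPassthroughLayer::create);
    return 0;
}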


@ -46,9 +46,9 @@
#include <opencv2/core.hpp>
#if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS
#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v7 {
#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v8 {
#define CV__DNN_EXPERIMENTAL_NS_END }
namespace cv { namespace dnn { namespace experimental_dnn_34_v7 { } using namespace experimental_dnn_34_v7; }}
namespace cv { namespace dnn { namespace experimental_dnn_34_v8 { } using namespace experimental_dnn_34_v8; }}
#else
#define CV__DNN_EXPERIMENTAL_NS_BEGIN
#define CV__DNN_EXPERIMENTAL_NS_END
@ -165,8 +165,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
};
class CV_EXPORTS ActivationLayer;
class CV_EXPORTS BatchNormLayer;
class CV_EXPORTS ScaleLayer;
/** @brief This interface class allows building new Layers, which are the building blocks of networks.
*
@ -181,20 +179,31 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
CV_PROP_RW std::vector<Mat> blobs;
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
* @param[in] input vector of already allocated input blobs
* @param[out] output vector of already allocated output blobs
*
* This method is called after the network has allocated all memory for input and output blobs
* and before inference.
*/
virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
CV_DEPRECATED virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param[in] inputs vector of already allocated input blobs
* @param[out] outputs vector of already allocated output blobs
*
* This method is called after the network has allocated all memory for input and output blobs
* and before inference.
*/
CV_WRAP virtual void finalize(InputArrayOfArrays inputs, OutputArrayOfArrays outputs);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @deprecated Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
* @param[in] input the input blobs.
* @param[out] output allocated output blobs, which will store results of the computation.
* @param[out] internals allocated internal blobs
*/
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) = 0;
CV_DEPRECATED virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @param[in] inputs the input blobs.
@ -210,14 +219,22 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
void forward_fallback(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals);
/** @brief @overload */
CV_WRAP void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief
* @overload
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
*/
CV_DEPRECATED void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief @overload */
CV_WRAP std::vector<Mat> finalize(const std::vector<Mat> &inputs);
/** @brief
* @overload
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
*/
CV_DEPRECATED std::vector<Mat> finalize(const std::vector<Mat> &inputs);
/** @brief Allocates layer and computes output. */
CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
/** @brief Allocates layer and computes output.
* @deprecated This method will be removed in a future release.
*/
CV_DEPRECATED CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);
/** @brief Returns index of input blob into the input array.
@ -388,9 +405,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Returns pointers to input layers of specific layer. */
std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
/** @brief Delete layer for the network (not implemented yet) */
CV_WRAP void deleteLayer(LayerId layer);
/** @brief Connects output of the first layer to input of the second layer.
* @param outPin descriptor of the first layer output.
* @param inpPin descriptor of the second layer input.


@ -146,16 +146,16 @@ public:
return false;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &) CV_OVERRIDE
virtual void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
std::vector<Mat> inps(inputs.size());
for (size_t i = 0; i < inputs.size(); ++i)
inps[i] = *inputs[i];
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
PyObject* args = pyopencv_from(inps);
PyObject* args = pyopencv_from(inputs);
PyObject* res = PyObject_CallMethodObjArgs(o, PyString_FromString("forward"), args, NULL);
Py_DECREF(args);
PyGILState_Release(gstate);
@ -174,11 +174,6 @@ public:
}
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
CV_Error(Error::StsNotImplemented, "");
}
private:
// Map layers types to python classes.
static std::map<std::string, std::vector<PyObject*> > pyLayers;


@ -430,19 +430,24 @@ struct DataLayer : public Layer
backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1;
}
void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs, outputs, internals));
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs, outputs, internals);
if (outputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*>&, std::vector<Mat>& outputs, std::vector<Mat> &) CV_OVERRIDE
{
std::vector<Mat> outputs, internals;
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
// Supported modes:
// | Input type | Output type |
// | fp32 | fp32 |
@ -567,8 +572,11 @@ struct DataLayer : public Layer
return false;
}
void finalize(const std::vector<Mat*>&, std::vector<Mat>& outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
CV_Assert_N(outputs.size() == scaleFactors.size(), outputs.size() == means.size(),
inputsData.size() == outputs.size());
skip = true;
@ -1414,6 +1422,7 @@ struct Net::Impl
addInfEngineNetOutputs(ld);
net = Ptr<InfEngineBackendNet>();
netBlobsWrappers.clear();
layer->preferableTarget = DNN_TARGET_CPU;
continue;
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
@ -1622,7 +1631,12 @@ struct Net::Impl
Ptr<Layer> layerPtr = ld.getLayerInstance();
{
layerPtr->finalize(ld.inputBlobs, ld.outputBlobs);
std::vector<Mat> inps(ld.inputBlobs.size());
for (int i = 0; i < ld.inputBlobs.size(); ++i)
{
inps[i] = *ld.inputBlobs[i];
}
layerPtr->finalize(inps, ld.outputBlobs);
layerPtr->preferableTarget = preferableTarget;
#if 0
std::cout << "\toutputs:";
@ -2138,7 +2152,12 @@ struct Net::Impl
ld.inputBlobsWrappers[i]->copyToHost();
}
layer->forward(ld.inputBlobs, ld.outputBlobs, ld.internals);
std::vector<Mat> inps(ld.inputBlobs.size());
for (int i = 0; i < ld.inputBlobs.size(); ++i)
{
inps[i] = *ld.inputBlobs[i];
}
layer->forward(inps, ld.outputBlobs, ld.internals);
if (DNN_CHECK_NAN_INF)
{
@ -2712,11 +2731,6 @@ int Net::getLayerId(const String &layer)
return impl->getLayerId(layer);
}
void Net::deleteLayer(LayerId)
{
CV_Error(Error::StsNotImplemented, "");
}
Ptr<Layer> Net::getLayer(LayerId layerId)
{
LayerData &ld = impl->getLayerData(layerId);
@ -3172,10 +3186,7 @@ static void vecToPVec(const std::vector<T> &v, std::vector<T*> &pv)
void Layer::finalize(const std::vector<Mat> &inputs, std::vector<Mat> &outputs)
{
CV_TRACE_FUNCTION();
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
this->finalize((InputArrayOfArrays)inputs, (OutputArrayOfArrays)outputs);
}
void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
@ -3183,6 +3194,18 @@ void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
(void)input;(void)output;
}
void Layer::finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr)
{
CV_TRACE_FUNCTION();
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
}
std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs)
{
CV_TRACE_FUNCTION();
@ -3192,12 +3215,17 @@ std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs)
return outputs;
}
void Layer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals)
void Layer::forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
{
// This method is kept for backward compatibility. DNN now calls it only to support user implementations.
}
void Layer::forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs, outputs, internals);
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
@ -3241,7 +3269,6 @@ void Layer::forward_fallback(InputArrayOfArrays inputs_arr, OutputArrayOfArrays
internals_arr.assign(orig_internals);
return;
}
std::vector<Mat> inpvec;
std::vector<Mat> outputs;
std::vector<Mat> internals;
@ -3265,10 +3292,8 @@ void Layer::run(const std::vector<Mat> &inputs, std::vector<Mat> &outputs, std::
{
CV_TRACE_FUNCTION();
std::vector<Mat*> inputsp;
vecToPVec(inputs, inputsp);
this->finalize(inputsp, outputs);
this->forward(inputsp, outputs, internals);
this->finalize(inputs, outputs);
this->forward(inputs, outputs, internals);
}
Layer::~Layer() {}
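
The dnn.cpp hunks above keep both layer generations working by shuttling blobs between pointer and value form: Net::Impl dereferences ld.inputBlobs into a temporary std::vector<Mat> before calling the new entry points, while Layer::finalize/forward(InputArrayOfArrays, ...) use vecToPVec to hand pointers back to layers that still override the deprecated virtuals. A standalone sketch of that bridging idiom follows (only cv::Mat header copies are involved; no pixel data is duplicated):

#include <opencv2/core.hpp>
#include <vector>

// vector<Mat*> -> vector<Mat>: what Net::Impl now does before calling the
// new finalize/forward entry points (shallow header copies only).
static std::vector<cv::Mat> dereference(const std::vector<cv::Mat*>& ptrs)
{
    std::vector<cv::Mat> mats(ptrs.size());
    for (size_t i = 0; i < ptrs.size(); ++i)
        mats[i] = *ptrs[i];
    return mats;
}

// vector<Mat> -> vector<Mat*>: what vecToPVec does so that old user layers,
// which still override the std::vector<Mat*> virtuals, keep receiving pointers.
static std::vector<cv::Mat*> toPointers(std::vector<cv::Mat>& mats)
{
    std::vector<cv::Mat*> ptrs(mats.size());
    for (size_t i = 0; i < mats.size(); ++i)
        ptrs[i] = &mats[i];
    return ptrs;
}

int main()
{
    std::vector<cv::Mat> blobs(2, cv::Mat(2, 2, CV_32F, cv::Scalar(0)));
    std::vector<cv::Mat*> ptrs = toPointers(blobs);
    std::vector<cv::Mat> back = dereference(ptrs);
    CV_Assert(back[0].data == blobs[0].data);  // no pixel data was copied
    return 0;
}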


@ -234,18 +234,20 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(blobs.size() >= 2);
CV_Assert(inputs.size() == 1);
Mat &inpBlob = *inputs[0];
Mat &inpBlob = inputs[0];
CV_Assert(inpBlob.dims == 2 || inpBlob.dims == 4);
int rows = inpBlob.dims > 2 ? inpBlob.size[2] : 1;
int cols = inpBlob.dims > 2 ? inpBlob.size[3] : 1;


@ -99,17 +99,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (int i = 0, n = outputs.size(); i < n; ++i)
if (outputs[i].data != inputs[i]->data)
inputs[i]->copyTo(outputs[i]);
if (outputs[i].data != inputs[i].data)
inputs[i].copyTo(outputs[i]);
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE


@ -111,12 +111,12 @@ public:
class ChannelConcatInvoker : public ParallelLoopBody
{
public:
std::vector<Mat*>* inputs;
std::vector<Mat>* inputs;
Mat* output;
int nstripes;
std::vector<const float*> chptrs;
static void run(std::vector<Mat*>& inputs, Mat& output, int nstripes)
static void run(std::vector<Mat>& inputs, Mat& output, int nstripes)
{
ChannelConcatInvoker cc;
cc.inputs = &inputs;
@ -127,7 +127,7 @@ public:
int nchannels = 0, batchsz = output.size[0];
for( i = 0; i < ninputs; i++ )
{
Mat& inp = *inputs[i];
Mat& inp = inputs[i];
CV_Assert( inp.isContinuous() && (inp.type() == CV_32F || inp.type() == CV_16S) &&
inp.dims == 4 && inp.size[0] == output.size[0] &&
inp.size[2] == output.size[2] &&
@ -142,7 +142,7 @@ public:
int ofs = 0;
for( i = 0; i < ninputs; i++)
{
Mat& inp = *inputs[i];
Mat& inp = inputs[i];
for( int j = 0; j < batchsz; j++ )
for( int k = 0; k < inp.size[1]; k++ )
{
@ -241,15 +241,17 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
int cAxis = clamp(axis, inputs[0]->dims);
int cAxis = clamp(axis, inputs[0].dims);
Mat& outMat = outputs[0];
if (padding)
@ -267,14 +269,14 @@ public:
ranges[cAxis].start = 0;
for (size_t i = 0; i < inputs.size(); i++)
{
ranges[cAxis].end = ranges[cAxis].start + inputs[i]->size[cAxis];
ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];
for (int j = 0; j < outMat.dims; ++j)
{
if (j == cAxis) continue;
ranges[j].start = (outMat.size[j] - inputs[i]->size[j]) / 2;
ranges[j].end = ranges[j].start + inputs[i]->size[j];
ranges[j].start = (outMat.size[j] - inputs[i].size[j]) / 2;
ranges[j].end = ranges[j].start + inputs[i].size[j];
}
inputs[i]->copyTo(outMat(&ranges[0]));
inputs[i].copyTo(outMat(&ranges[0]));
ranges[cAxis].start = ranges[cAxis].end;
}
}
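
The depth check that now precedes every CPU implementation above replaces the old unconditional Layer::forward_fallback call: with the DNN_TARGET_OPENCL_FP16 target, blobs are stored in CV_16S Mats holding half-precision data, which the plain C++ paths cannot process, so such inputs are routed through forward_fallback, which in the generic implementation converts them to CV_32F, re-enters forward(), and converts the result back. A small self-contained illustration of the pattern follows; DoubleLayer is a made-up example and the main() below exercises only the CV_32F path.

#include <opencv2/dnn.hpp>

// Made-up layer that doubles its input; only the CV_32F path is implemented,
// CV_16S (half precision) is deferred to Layer::forward_fallback.
class DoubleLayer : public cv::dnn::Layer
{
public:
    DoubleLayer(const cv::dnn::LayerParams& params) : cv::dnn::Layer(params) {}

    virtual void forward(cv::InputArrayOfArrays inputs_arr,
                         cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        if (inputs_arr.depth() == CV_16S)
        {
            // FP16 blobs: the generic fallback handles the CV_16S <-> CV_32F
            // round trip and re-enters forward() with FP32 data.
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        for (size_t i = 0; i < inputs.size(); i++)
            inputs[i].convertTo(outputs[i], CV_32F, 2.0);  // out = 2 * in
    }
};

int main()
{
    cv::dnn::LayerParams params;
    DoubleLayer layer(params);
    std::vector<cv::Mat> inputs(1, cv::Mat(2, 3, CV_32F, cv::Scalar(1)));
    std::vector<cv::Mat> outputs(1, cv::Mat(2, 3, CV_32F, cv::Scalar(0)));
    std::vector<cv::Mat> internals;
    layer.forward(inputs, outputs, internals);  // outputs[0] becomes all 2.0
    return 0;
}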


@ -79,49 +79,24 @@ public:
adjustPad.height < stride.height);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
if (type == "Convolution")
return preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height;
else
{
CV_Assert(type == "Deconvolution");
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
if (group != 1)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R3)
return preferableTarget == DNN_TARGET_CPU;
#endif
return false;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return dilation.width == 1 && dilation.height == 1;
return true;
}
}
else
#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
CV_Assert(blobs.size() >= 1 && blobs.size() <= 2);
CV_Assert(blobs[0].dims == 4 && blobs[0].size[3] == kernel.width && blobs[0].size[2] == kernel.height);
const Mat &input = *inputs[0];
const Mat &input = inputs[0];
CV_Assert(input.dims == 4 && (input.type() == CV_32F || input.type() == CV_64F || input.type() == CV_16S));
for (size_t i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->type() == input.type());
CV_Assert(inputs[i]->dims == 4 && inputs[i]->size[1] == input.size[1]);
CV_Assert(inputs[i]->size[2] == input.size[2] && inputs[i]->size[3] == input.size[3]);
CV_Assert(inputs[i].type() == input.type());
CV_Assert(inputs[i].dims == 4 && inputs[i].size[1] == input.size[1]);
CV_Assert(inputs[i].size[2] == input.size[2] && inputs[i].size[3] == input.size[3]);
}
Size outSize = Size(outputs[0].size[3], outputs[0].size[2]);
@ -225,6 +200,14 @@ public:
return shape(out.area(), ksize);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height;
else
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@ -262,9 +245,9 @@ public:
return false;
}
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
CV_Assert(!blobs.empty());
const int outCn = blobs[0].size[0];
@ -1007,22 +990,24 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
/*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
name.c_str(), inputs[0]->size[0], inputs[0]->size[1], inputs[0]->size[2], inputs[0]->size[3],
name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
kernel.width, kernel.height, pad.width, pad.height,
stride.width, stride.height, dilation.width, dilation.height);*/
CV_Assert_N(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0]->data != outputs[0].data);
CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0].data != outputs[0].data);
int ngroups = inputs[0]->size[1]/blobs[0].size[1];
int ngroups = inputs[0].size[1]/blobs[0].size[1];
CV_Assert(outputs[0].size[1] % ngroups == 0);
int outCn = blobs[0].size[0];
@ -1049,7 +1034,7 @@ public:
int nstripes = std::max(getNumThreads(), 1);
ParallelConv::run(*inputs[0], outputs[0], weightsMat, biasvec, reluslope,
ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
kernel, pad, stride, dilation, activ.get(), ngroups, nstripes);
}
@ -1089,6 +1074,29 @@ public:
return shape(ksize, inpH * inpW);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
if (group != 1)
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R3)
return preferableTarget == DNN_TARGET_CPU;
#endif
return false;
}
if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
return dilation.width == 1 && dilation.height == 1;
return true;
}
else
#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@ -1141,11 +1149,15 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
getConvPoolPaddings(Size(outputs[0].size[3], outputs[0].size[2]),
Size(inputs[0]->size[3], inputs[0]->size[2]),
Size(inputs[0].size[3], inputs[0].size[2]),
kernel, stride, padMode, dilation, pad);
}
@ -1494,18 +1506,21 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget) &&
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
forward_ocl(inputs_arr, outputs_arr, internals_arr));
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
int outCn = numOutput;
int inpCn = inputs[0]->size[1];
int inpCn = inputs[0].size[1];
bool is1x1flag = is1x1();
int nstripes = getNumThreads();
@ -1520,13 +1535,13 @@ public:
int ngroups = outCn / blobs[0].size[1];
int inpGroupCn = inpCn / ngroups;
int outGroupCn = blobs[0].size[1];
const Mat& inp = *inputs[ii];
const Mat& inp = inputs[ii];
Mat& out = outputs[ii];
int numImg = inp.size[0];
int inpH = inp.size[2], inpW = inp.size[3];
int outH = out.size[2], outW = out.size[3];
Mat convBlob = inputs[ii]->reshape(1, numImg*inpCn);
Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
Mat decnBlob = out.reshape(1, numImg*outCn);
for (int n = 0; n < numImg; n++)


@ -40,17 +40,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = *inputs[0];
Mat& inp = inputs[0];
Mat& out = outputs[0];
Mat boxes = inputs[1]->reshape(1, inputs[1]->total() / 7);
Mat boxes = inputs[1].reshape(1, inputs[1].total() / 7);
const int numChannels = inp.size[1];
const int inpHeight = inp.size[2];
const int inpWidth = inp.size[3];


@ -90,12 +90,14 @@ public:
return false;
}
void finalize(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(2 == inputs.size());
const Mat &inpBlob = *inputs[0];
const Mat &inpSzBlob = *inputs[1];
const Mat &inpBlob = inputs[0];
const Mat &inpSzBlob = inputs[1];
int dims = inpBlob.dims;
int start_axis = clamp(startAxis, dims);
@ -135,18 +137,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat &input = *inputs[0];
Mat &output = outputs[0];
input(&crop_ranges[0]).copyTo(output);
Mat &input = inputs[0];
input(&crop_ranges[0]).copyTo(outputs[0]);
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE


@ -419,27 +419,28 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
}
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
std::vector<LabelBBox> allDecodedBBoxes;
std::vector<Mat> allConfidenceScores;
int num = inputs[0]->size[0];
int num = inputs[0].size[0];
// extract predictions from input layers
{
int numPriors = inputs[2]->size[2] / 4;
int numPriors = inputs[2].size[2] / 4;
const float* locationData = inputs[0]->ptr<float>();
const float* confidenceData = inputs[1]->ptr<float>();
const float* priorData = inputs[2]->ptr<float>();
const float* locationData = inputs[0].ptr<float>();
const float* confidenceData = inputs[1].ptr<float>();
const float* priorData = inputs[2].ptr<float>();
// Retrieve all location predictions
std::vector<LabelBBox> allLocationPredictions;
@ -465,9 +466,9 @@ public:
else
{
// Input image sizes;
CV_Assert(inputs[3]->dims == 4);
clipBounds.xmax = inputs[3]->size[3] - 1;
clipBounds.ymax = inputs[3]->size[2] - 1;
CV_Assert(inputs[3].dims == 4);
clipBounds.xmax = inputs[3].size[3] - 1;
clipBounds.ymax = inputs[3].size[2] - 1;
}
}
DecodeBBoxesAll(allLocationPredictions, priorBBoxes, priorVariances, num,
@ -502,6 +503,8 @@ public:
allIndices[i], _groupByClasses);
}
CV_Assert(count == numKept);
// Sync results back due to the changed output shape.
outputs_arr.assign(outputs);
}
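
The outputs_arr.assign(outputs) call added to DetectionOutput above highlights a subtlety of the new scheme: getMatVector() returns plain Mat headers, so a layer that reallocates its output (here because the number of detections is only known at run time) must sync the new Mats back through OutputArrayOfArrays::assign(); otherwise the caller keeps seeing the originally allocated blob. A toy sketch of that idiom follows; TopRowLayer is a made-up example, not part of this patch.

#include <opencv2/dnn.hpp>

// Made-up layer whose output shape is only known inside forward(): it keeps
// just the first row of its input, so it replaces outputs[0] with a new Mat
// and must publish that Mat through outputs_arr.assign().
class TopRowLayer : public cv::dnn::Layer
{
public:
    TopRowLayer(const cv::dnn::LayerParams& params) : cv::dnn::Layer(params) {}

    virtual void forward(cv::InputArrayOfArrays inputs_arr,
                         cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays /*internals_arr*/) CV_OVERRIDE
    {
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        // This reallocates: outputs[0] now points at a fresh buffer,
        // not at the blob the caller allocated.
        outputs[0] = inputs[0].row(0).clone();

        // Sync the reallocated Mat back; without this the caller would still
        // see the original (untouched) output blob.
        outputs_arr.assign(outputs);
    }
};

int main()
{
    cv::dnn::LayerParams params;
    TopRowLayer layer(params);
    std::vector<cv::Mat> inputs(1, cv::Mat(4, 5, CV_32F, cv::Scalar(1)));
    std::vector<cv::Mat> outputs(1);  // final shape is decided inside forward()
    std::vector<cv::Mat> internals;
    layer.forward(inputs, outputs, internals);
    CV_Assert(outputs[0].rows == 1 && outputs[0].cols == 5);
    return 0;
}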
size_t outputDetections_(


@ -187,16 +187,19 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(this->preferableTarget),
func.applyOCL(inputs_arr, outputs_arr, internals_arr))
if (inputs_arr.depth() == CV_16S)
{
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
const Mat &src = *inputs[i];
const Mat &src = inputs[i];
Mat &dst = outputs[i];
CV_Assert(src.size == dst.size && src.type() == dst.type() &&
src.isContinuous() && dst.isContinuous() && src.type() == CV_32F);


@ -123,7 +123,7 @@ public:
class EltwiseInvoker : public ParallelLoopBody
{
public:
const Mat** srcs;
const Mat* srcs;
int nsrcs;
Mat* dst;
const std::vector<float>* coeffs;
@ -135,7 +135,7 @@ public:
EltwiseInvoker() : srcs(0), nsrcs(0), dst(0), coeffs(0), op(PROD), nstripes(0), activ(0), channels(0), planeSize(0) {}
static void run(const Mat** srcs, int nsrcs, Mat& dst,
static void run(const Mat* srcs, int nsrcs, Mat& dst,
const std::vector<float>& coeffs, EltwiseOp op,
const ActivationLayer* activ, int nstripes)
{
@ -144,9 +144,9 @@ public:
for( int i = 0; i < nsrcs; i++ )
{
CV_Assert(srcs[i]->size == dst.size &&
srcs[i]->type() == dst.type() &&
srcs[i]->isContinuous());
CV_Assert(srcs[i].size == dst.size &&
srcs[i].type() == dst.type() &&
srcs[i].isContinuous());
}
EltwiseInvoker p;
@ -200,14 +200,14 @@ public:
for( c = 0; c < channels; c++ )
{
size_t globalDelta = delta + (sampleIdx*channels + c)*planeSize;
const float* srcptr0 = srcs[0]->ptr<float>() + globalDelta;
const float* srcptr0 = srcs[0].ptr<float>() + globalDelta;
float* dstptr = dstptr0 + globalDelta;
if( op == PROD )
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = srcptr0[j]*srcptr1[j];
@ -219,7 +219,7 @@ public:
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = std::max(srcptr0[j], srcptr1[j]);
@ -231,7 +231,7 @@ public:
{
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
for( j = 0; j < blockSize; j++ )
{
dstptr[j] = srcptr0[j] + srcptr1[j];
@ -244,7 +244,7 @@ public:
float c0 = coeffsptr[0];
for( k = 1; k < n; k++ )
{
const float* srcptr1 = srcs[k]->ptr<float>() + globalDelta;
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
float c1 = coeffsptr[k];
for( j = 0; j < blockSize; j++ )
{
@ -358,17 +358,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(outputs.size() == 1);
const int nstripes = getNumThreads();
EltwiseInvoker::run((const Mat**)&inputs[0], (int)inputs.size(), outputs[0],
EltwiseInvoker::run(&inputs[0], (int)inputs.size(), outputs[0],
coeffs, op, activ.get(), nstripes);
}


@ -139,18 +139,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
MatShape outShape = shape(outputs[i]);
outputs[i] = inputs[i]->reshape(1, (int)outShape.size(), &outShape[0]);
if (inputs[i].data != outputs[i].data)
{
inputs[i].reshape(1, (int)outShape.size(), &outShape[0]).copyTo(outputs[i]);
}
}
}


@ -273,7 +273,7 @@ public:
};
#ifdef HAVE_OPENCL
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
innerProductOp.release();
}
@ -393,20 +393,22 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
int axisCan = clamp(axis, input[0]->dims);
int outerSize = input[0]->total(0, axisCan);
int axisCan = clamp(axis, input[0].dims);
int outerSize = input[0].total(0, axisCan);
for (size_t i = 0; i < input.size(); i++)
{
Mat srcMat = input[i]->reshape(1, outerSize);
Mat srcMat = input[i].reshape(1, outerSize);
Mat dstMat = output[i].reshape(1, outerSize);
const int nstripes = getNumThreads();


@ -96,7 +96,7 @@ public:
}
#ifdef HAVE_OPENCL
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
lrnOp.release();
}
@ -152,21 +152,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == outputs.size());
for (int i = 0; i < inputs.size(); i++)
{
CV_Assert(inputs[i]->dims == 4);
CV_Assert(inputs[i].dims == 4);
Mat &src = *inputs[i];
Mat &src = inputs[i];
Mat &dst = outputs[i];
switch (type)


@ -62,17 +62,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 2);
Mat& input = *inputs[0];
Mat& indices = *inputs[1];
Mat& input = inputs[0];
Mat& indices = inputs[1];
CV_Assert(input.total() == indices.total());
CV_Assert(input.size[0] == 1);


@ -96,13 +96,15 @@ public:
return fuse_relu;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
int splitDim = (acrossChannels) ? 1 : 2;
int i, newRows = 1;
for( i = 0; i < splitDim; i++ )
newRows *= inputs[0]->size[i];
zeroDev = inputs[0]->total() == newRows;
newRows *= inputs[0].size[i];
zeroDev = inputs[0].total() == newRows;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
@ -271,17 +273,20 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
{
Mat &inpBlob = *inputs[inpIdx];
Mat &inpBlob = inputs[inpIdx];
Mat &outBlob = outputs[inpIdx];
int splitDim = (acrossChannels) ? 1 : 2;


@ -89,12 +89,14 @@ public:
return true;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(inputs.size() == 1);
endAxis = endAxis == -1 ? (inputs[0]->dims - 1) : endAxis;
startAxis = startAxis == -1 ? (inputs[0]->dims - 1) : startAxis;
acrossSpatial = (startAxis == 1 && endAxis == inputs[0]->dims - 1);
endAxis = endAxis == -1 ? (inputs[0].dims - 1) : endAxis;
startAxis = startAxis == -1 ? (inputs[0].dims - 1) : startAxis;
acrossSpatial = (startAxis == 1 && endAxis == inputs[0].dims - 1);
}
#ifdef HAVE_OPENCL
@ -186,18 +188,21 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() == 1 && outputs.size() == 1);
CV_Assert(inputs[0]->total() == outputs[0].total());
CV_Assert(inputs[0].total() == outputs[0].total());
const Mat& inp0 = *inputs[0];
const Mat& inp0 = inputs[0];
Mat& buffer = internals[0];
startAxis = clamp(startAxis, inp0.dims);
endAxis = clamp(endAxis, inp0.dims);


@ -61,14 +61,17 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
// Compute dstRanges.
const MatSize& inpShape = inputs[0]->size;
const MatSize& inpShape = inputs[0].size;
dstRanges.resize(paddings.size());
int offset = 0;
if (inputDims != -1 && inputs[0]->dims != inputDims)
if (inputDims != -1 && inputs[0].dims != inputDims)
{
dstRanges.insert(dstRanges.begin(), Range::all());
offset = 1;
@ -81,7 +84,7 @@ public:
}
// Add the rest of dimensions.
for (int i = dstRanges.size(); i < inputs[0]->dims; ++i)
for (int i = dstRanges.size(); i < inputs[0].dims; ++i)
dstRanges.push_back(Range::all());
}
@ -96,31 +99,33 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (paddingType == "constant")
{
outputs[0].setTo(paddingValue);
inputs[0]->copyTo(outputs[0](dstRanges));
inputs[0].copyTo(outputs[0](dstRanges));
}
else if (paddingType == "reflect")
{
CV_Assert(inputs.size() == 1);
CV_Assert(outputs.size() == 1);
CV_Assert(inputs[0]->dims == 4);
CV_Assert(inputs[0].dims == 4);
CV_Assert(outputs[0].dims == 4);
if (inputs[0]->size[0] != outputs[0].size[0] || inputs[0]->size[1] != outputs[0].size[1])
if (inputs[0].size[0] != outputs[0].size[0] || inputs[0].size[1] != outputs[0].size[1])
CV_Error(Error::StsNotImplemented, "Only spatial reflection padding is supported.");
const int inpHeight = inputs[0]->size[2];
const int inpWidth = inputs[0]->size[3];
const int inpHeight = inputs[0].size[2];
const int inpWidth = inputs[0].size[3];
const int outHeight = outputs[0].size[2];
const int outWidth = outputs[0].size[3];
const int padTop = dstRanges[2].start;
@ -130,11 +135,11 @@ public:
CV_CheckLT(padTop, inpHeight, ""); CV_CheckLT(padBottom, inpHeight, "");
CV_CheckLT(padLeft, inpWidth, ""); CV_CheckLT(padRight, inpWidth, "");
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inputs[0].size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inputs[0].size[1]; ++ch)
{
copyMakeBorder(getPlane(*inputs[0], n, ch),
copyMakeBorder(getPlane(inputs[0], n, ch),
getPlane(outputs[0], n, ch),
padTop, padBottom, padLeft, padRight,
BORDER_REFLECT_101);


@ -172,18 +172,21 @@ public:
_count = _oldStride[0] * shapeBefore[0];
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
if(!_needsPermute)
{
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() > 0);
const Mat& inp0 = *inputs[0];
const Mat& inp0 = inputs[0];
CV_Assert((int)_numAxes == inp0.dims);
computeStrides(shape(*inputs[0]), shape(outputs[0]));
computeStrides(shape(inputs[0]), shape(outputs[0]));
#ifdef HAVE_OPENCL
if (uorder.empty())
@ -319,22 +322,24 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
size_t k, ninputs = inputs.size();
if(!_needsPermute)
{
for (k = 0; k < ninputs; k++)
{
CV_Assert(outputs[k].total() == inputs[k]->total());
if (outputs[k].data != inputs[k]->data)
inputs[k]->copyTo(outputs[k]);
CV_Assert(outputs[k].total() == inputs[k].total());
if (outputs[k].data != inputs[k].data)
inputs[k].copyTo(outputs[k]);
}
}
else
@ -346,10 +351,10 @@ public:
for (k = 0; k < ninputs; k++)
{
const Mat& inp = *inputs[k];
const Mat& inp = inputs[k];
Mat& out = outputs[k];
CV_Assert(inp.dims == numAxes && inp.size == inputs[0]->size);
CV_Assert(inp.dims == numAxes && inp.size == inputs[0].size);
CV_Assert(out.dims == numAxes && out.size == outputs[0].size);
CV_Assert(inp.isContinuous() && out.isContinuous());


@ -114,11 +114,15 @@ public:
Ptr<OCL4DNNPool<float> > poolOp;
#endif
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(!inputs.empty());
cv::Size inp(inputs[0]->size[3], inputs[0]->size[2]),
cv::Size inp(inputs[0].size[3], inputs[0].size[2]),
out(outputs[0].size[3], outputs[0].size[2]);
if(globalPooling)
@ -204,28 +208,29 @@ public:
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
}
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
switch (type)
{
case MAX:
CV_Assert_N(inputs.size() == 1, outputs.size() == 2);
maxPooling(*inputs[0], outputs[0], outputs[1]);
maxPooling(inputs[0], outputs[0], outputs[1]);
break;
case AVE:
CV_Assert_N(inputs.size() == 1, outputs.size() == 1);
avePooling(*inputs[0], outputs[0]);
avePooling(inputs[0], outputs[0]);
break;
case ROI: case PSROI:
CV_Assert_N(inputs.size() == 2, outputs.size() == 1);
roiPooling(*inputs[0], *inputs[1], outputs[0]);
roiPooling(inputs[0], inputs[1], outputs[0]);
break;
default:
CV_Error(Error::StsNotImplemented, "Not implemented");


@ -297,15 +297,18 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_CheckGT(inputs.size(), (size_t)1, "");
CV_CheckEQ(inputs[0]->dims, 4, ""); CV_CheckEQ(inputs[1]->dims, 4, "");
int layerWidth = inputs[0]->size[3];
int layerHeight = inputs[0]->size[2];
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
int imageWidth = inputs[1]->size[3];
int imageHeight = inputs[1]->size[2];
CV_CheckGT(inputs.size(), (size_t)1, "");
CV_CheckEQ(inputs[0].dims, 4, ""); CV_CheckEQ(inputs[1].dims, 4, "");
int layerWidth = inputs[0].size[3];
int layerHeight = inputs[0].size[2];
int imageWidth = inputs[1].size[3];
int imageHeight = inputs[1].size[2];
_stepY = _stepY == 0 ? (static_cast<float>(imageHeight) / layerHeight) : _stepY;
_stepX = _stepX == 0 ? (static_cast<float>(imageWidth) / layerWidth) : _stepX;
@ -403,21 +406,23 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 2);
int _layerWidth = inputs[0]->size[3];
int _layerHeight = inputs[0]->size[2];
int _layerWidth = inputs[0].size[3];
int _layerHeight = inputs[0].size[2];
int _imageWidth = inputs[1]->size[3];
int _imageHeight = inputs[1]->size[2];
int _imageWidth = inputs[1].size[3];
int _imageHeight = inputs[1].size[2];
float* outputPtr = outputs[0].ptr<float>();
float _boxWidth, _boxHeight;


@ -137,24 +137,27 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat*> layerInputs;
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
std::vector<Mat> layerInputs;
std::vector<Mat> layerOutputs;
// Scores permute layer.
Mat scores = getObjectScores(*inputs[0]);
layerInputs.assign(1, &scores);
Mat scores = getObjectScores(inputs[0]);
layerInputs.assign(1, scores);
layerOutputs.assign(1, Mat(shape(scores.size[0], scores.size[2],
scores.size[3], scores.size[1]), CV_32FC1));
scoresPermute->finalize(layerInputs, layerOutputs);
// BBox predictions permute layer.
Mat* bboxDeltas = inputs[1];
CV_Assert(bboxDeltas->dims == 4);
const Mat& bboxDeltas = inputs[1];
CV_Assert(bboxDeltas.dims == 4);
layerInputs.assign(1, bboxDeltas);
layerOutputs.assign(1, Mat(shape(bboxDeltas->size[0], bboxDeltas->size[2],
bboxDeltas->size[3], bboxDeltas->size[1]), CV_32FC1));
layerOutputs.assign(1, Mat(shape(bboxDeltas.size[0], bboxDeltas.size[2],
bboxDeltas.size[3], bboxDeltas.size[1]), CV_32FC1));
deltasPermute->finalize(layerInputs, layerOutputs);
}
@ -251,19 +254,22 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() == 3);
CV_Assert(internals.size() == 3);
const Mat& scores = *inputs[0];
const Mat& bboxDeltas = *inputs[1];
const Mat& imInfo = *inputs[2];
const Mat& scores = inputs[0];
const Mat& bboxDeltas = inputs[1];
const Mat& imInfo = inputs[2];
Mat& priorBoxes = internals[0];
Mat& permuttedScores = internals[1];
Mat& permuttedDeltas = internals[2];


@ -216,11 +216,14 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> input;
inputs_arr.getMatVector(input);
CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
CV_Assert(input.size() == 1);
const Mat& inp0 = *input[0];
const Mat& inp0 = input[0];
Mat &Wh = blobs[0], &Wx = blobs[1];
int numOut = Wh.size[1];
@ -256,13 +259,16 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output, internals;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
internals_arr.getMatVector(internals);
const Mat &Wh = blobs[0];
const Mat &Wx = blobs[1];
@ -277,7 +283,7 @@ public:
dummyOnes.setTo(1.);
int numSamplesTotal = numTimeStamps*numSamples;
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat xTs = input[0].reshape(1, numSamplesTotal);
Mat hOutTs = output[0].reshape(1, numSamplesTotal);
Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
@ -432,8 +438,11 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> input, outputs;
inputs_arr.getMatVector(input);
CV_Assert(input.size() >= 1 && input.size() <= 2);
Wxh = blobs[0];
@ -446,7 +455,7 @@ public:
numX = Wxh.cols;
numO = Who.rows;
const Mat& inp0 = *input[0];
const Mat& inp0 = input[0];
CV_Assert(inp0.dims >= 2);
CV_Assert(inp0.total(2) == numX);
@ -477,15 +486,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> input, output, internals;
inputs_arr.getMatVector(input);
outputs_arr.getMatVector(output);
internals_arr.getMatVector(internals);
Mat xTs = input[0]->reshape(1, numSamplesTotal);
Mat xTs = input[0].reshape(1, numSamplesTotal);
Mat oTs = output[0].reshape(1, numSamplesTotal);
Mat hTs = produceH ? output[1].reshape(1, numSamplesTotal) : Mat();
Mat hCurr = internals[0];


@ -190,13 +190,16 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
CV_Assert(inputs.size() >= 1);
CV_Assert(outputs.size() == 1);
@ -206,14 +209,14 @@ public:
for (size_t ii = 0; ii < outputs.size(); ii++)
{
Mat &inpBlob = *inputs[ii];
Mat &inpBlob = inputs[ii];
Mat &outBlob = outputs[ii];
int rows = inpBlob.size[1];
int cols = inpBlob.size[2];
CV_Assert(inputs.size() < 2 || inputs[1]->dims == 4);
int hNorm = inputs.size() > 1 ? inputs[1]->size[2] : rows;
int wNorm = inputs.size() > 1 ? inputs[1]->size[3] : cols;
CV_Assert(inputs.size() < 2 || inputs[1].dims == 4);
int hNorm = inputs.size() > 1 ? inputs[1].size[2] : rows;
int wNorm = inputs.size() > 1 ? inputs[1].size[3] : cols;
const float *srcData = inpBlob.ptr<float>();
float *dstData = outBlob.ptr<float>();


@ -139,17 +139,19 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < inputs.size(); i++)
{
Mat srcBlob = *inputs[i];
Mat srcBlob = inputs[i];
MatShape inputShape = shape(srcBlob), outShape = shape(outputs[i]);
float *dstData = outputs[0].ptr<float>();
const float *srcData = srcBlob.ptr<float>();


@ -237,17 +237,18 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < outputs.size(); i++)
{
Mat srcBlob = *inputs[i];
Mat srcBlob = inputs[i];
if (outputs[i].data != srcBlob.data)
srcBlob.reshape(1, shape(outputs[i])).copyTo(outputs[i]);
}


@ -57,22 +57,26 @@ public:
return backendId == DNN_BACKEND_OPENCV;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
outWidth = outputs[0].size[3];
}
if (alignCorners && outHeight > 1)
scaleHeight = static_cast<float>(inputs[0]->size[2] - 1) / (outHeight - 1);
scaleHeight = static_cast<float>(inputs[0].size[2] - 1) / (outHeight - 1);
else
scaleHeight = static_cast<float>(inputs[0]->size[2]) / outHeight;
scaleHeight = static_cast<float>(inputs[0].size[2]) / outHeight;
if (alignCorners && outWidth > 1)
scaleWidth = static_cast<float>(inputs[0]->size[3] - 1) / (outWidth - 1);
scaleWidth = static_cast<float>(inputs[0].size[3] - 1) / (outWidth - 1);
else
scaleWidth = static_cast<float>(inputs[0]->size[3]) / outWidth;
scaleWidth = static_cast<float>(inputs[0].size[3]) / outWidth;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@ -80,24 +84,27 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
if (outHeight == inputs[0]->size[2] && outWidth == inputs[0]->size[3])
if (outHeight == inputs[0].size[2] && outWidth == inputs[0].size[3])
return;
Mat& inp = *inputs[0];
Mat& inp = inputs[0];
Mat& out = outputs[0];
if (interpolation == "nearest")
{
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inputs[0].size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inputs[0].size[1]; ++ch)
{
resize(getPlane(inp, n, ch), getPlane(out, n, ch),
Size(outWidth, outHeight), 0, 0, INTER_NEAREST);
@ -203,15 +210,19 @@ public:
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
outWidth = outputs[0].size[3];
}
int inpHeight = inputs[0]->size[2];
int inpWidth = inputs[0]->size[3];
int inpHeight = inputs[0].size[2];
int inpWidth = inputs[0].size[3];
scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
}
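
Both finalize() hunks above derive the resampling scales from the input and output spatial sizes; with alignCorners the first and last pixels of input and output are pinned together, so size - 1 appears on both sides of the ratio. A hedged sketch of that arithmetic (computeScale is a hypothetical helper, not part of the layer):

// Scale factor between an input and an output extent, as in the hunks above.
static float computeScale(int inSize, int outSize, bool alignCorners)
{
    if (alignCorners && outSize > 1)
        return static_cast<float>(inSize - 1) / (outSize - 1);  // pin the corners
    return static_cast<float>(inSize) / outSize;
}
// e.g. inSize = 10, outSize = 5 gives 2.25f with alignCorners and 2.0f without.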

View File

@ -40,8 +40,10 @@ public:
return true;
}
virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
}
@ -57,20 +59,23 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);
Mat &inpBlob = *inputs[0];
Mat &inpBlob = inputs[0];
Mat &outBlob = outputs[0];
// There is a mode in which the first input blob is multiplied by the
// second one instead of by trainable weights.
Mat weights = blobs.empty() ? *inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
if (!weights.empty())
weights = weights.reshape(1, 1);
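
Once the weights and bias are flattened to single rows as above, the layer applies a per-channel affine transform, out = in * w[c] + b[c]. A hedged sketch of that computation follows; applyScale is illustrative rather than the layer's actual kernel, and getPlane is the dnn helper already used elsewhere in this diff.

// Per-channel scale and shift over an NCHW blob (illustrative only).
static void applyScale(const Mat& inp, const Mat& w, const Mat& b, Mat& out)
{
    CV_Assert(inp.dims == 4 && inp.size[1] == (int)w.total());
    for (int n = 0; n < inp.size[0]; ++n)
        for (int c = 0; c < inp.size[1]; ++c)
        {
            Mat dst = getPlane(out, n, c);
            // dst = src * w[c] + b[c], computed plane by plane
            getPlane(inp, n, c).convertTo(dst, CV_32F, w.at<float>(c),
                                          b.empty() ? 0. : (double)b.at<float>(c));
        }
}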

View File

@ -28,17 +28,21 @@ public:
return group == 1;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
if (group != 1)
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
LayerParams lp;
float order[] = {0, 2, 1, 3};
lp.set("order", DictValue::arrayInt(&order[0], 4));
permute = PermuteLayer::create(lp);
Mat inp = *inputs[0];
Mat out = outputs[0];
const Mat& inp = inputs[0];
const Mat& out = outputs[0];
permuteInpShape.resize(4);
permuteInpShape[0] = inp.size[0];
@ -52,11 +56,8 @@ public:
permuteOutShape[2] = permuteInpShape[1];
permuteOutShape[3] = permuteInpShape[3];
inp = inp.reshape(1, permuteInpShape);
out = out.reshape(1, permuteOutShape);
std::vector<Mat*> permuteInputs(1, &inp);
std::vector<Mat> permuteOutputs(1, out);
std::vector<Mat> permuteInputs(1, inp.reshape(1, permuteInpShape));
std::vector<Mat> permuteOutputs(1, out.reshape(1, permuteOutShape));
permute->finalize(permuteInputs, permuteOutputs);
}
}
@ -66,15 +67,18 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
Mat inp = *inputs[0];
Mat inp = inputs[0];
Mat out = outputs[0];
if (inp.data != out.data)
{
@ -82,7 +86,7 @@ public:
{
inp = inp.reshape(1, permuteInpShape);
out = out.reshape(1, permuteOutShape);
std::vector<Mat*> permuteInputs(1, &inp);
std::vector<Mat> permuteInputs(1, inp);
std::vector<Mat> permuteOutputs(1, out);
permute->forward(permuteInputs, permuteOutputs, internals);
}
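
With finalize() and forward() both taking array proxies, a nested helper layer can now be driven with plain std::vector<Mat> instead of std::vector<Mat*>, which is what the ShuffleChannel hunks above do for their internal permutation. A hedged sketch under the same assumptions; finalizePermute is an illustrative helper, and inp, out and the target shapes would come from the enclosing layer.

// Build and finalize a permutation sub-layer on reshaped views (illustrative).
static Ptr<PermuteLayer> finalizePermute(const Mat& inp, const Mat& out,
                                         const std::vector<int>& permuteInpShape,
                                         const std::vector<int>& permuteOutShape)
{
    LayerParams lp;
    float order[] = {0, 2, 1, 3};                        // swap axes 1 and 2
    lp.set("order", DictValue::arrayInt(&order[0], 4));
    Ptr<PermuteLayer> permute = PermuteLayer::create(lp);

    // Plain Mat vectors are accepted directly, so the reshaped views no longer
    // have to be passed through Mat* pointers.
    std::vector<Mat> permuteInputs(1, inp.reshape(1, permuteInpShape));
    std::vector<Mat> permuteOutputs(1, out.reshape(1, permuteOutShape));
    permute->finalize(permuteInputs, permuteOutputs);
    return permute;
}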

View File

@ -144,10 +144,14 @@ public:
return false;
}
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
CV_Assert(inputs.size() == 1);
const MatSize& inpShape = inputs[0]->size;
const MatSize& inpShape = inputs[0].size;
if (sliceRanges.empty())
{
@ -239,15 +243,17 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
const Mat& inpMat = *inputs[0];
const Mat& inpMat = inputs[0];
CV_Assert(outputs.size() == sliceRanges.size());
for (size_t i = 0; i < outputs.size(); i++)
{

View File

@ -191,15 +191,18 @@ public:
OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs, internals;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
const Mat &src = *inputs[0];
const Mat &src = inputs[0];
Mat &dst = outputs[0];
int axis = clamp(axisRaw, src.dims);

View File

@ -83,18 +83,19 @@ public:
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
for (size_t i = 0; i < outputs.size(); i++)
{
CV_Assert(inputs[0]->total() == outputs[i].total());
inputs[0]->copyTo(outputs[i]);
CV_Assert(inputs[0].total() == outputs[i].total());
inputs[0].copyTo(outputs[i]);
}
}
};

View File

@ -551,12 +551,6 @@ bool InfEngineBackendLayer::supportBackend(int backendId)
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
void InfEngineBackendLayer::forward(std::vector<Mat*> &input, std::vector<Mat> &output,
std::vector<Mat> &internals)
{
CV_Error(Error::StsError, "Choose Inference Engine as a preferable backend.");
}
void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals)
{

View File

@ -196,9 +196,6 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
std::vector<Mat> &internals) CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals) CV_OVERRIDE;

View File

@ -61,16 +61,13 @@ static String _tf(TString filename)
void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &outBlobs)
{
size_t ninputs = inpBlobs.size();
std::vector<Mat> inp_(ninputs);
std::vector<Mat*> inp(ninputs);
std::vector<Mat> outp, intp;
std::vector<Mat> inp(ninputs), outp, intp;
std::vector<MatShape> inputs, outputs, internals;
for (size_t i = 0; i < ninputs; i++)
{
inp_[i] = inpBlobs[i].clone();
inp[i] = &inp_[i];
inputs.push_back(shape(inp_[i]));
inp[i] = inpBlobs[i].clone();
inputs.push_back(shape(inp[i]));
}
layer->getMemoryShapes(inputs, 0, outputs, internals);
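
The reworked runLayer() above now clones the inputs straight into a std::vector<Mat>. A hedged sketch of the complete flow such a driver follows with the new API: query shapes, allocate outputs and internals, then finalize and forward on Mat vectors. runSingleLayer and inputBlob are illustrative names, not part of the test suite.

// Drive a single layer end to end with plain Mat vectors (illustrative).
static Mat runSingleLayer(const Ptr<Layer>& layer, const Mat& inputBlob)
{
    std::vector<Mat> inp(1, inputBlob.clone()), outp, intp;
    std::vector<MatShape> inShapes(1, shape(inp[0])), outShapes, intShapes;

    layer->getMemoryShapes(inShapes, 0, outShapes, intShapes);  // 0 required outputs
    for (size_t i = 0; i < outShapes.size(); ++i)
        outp.push_back(Mat(outShapes[i], CV_32F));
    for (size_t i = 0; i < intShapes.size(); ++i)
        intp.push_back(Mat(intShapes[i], CV_32F));

    layer->finalize(inp, outp);       // no Mat* indirection anymore
    layer->forward(inp, outp, intp);
    return outp[0];
}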
@ -1052,8 +1049,6 @@ public:
return backendId == DNN_BACKEND_OPENCV;
}
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE {}
virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals) CV_OVERRIDE {}
};
@ -1151,8 +1146,11 @@ public:
return false;
}
virtual void finalize(const std::vector<Mat*>& inputs, std::vector<Mat> &outputs) CV_OVERRIDE
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
@ -1161,9 +1159,22 @@ public:
}
// Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
Mat& inp = *inputs[0];
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
if (inputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = inputs[0];
Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;

View File

@ -6,7 +6,8 @@
// Third party copyrights are property of their respective owners.
#include "test_precomp.hpp"
#include <opencv2/core/ocl.hpp>
#include <opencv2/core/opencl/ocl_defs.hpp>
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
namespace opencv_test { namespace {
@ -87,9 +88,13 @@ public:
return Ptr<Layer>(new FirstCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
outputs[0].setTo(1);
}
};
@ -104,9 +109,13 @@ public:
return Ptr<Layer>(new SecondCustomLayer(params));
}
virtual void forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE {}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat>& internals) CV_OVERRIDE
void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> outputs;
outputs_arr.getMatVector(outputs);
outputs[0].setTo(2);
}
};
@ -178,4 +187,125 @@ INSTANTIATE_TEST_CASE_P(/**/, setInput, Combine(
dnnBackendsAndTargets()
));
class CustomLayerWithDeprecatedForward CV_FINAL : public Layer
{
public:
CustomLayerWithDeprecatedForward(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new CustomLayerWithDeprecatedForward(params));
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
cv::add(*inputs[0], 0.5f, outputs[0]);
}
};
class CustomLayerWithDeprecatedForwardAndFallback CV_FINAL : public Layer
{
public:
CustomLayerWithDeprecatedForwardAndFallback(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(LayerParams& params)
{
return Ptr<Layer>(new CustomLayerWithDeprecatedForwardAndFallback(params));
}
void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_OCL_RUN(preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16,
forward_ocl(inputs, outputs, internals));
Layer::forward_fallback(inputs, outputs, internals);
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
cv::add(*inputs[0], 0.5f, outputs[0]);
}
#ifdef HAVE_OPENCL
bool forward_ocl(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
{
if (inputs_arr.depth() != CV_32F)
return false;
std::vector<UMat> inputs;
std::vector<UMat> outputs;
inputs_arr.getUMatVector(inputs);
outputs_arr.getUMatVector(outputs);
cv::add(inputs[0], 0.5f, outputs[0]);
return true;
}
#endif
};
typedef testing::TestWithParam<tuple<Backend, Target> > DeprecatedForward;
TEST_P(DeprecatedForward, CustomLayer)
{
const int backend = get<0>(GetParam());
const int target = get<1>(GetParam());
Mat inp(5, 5, CV_32FC1);
randu(inp, -1.0f, 1.0f);
inp = blobFromImage(inp);
CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForward);
try
{
LayerParams lp;
Net net;
net.addLayerToPrev("testLayer", "CustomType", lp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
}
catch (...)
{
LayerFactory::unregisterLayer("CustomType");
throw;
}
LayerFactory::unregisterLayer("CustomType");
}
TEST_P(DeprecatedForward, CustomLayerWithFallback)
{
const int backend = get<0>(GetParam());
const int target = get<1>(GetParam());
Mat inp(5, 5, CV_32FC1);
randu(inp, -1.0f, 1.0f);
inp = blobFromImage(inp);
CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForwardAndFallback);
try
{
LayerParams lp;
Net net;
net.addLayerToPrev("testLayer", "CustomType", lp);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(inp);
Mat out = net.forward();
normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
}
catch (...)
{
LayerFactory::unregisterLayer("CustomType");
throw;
}
LayerFactory::unregisterLayer("CustomType");
}
INSTANTIATE_TEST_CASE_P(/**/, DeprecatedForward, dnnBackendsAndTargets());
}} // namespace

View File

@ -411,15 +411,22 @@ public:
return false;
}
virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
Mat& inp = *inputs[0];
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat& inp = inputs[0];
Mat& out = outputs[0];
const int outHeight = out.size[2];
const int outWidth = out.size[3];
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
for (size_t n = 0; n < inp.size[0]; ++n)
{
for (size_t ch = 0; ch < inputs[0]->size[1]; ++ch)
for (size_t ch = 0; ch < inp.size[1]; ++ch)
{
resize(getPlane(inp, n, ch), getPlane(out, n, ch),
Size(outWidth, outHeight), 0, 0, INTER_NEAREST);

View File

@ -5,6 +5,12 @@ else()
ocv_update(OPENCV_JAVA_LIB_NAME_SUFFIX "${OPENCV_VERSION_MAJOR}${OPENCV_VERSION_MINOR}${OPENCV_VERSION_PATCH}")
endif()
if(MSVC)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4996)
else()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wdeprecated-declarations)
endif()
# get list of modules to wrap
# message(STATUS "Wrapped in java:")
set(OPENCV_JAVA_MODULES)

View File

@ -35,10 +35,23 @@ public:
}
// Implementation of this custom layer is based on https://github.com/cdmh/deeplab-public/blob/master/src/caffe/layers/interp_layer.cpp
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE
virtual void forward(cv::InputArrayOfArrays inputs_arr,
cv::OutputArrayOfArrays outputs_arr,
cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_UNUSED(internals);
cv::Mat& inp = *inputs[0];
if (inputs_arr.depth() == CV_16S)
{
// In the case of the DNN_TARGET_OPENCL_FP16 target, the following method
// converts data from FP16 to FP32 and calls this forward again.
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<cv::Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
cv::Mat& inp = inputs[0];
cv::Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;
@ -78,8 +91,6 @@ public:
}
}
virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) CV_OVERRIDE {}
private:
int outWidth, outHeight;
};
@ -134,8 +145,10 @@ public:
return false;
}
virtual void finalize(const std::vector<cv::Mat*>&, std::vector<cv::Mat> &outputs) CV_OVERRIDE
virtual void finalize(cv::InputArrayOfArrays, cv::OutputArrayOfArrays outputs_arr) CV_OVERRIDE
{
std::vector<cv::Mat> outputs;
outputs_arr.getMatVector(outputs);
if (!outWidth && !outHeight)
{
outHeight = outputs[0].size[2];
@ -145,9 +158,23 @@ public:
// This implementation is based on a reference implementation from
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &) CV_OVERRIDE
virtual void forward(cv::InputArrayOfArrays inputs_arr,
cv::OutputArrayOfArrays outputs_arr,
cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
cv::Mat& inp = *inputs[0];
if (inputs_arr.depth() == CV_16S)
{
// In the case of the DNN_TARGET_OPENCL_FP16 target, the following method
// converts data from FP16 to FP32 and calls this forward again.
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
std::vector<cv::Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
cv::Mat& inp = inputs[0];
cv::Mat& out = outputs[0];
const float* inpData = (float*)inp.data;
float* outData = (float*)out.data;
@ -185,8 +212,6 @@ public:
}
}
virtual void forward(cv::InputArrayOfArrays, cv::OutputArrayOfArrays, cv::OutputArrayOfArrays) CV_OVERRIDE {}
private:
static inline int offset(const cv::MatSize& size, int c, int x, int y, int b)
{
@ -221,14 +246,15 @@ public:
//! [MyLayer::getMemoryShapes]
//! [MyLayer::forward]
virtual void forward(std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs, std::vector<cv::Mat> &internals) CV_OVERRIDE;
virtual void forward(cv::InputArrayOfArrays inputs,
cv::OutputArrayOfArrays outputs,
cv::OutputArrayOfArrays internals) CV_OVERRIDE;
//! [MyLayer::forward]
//! [MyLayer::finalize]
virtual void finalize(const std::vector<cv::Mat*> &inputs, std::vector<cv::Mat> &outputs) CV_OVERRIDE;
virtual void finalize(cv::InputArrayOfArrays inputs,
cv::OutputArrayOfArrays outputs) CV_OVERRIDE;
//! [MyLayer::finalize]
virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals) CV_OVERRIDE;
};
//! [A custom layer interface]
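
A hedged usage sketch for the interface above: the custom class has to be registered under the type name the model file uses before the network is imported, so the importer can instantiate it. The type string and model path below are illustrative placeholders, and MyLayer is assumed to be fully implemented as declared above.

#include <opencv2/dnn.hpp>
#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

int main()
{
    // "MyType" and the model file name are hypothetical.
    CV_DNN_REGISTER_LAYER_CLASS(MyType, MyLayer);
    cv::dnn::Net net = cv::dnn::readNet("model_with_custom_layer.pb");
    // ... set the input and call net.forward() as usual ...
    return 0;
}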