Mirror of https://github.com/opencv/opencv.git
Merge pull request #9517 from dkurt:tf_mobilenet
Commit e012ccda4a
@@ -359,6 +359,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     static Ptr<ReLULayer> create(const LayerParams &params);
 };
 
+class CV_EXPORTS ReLU6Layer : public ActivationLayer
+{
+public:
+    static Ptr<ReLU6Layer> create(const LayerParams &params);
+};
+
 class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer
 {
 public:
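Note: ReLU6 is the clipped rectifier that MobileNet uses; the new layer clamps every activation into [min_value, max_value], which defaults to 0 and 6 (see the create() implementation further down). A minimal standalone sketch of the element-wise math, in plain C++ with no OpenCV dependencies:

    #include <algorithm>
    #include <cstdio>

    // ReLU6(x) = min(max(x, minValue), maxValue), with the default range [0, 6].
    static float relu6(float x, float minValue = 0.0f, float maxValue = 6.0f)
    {
        return std::min(std::max(x, minValue), maxValue);
    }

    int main()
    {
        const float samples[5] = { -1.5f, 0.0f, 3.2f, 6.0f, 11.7f };
        for (int i = 0; i < 5; i++)
            std::printf("relu6(%5.1f) = %.1f\n", samples[i], relu6(samples[i]));
        return 0;
    }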
@@ -94,6 +94,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(LPNormalize, LPNormalizeLayer);
 
     CV_DNN_REGISTER_LAYER_CLASS(ReLU, ReLULayer);
+    CV_DNN_REGISTER_LAYER_CLASS(ReLU6, ReLU6Layer);
     CV_DNN_REGISTER_LAYER_CLASS(ChannelsPReLU, ChannelsPReLULayer);
     CV_DNN_REGISTER_LAYER_CLASS(Sigmoid, SigmoidLayer);
     CV_DNN_REGISTER_LAYER_CLASS(TanH, TanHLayer);
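Registering the class under the name "ReLU6" is what lets the rest of the module, including the TensorFlow importer further down, resolve that type string at run time. If I recall the factory API correctly (treat the exact call as an assumption; it is not part of this patch), a registered layer can be instantiated by name roughly like this:

    #include <opencv2/dnn.hpp>
    using namespace cv::dnn;

    int main()
    {
        // Sketch only: assumes LayerFactory::createLayerInstance() resolves
        // names registered with CV_DNN_REGISTER_LAYER_CLASS, as "ReLU6" is above.
        LayerParams params;
        Ptr<Layer> relu6 = LayerFactory::createLayerInstance("ReLU6", params);
        return relu6.empty() ? 1 : 0;
    }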
@@ -248,6 +248,62 @@ struct ReLUFunctor
     int64 getFLOPSPerElement() const { return 1; }
 };
 
+struct ReLU6Functor
+{
+    typedef ReLU6Layer Layer;
+    float minValue, maxValue;
+
+    ReLU6Functor(float minValue_ = 0.0f, float maxValue_ = 6.0f)
+        : minValue(minValue_), maxValue(maxValue_)
+    {
+        CV_Assert(minValue <= maxValue);
+    }
+
+    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
+    {
+        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+        {
+            int i = 0;
+#if CV_SIMD128
+            v_float32x4 minV = v_setall_f32(minValue), maxV = v_setall_f32(maxValue);
+            for( ; i <= len - 16; i += 16 )
+            {
+                v_float32x4 x0 = v_load(srcptr + i);
+                v_float32x4 x1 = v_load(srcptr + i + 4);
+                v_float32x4 x2 = v_load(srcptr + i + 8);
+                v_float32x4 x3 = v_load(srcptr + i + 12);
+                x0 = v_min(v_max(minV, x0), maxV);
+                x1 = v_min(v_max(minV, x1), maxV);
+                x2 = v_min(v_max(minV, x2), maxV);
+                x3 = v_min(v_max(minV, x3), maxV);
+                v_store(dstptr + i, x0);
+                v_store(dstptr + i + 4, x1);
+                v_store(dstptr + i + 8, x2);
+                v_store(dstptr + i + 12, x3);
+            }
+#endif
+            for( ; i < len; i++ )
+            {
+                float x = srcptr[i];
+                if (x >= minValue)
+                    dstptr[i] = x <= maxValue ? x : maxValue;
+                else
+                    dstptr[i] = minValue;
+            }
+        }
+    }
+
+#ifdef HAVE_HALIDE
+    void attachHalide(const Halide::Expr& input, Halide::Func& top)
+    {
+        Halide::Var x("x"), y("y"), c("c"), n("n");
+        top(x, y, c, n) = clamp(input, minValue, maxValue);
+    }
+#endif // HAVE_HALIDE
+
+    int64 getFLOPSPerElement() const { return 2; }
+};
+
 struct TanHFunctor
 {
     typedef TanHLayer Layer;
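The vector path above consumes 16 floats per iteration (four v_float32x4 registers of four lanes each), and the scalar loop finishes the remaining len % 16 elements; note that the scalar branch maps anything failing the `x >= minValue` test, including NaN, to minValue. A standalone sketch of the same blocked-plus-tail loop shape, with plain std::min/std::max standing in for the intrinsics:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const float minValue = 0.0f, maxValue = 6.0f;
        const int len = 19;                 // deliberately not a multiple of 16
        float src[len], dst[len];
        for (int i = 0; i < len; i++)
            src[i] = (float)(i - 8);        // some values below 0, some above 6

        int i = 0;
        // "Vector" body: in the real functor this is four v_float32x4 loads,
        // clamps and stores; here the same math is simply done 16 at a time.
        for (; i <= len - 16; i += 16)
            for (int k = 0; k < 16; k++)
                dst[i + k] = std::min(std::max(src[i + k], minValue), maxValue);

        // Scalar tail: handles the leftover len % 16 elements.
        for (; i < len; i++)
            dst[i] = std::min(std::max(src[i], minValue), maxValue);

        for (int j = 0; j < len; j++)
            std::printf("%5.1f -> %.1f\n", src[j], dst[j]);
        return 0;
    }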
@@ -517,6 +573,15 @@ Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
     return l;
 }
 
+Ptr<ReLU6Layer> ReLU6Layer::create(const LayerParams& params)
+{
+    float minValue = params.get<float>("min_value", 0.0f);
+    float maxValue = params.get<float>("max_value", 6.0f);
+    Ptr<ReLU6Layer> l(new ElementWiseLayer<ReLU6Functor>(ReLU6Functor(minValue, maxValue)));
+    l->setParamsFrom(params);
+    return l;
+}
+
 Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
 {
     Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
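The factory reads optional min_value / max_value entries from LayerParams and falls back to the standard 0..6 range. Building the layer directly therefore looks roughly like the sketch below; the parameter names are exactly the ones read above:

    #include <opencv2/dnn.hpp>
    using namespace cv::dnn;

    int main()
    {
        LayerParams params;
        params.name = "relu6";
        params.type = "ReLU6";
        params.set("min_value", 0.0f);   // optional, 0.0f is already the default
        params.set("max_value", 6.0f);   // optional, 6.0f is already the default
        Ptr<ReLU6Layer> layer = ReLU6Layer::create(params);
        return layer.empty() ? 1 : 0;
    }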
@@ -85,11 +85,38 @@ static Mat getTensorContent(const tensorflow::TensorProto &tensor)
     switch (tensor.dtype())
     {
         case tensorflow::DT_FLOAT:
-            return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<float>& field = tensor.float_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_DOUBLE:
-            return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<double>& field = tensor.double_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_64FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_INT32:
-            return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<int32_t>& field = tensor.int_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32SC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_HALF:
         {
             Mat halfs;
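Background for this hunk: a TensorFlow TensorProto stores its values either as a packed byte string (tensor_content) or, typically for small constants, in a typed repeated field such as float_val. The importer previously assumed the packed form; it now falls back to the repeated field when tensor_content is empty. A simplified standalone sketch of that selection logic, with std::string and std::vector standing in for the protobuf types:

    #include <cstring>
    #include <string>
    #include <vector>

    // Simplified stand-in for reading a float tensor: prefer the packed bytes,
    // otherwise fall back to the typed repeated field (here a std::vector).
    static std::vector<float> tensorToFloats(const std::string& content,
                                             const std::vector<float>& float_val)
    {
        if (!content.empty())
        {
            std::vector<float> out(content.size() / sizeof(float));
            std::memcpy(out.data(), content.data(), out.size() * sizeof(float));
            return out;
        }
        return float_val;   // small constants are often stored this way
    }

    int main()
    {
        std::vector<float> v = tensorToFloats("", std::vector<float>(1, 6.0f));
        return v.size() == 1 ? 0 : 1;
    }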
@@ -573,7 +600,7 @@ void TFImporter::populateNet(Net dstNet)
         if(layers_to_ignore.find(li) != layers_to_ignore.end())
             continue;
 
-        if (type == "Conv2D" || type == "SpaceToBatchND")
+        if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative")
         {
             // The first node of dilated convolution subgraph.
             // Extract input node, dilation rate and paddings.
@@ -621,7 +648,28 @@ void TFImporter::populateNet(Net dstNet)
             }
 
             kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            const int* kshape = layerParams.blobs[0].size.p;
+            int* kshape = layerParams.blobs[0].size.p;
+            if (type == "DepthwiseConv2dNative")
+            {
+                const int chMultiplier = kshape[0];
+                const int inCh = kshape[1];
+                const int height = kshape[2];
+                const int width = kshape[3];
+
+                Mat copy = layerParams.blobs[0].clone();
+                float* src = (float*)copy.data;
+                float* dst = (float*)layerParams.blobs[0].data;
+                for (int i = 0; i < chMultiplier; ++i)
+                    for (int j = 0; j < inCh; ++j)
+                        for (int s = 0; s < height * width; ++s)
+                        {
+                            int src_i = (i * inCh + j) * height * width + s;
+                            int dst_i = (j * chMultiplier + i) * height * width + s;
+                            dst[dst_i] = src[src_i];
+                        }
+                kshape[0] = inCh * chMultiplier;
+                kshape[1] = 1;
+            }
             layerParams.set("kernel_h", kshape[2]);
             layerParams.set("kernel_w", kshape[3]);
             layerParams.set("num_output", kshape[0]);
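What the permutation above accomplishes: after kernelFromTensor the depthwise kernel is shaped [channel_multiplier, in_channels, H, W], while the convolution layer expects the output-channel dimension first, i.e. [in_channels * channel_multiplier, 1, H, W] with the multiplier index varying fastest within each input channel. Hence the index swap src_i = (i*inCh + j)*H*W + s, dst_i = (j*chMultiplier + i)*H*W + s. A tiny standalone sketch of the remapping on a 2x3x1x1 toy kernel:

    #include <cstdio>

    int main()
    {
        // Toy depthwise kernel: channel_multiplier = 2, in_channels = 3, H = W = 1.
        const int chMultiplier = 2, inCh = 3, height = 1, width = 1;
        const int n = chMultiplier * inCh * height * width;
        float src[n], dst[n];
        for (int k = 0; k < n; k++)
            src[k] = (float)k;               // label each weight by its source index

        // Same remapping as the importer: [chMultiplier, inCh, H, W] ->
        // [inCh * chMultiplier, 1, H, W], multiplier fastest per input channel.
        for (int i = 0; i < chMultiplier; ++i)
            for (int j = 0; j < inCh; ++j)
                for (int s = 0; s < height * width; ++s)
                {
                    int src_i = (i * inCh + j) * height * width + s;
                    int dst_i = (j * chMultiplier + i) * height * width + s;
                    dst[dst_i] = src[src_i];
                }

        for (int k = 0; k < n; k++)
            std::printf("dst[%d] = src[%d]\n", k, (int)dst[k]);
        return 0;
    }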
@@ -689,6 +737,10 @@ void TFImporter::populateNet(Net dstNet)
             layerParams.blobs.resize(1);
 
             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
+            if (next_layers.empty())
+            {
+                next_layers = getNextLayers(net, name, "Add");
+            }
             if (next_layers.size() == 1) {
                 layerParams.set("bias_term", true);
                 layerParams.blobs.resize(2);
@@ -840,20 +892,20 @@ void TFImporter::populateNet(Net dstNet)
         {
             // Multiplication by constant.
             CV_Assert(layer.input_size() == 2);
+            Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
+            CV_Assert(scaleMat.type() == CV_32FC1);
 
-            float scale;
-            if (!getConstBlob(layer, value_id).float_val().empty())
-                scale = getConstBlob(layer, value_id).float_val()[0];
-            else
+            int id;
+            if (scaleMat.total() == 1)  // is a scalar.
             {
-                Mat scaleMat;
-                blobFromTensor(getConstBlob(layer, value_id), scaleMat);
-                CV_Assert(scaleMat.total() == 1 && scaleMat.type() == CV_32FC1);
-                scale = scaleMat.at<float>(0, 0);
+                layerParams.set("scale", scaleMat.at<float>(0));
+                id = dstNet.addLayer(name, "Power", layerParams);
             }
-            layerParams.set("scale", scale);
-
-            int id = dstNet.addLayer(name, "Power", layerParams);
+            else  // is a vector
+            {
+                layerParams.blobs.resize(1, scaleMat);
+                id = dstNet.addLayer(name, "Scale", layerParams);
+            }
             layer_id[name] = id;
 
             Pin inp0 = parsePin(layer.input(0));
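The rewritten Mul handling keys off the size of the constant: a single-element tensor becomes a Power layer with a scalar "scale" parameter, while a multi-element tensor is attached as the blob of a Scale layer so each channel gets its own factor. A short sketch of the same decision applied to cv::Mat constants:

    #include <opencv2/core.hpp>
    #include <cstdio>
    using namespace cv;

    // Mirrors the importer's test: one element -> scalar Power layer,
    // several elements -> per-channel Scale layer.
    static const char* layerTypeForScale(const Mat& scaleMat)
    {
        CV_Assert(scaleMat.type() == CV_32FC1);
        return scaleMat.total() == 1 ? "Power" : "Scale";
    }

    int main()
    {
        Mat scalar = Mat::ones(1, 1, CV_32FC1) * 0.017f;
        Mat perChannel = (Mat_<float>(1, 3) << 0.5f, 1.0f, 2.0f);
        std::printf("%s\n", layerTypeForScale(scalar));      // Power
        std::printf("%s\n", layerTypeForScale(perChannel));  // Scale
        return 0;
    }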
@@ -1006,12 +1058,13 @@ void TFImporter::populateNet(Net dstNet)
         }
         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
                  type == "Relu" || type == "Elu" || type == "Softmax" ||
-                 type == "Identity")
+                 type == "Identity" || type == "Relu6")
         {
             std::string dnnType = type;
             if (type == "Abs") dnnType = "AbsVal";
             else if (type == "Tanh") dnnType = "TanH";
             else if (type == "Relu") dnnType = "ReLU";
+            else if (type == "Relu6") dnnType = "ReLU6";
             else if (type == "Elu") dnnType = "ELU";
 
             int id = dstNet.addLayer(name, dnnType, layerParams);
@@ -93,11 +93,12 @@ static void runTensorFlowNet(const std::string& prefix,
     normAssert(target, output, "", l1, lInf);
 }
 
-TEST(Test_TensorFlow, single_conv)
+TEST(Test_TensorFlow, conv)
 {
     runTensorFlowNet("single_conv");
     runTensorFlowNet("atrous_conv2d_valid");
     runTensorFlowNet("atrous_conv2d_same");
+    runTensorFlowNet("depthwise_conv2d");
 }
 
 TEST(Test_TensorFlow, padding)
@@ -116,8 +117,9 @@ TEST(Test_TensorFlow, pad_and_concat)
     runTensorFlowNet("pad_and_concat");
 }
 
-TEST(Test_TensorFlow, fused_batch_norm)
+TEST(Test_TensorFlow, batch_norm)
 {
+    runTensorFlowNet("batch_norm");
     runTensorFlowNet("fused_batch_norm");
 }
 
@@ -133,6 +135,11 @@ TEST(Test_TensorFlow, deconvolution)
     runTensorFlowNet("deconvolution");
 }
 
+TEST(Test_TensorFlow, matmul)
+{
+    runTensorFlowNet("matmul");
+}
+
 TEST(Test_TensorFlow, fp16)
 {
     const float l1 = 1e-3;
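Taken together, these changes are what let a frozen TensorFlow MobileNet graph import cleanly: depthwise convolutions, Relu6 activations, Add-based bias and scalar or per-channel Mul are all handled now. A hedged sketch of running such a model after this patch; the file names, input size and scaling below are assumptions typical for MobileNet, not values fixed by this commit:

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>
    using namespace cv;

    int main()
    {
        // "mobilenet_frozen.pb", "example.jpg" and the 224x224 / 1/127.5
        // preprocessing are illustrative; use what your exported graph expects.
        dnn::Net net = dnn::readNetFromTensorflow("mobilenet_frozen.pb");
        Mat img = imread("example.jpg");
        Mat blob = dnn::blobFromImage(img, 1.0 / 127.5, Size(224, 224),
                                      Scalar(127.5, 127.5, 127.5), true);
        net.setInput(blob);
        Mat prob = net.forward();
        Point classId;
        minMaxLoc(prob.reshape(1, 1), 0, 0, 0, &classId);  // index of top score
        return classId.x >= 0 ? 0 : 1;
    }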