Fuse multipliers but not convolution layers' weights

Dmitry Kurtaev 2018-05-04 12:09:06 +03:00
parent 777d77848c
commit c99c3e761e
2 changed files with 9 additions and 4 deletions
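
The gist of the change: when a following layer's per-channel scale (e.g. BatchNorm/Scale) is folded into the convolution, the layer no longer keeps a full CV_64F shadow copy of its fused weights (weightsMat_doubles) that gets rescaled in place; instead it keeps one accumulated double multiplier per output channel and rebuilds each fused row from the untouched original weight blob. A minimal standalone sketch of that idea, with hypothetical names (fuseChannelScale is not the actual ConvolutionLayerImpl code):

#include <opencv2/core.hpp>
#include <vector>

// Sketch: fold a per-output-channel scale w into conv weights by accumulating
// multipliers and always recomputing the fused rows from the original weights.
static void fuseChannelScale(const cv::Mat& originalWeights,   // outCn x K, CV_32F, never modified
                             cv::Mat& fusedWeights,            // same shape, used for inference
                             std::vector<double>& multipliers, // one accumulated factor per channel
                             std::vector<float>& bias,
                             const cv::Mat& w)                 // outCn x 1, CV_32F scale to fuse
{
    for (int i = 0; i < originalWeights.rows; ++i)
    {
        double wi = w.at<float>(i);
        multipliers[i] *= wi;   // accumulate in double precision
        // Rebuild the fused row from pristine data rather than rescaling the
        // already-fused float row, which would compound rounding error.
        cv::multiply(originalWeights.row(i), multipliers[i], fusedWeights.row(i));
        bias[i] *= wi;
    }
}

Only the multipliers are fused across repeated calls; the stored original weights stay untouched, which is what the commit title describes.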

View File

@@ -270,6 +270,7 @@ void UpgradeV0PaddingLayers(const NetParameter& param,
 bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_,
                              V1LayerParameter* layer_param) {
+  CV_Assert(v0_layer_connection_ != NULL);
   const V1LayerParameter& v0_layer_connection = *v0_layer_connection_;
   bool is_fully_compatible = true;
   layer_param->Clear();
@@ -791,6 +792,7 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
 bool UpgradeV1Net(NetParameter* net_param) {
   // V1LayerParameter layers -> LayerParameter layer
+  CV_Assert(net_param != NULL);
   bool is_fully_compatible = true;
   if (net_param->layer_size() > 0) {
     LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
@@ -834,6 +836,7 @@ void UpgradeNetBatchNorm(NetParameter* net_param) {
 bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_,
                              LayerParameter* layer_param) {
+  CV_Assert(v1_layer_param_ != NULL);
   const V1LayerParameter& v1_layer_param = *v1_layer_param_;
   layer_param->Clear();
   bool is_fully_compatible = true;
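
The first file's change is purely defensive: each of the Caffe upgrade helpers now asserts its input pointer before binding a const reference to it. A standalone illustration of that guard pattern, with a hypothetical stub type and function name:

#include <opencv2/core.hpp>   // CV_Assert

struct V1LayerParameterStub {};   // stand-in for the real protobuf message type

static bool upgradeLayer(const V1LayerParameterStub* src_, V1LayerParameterStub* /*dst*/)
{
    CV_Assert(src_ != NULL);                  // fail loudly instead of dereferencing NULL
    const V1LayerParameterStub& src = *src_;  // safe to bind the reference now
    (void)src;
    return true;
}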

View File

@@ -169,7 +169,8 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
 {
 public:
     enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
-    Mat weightsMat, weightsMat_doubles;
+    Mat weightsMat;
+    std::vector<double> weightsMultipliers;
     std::vector<float> biasvec;
     std::vector<float> reluslope;
     Ptr<ActivationLayer> activ;
@@ -259,7 +260,7 @@ public:
             wm = wm_aligned;
         }
         weightsMat = wm;
-        weightsMat.convertTo(weightsMat_doubles, CV_64F);
+        weightsMultipliers.assign(outCn, 1.0);
         Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
         biasvec.resize(outCn+2);
@@ -335,13 +336,14 @@ public:
         if (!w.empty())
         {
+            Mat originWeights = blobs[0].reshape(1, outCn);
             for (int i = 0; i < outCn; ++i)
             {
                 double wi = w.at<float>(i);
-                cv::multiply(slice(weightsMat_doubles, i), wi, slice(weightsMat_doubles, i));
+                weightsMultipliers[i] *= wi;
+                cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
                 biasvec[i] *= wi;
             }
-            weightsMat_doubles.convertTo(weightsMat, weightsMat.type());
         }
         if (!b.empty())
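
A note on the design choice, as far as the diff shows it: the old code paid for repeated fusions with a full CV_64F copy of the weight matrix, while the new code pays only outCn doubles and relies on blobs[0] staying pristine, so every fusion result is computed as original_row * accumulated_multiplier instead of a chain of in-place float rescalings. A toy illustration of that invariant (arbitrary values, plain OpenCV core, not the dnn API):

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Two output channels with three weights each; the values are arbitrary.
    cv::Mat original = (cv::Mat_<float>(2, 3) << 1, 2, 3, 4, 5, 6);
    cv::Mat fused = original.clone();
    std::vector<double> multipliers(2, 1.0);

    // Two successive fusions (e.g. a BatchNorm and then a Scale folding in).
    const double scales[2][2] = { {0.5, 3.0}, {2.0, 0.25} };
    for (int pass = 0; pass < 2; ++pass)
    {
        for (int i = 0; i < original.rows; ++i)
        {
            multipliers[i] *= scales[pass][i];
            // The fused row is always rebuilt from the untouched original row.
            cv::multiply(original.row(i), multipliers[i], fused.row(i));
        }
    }

    // Channel 0 ends up scaled by 0.5 * 2.0 = 1.0, channel 1 by 3.0 * 0.25 = 0.75.
    std::cout << fused << std::endl;
    return 0;
}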