Fix conv/deconv/fc layers FLOPS computation

Dmitry Kurtaev 2017-12-06 21:58:36 +03:00
parent ef04ca9e0f
commit ef0650179b
3 changed files with 4 additions and 3 deletions

View File

@@ -87,6 +87,7 @@ public:
     size_t weightsMemory = 0, blobsMemory = 0;
     net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
     int64 flops = net.getFLOPS(netInputShape);
+    CV_Assert(flops > 0);
     net.forward(outputLayer); // warmup
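
The new assertion guards against exactly the overflow fixed below: a FLOPS count that wraps in 32-bit arithmetic can come out negative (or zero), so checking flops > 0 before the timed run makes the perf harness fail fast on a bad count.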

View File

@@ -1022,7 +1022,7 @@ public:
     int64 flops = 0;
     for (int i = 0; i < inputs.size(); i++)
     {
-        flops += total(outputs[i])*(2*kernel.area()*inputs[i][1] + 1);
+        flops += total(outputs[i])*(CV_BIG_INT(2)*kernel.area()*inputs[i][1] + 1);
     }
     return flops;
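
The root cause is the same in all three layers: every operand on the right-hand side of the += is a 32-bit int, so the whole expression is evaluated in 32 bits and wraps before it is widened and added to the int64 accumulator. CV_BIG_INT(2) expands to the int64 literal 2LL, which promotes the entire expression to 64-bit arithmetic. A minimal standalone sketch of the wraparound, with hypothetical but realistic layer sizes (7x7 kernel, 512 channels, 56x56 output):

#include <cstdint>
#include <iostream>

int main()
{
    int outTotal   = 512 * 56 * 56; // total(outputs[i]): elements in the output blob
    int kernelArea = 7 * 7;         // kernel.area()
    int inChannels = 512;           // inputs[i][1]

    // Old code: int * int is computed in 32 bits, so the product wraps
    // (signed overflow, formally UB) before the result is widened to int64.
    int64_t before = outTotal * (2 * kernelArea * inChannels + 1);

    // Fixed code: CV_BIG_INT(2) is the literal 2LL, so the whole
    // expression is evaluated in 64-bit arithmetic.
    int64_t after = outTotal * (2LL * kernelArea * inChannels + 1);

    std::cout << before << "\n"  // wrapped: -1038581760 on typical platforms
              << after  << "\n"; // correct: 80565796864 (~8.1e10 FLOPS)
}

With these sizes the wrapped value is negative, which is exactly the condition the new CV_Assert(flops > 0) in the perf test catches.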
@@ -1440,7 +1440,7 @@ public:
     for (int i = 0; i < inputs.size(); i++)
     {
-        flops += 2*outChannels*kernel.area()*total(inputs[i]);
+        flops += CV_BIG_INT(2)*outChannels*kernel.area()*total(inputs[i]);
     }
     return flops;
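
The deconvolution count follows the same pattern: CV_BIG_INT(2) makes the per-element cost 2*outChannels*kernel.area() an int64 value before it is multiplied by total(inputs[i]), so the product no longer wraps.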

View File

@@ -397,7 +397,7 @@ public:
     int innerSize = blobs[0].size[1];
     for(int i = 0; i < outputs.size(); i++)
     {
-        flops += 3*innerSize*total(outputs[i]);
+        flops += CV_BIG_INT(3)*innerSize*total(outputs[i]);
     }
     return flops;
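
Likewise for the fully connected layer: promoting the constant keeps 3*innerSize*total(outputs[i]) in 64-bit arithmetic, and with a few thousand inner neurons and a large output blob the 32-bit product overflows just as easily.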