dnn(opencl): bypass unsupported fusion cases

commit 718dd9f170
parent e87a0baa4b
Author: Alexander Alekhin
Date:   2020-10-09 11:57:49 +00:00

4 changed files with 18 additions and 18 deletions
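
On the OpenCV/OpenCL backend, fusing a Power activation into the preceding convolution is not handled correctly when the activation's scale parameter differs from 1.0 (issue https://github.com/opencv/opencv/issues/17964). The changes below bypass that fusion case: the graph rewrite and the convolution layer's setActivation() now accept Power only with scale == 1.0f, the tests drop the corresponding OpenCL skip tags and fusion expectations, and normAssert() reports the reference magnitude on failure.

For reference, a minimal standalone sketch (not OpenCV code) of the Power activation semantics as defined by the Caffe-style Power layer:

    #include <cmath>

    // f(x) = (shift + scale * x) ^ power.
    // A scale != 1.0f would have to be applied to the convolution output
    // before the power is taken; the fused OpenCL kernel does not handle
    // that well (#17964), so fusion is now restricted to scale == 1.0f.
    static float powerActivation(float x, float power, float scale, float shift)
    {
        return std::pow(shift + scale * x, power);
    }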


@@ -2460,10 +2460,12 @@ struct Net::Impl : public detail::NetImplBase
                 if( nextData )
                     nextActivLayer = nextData->layerInstance.dynamicCast<ActivationLayer>();
 
+                Ptr<PowerLayer> activ_power;
                 if( !nextActivLayer.empty() &&
                     (!nextData->type.compare("ReLU") ||
                      !nextData->type.compare("ChannelsPReLU") ||
-                     !nextData->type.compare("Power")) &&
+                     (!nextData->type.compare("Power") && (activ_power = nextActivLayer.dynamicCast<PowerLayer>()) && activ_power->scale == 1.0f)
+                    ) &&
                     currLayer->setActivation(nextActivLayer) )
                 {
                     CV_Assert_N(biasLayerData->outputBlobsWrappers.size() == 1, ld.inputBlobsWrappers.size() == 1);
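
The new condition performs the downcast and the parameter check in one pass: the result of dynamicCast is assigned to activ_power inside the condition, and short-circuit && guarantees the scale comparison only runs when the cast produced a non-empty pointer. A minimal sketch of the idiom (using OpenCV's Ptr/PowerLayer types as in the hunk above; someLayer is illustrative):

    Ptr<PowerLayer> p;
    if (!someLayer.empty() &&
        (p = someLayer.dynamicCast<PowerLayer>()) &&  // assignment yields the Ptr, tested for non-null
        p->scale == 1.0f)                             // evaluated only if the cast succeeded
    {
        // p is a non-empty PowerLayer with identity scale; fusion is allowed
    }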


@@ -46,6 +46,8 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 
+#include <opencv2/core/utils/logger.hpp>
+
 #include "opencv2/core/hal/hal.hpp"
 #include "opencv2/core/hal/intrin.hpp"
 #include <iostream>
@@ -371,6 +373,14 @@ public:
         Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
         if (!activ_power.empty())
         {
+            if (activ_power->scale != 1.0f)  // not supported well by implementation, #17964
+            {
+                // FIXIT no way to check number of blobs (like, eltwise input)
+                CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
+                activ.release();
+                newActiv = false;
+                return false;
+            }
             if (activ_power->scale != 1.f || activ_power->shift != 0.f)
             {
                 const int outCh = blobs[0].size[0];
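
The early return added above is the actual bypass: when scale != 1.0f the convolution layer releases the activation, clears newActiv, and reports false, so the Power layer keeps executing as a separate (unfused) layer. A hedged sketch of the caller-side contract (simplified; not the real graph-rewrite loop):

    if (currLayer->setActivation(nextActivLayer))
    {
        // fused: the activation runs inside the convolution's OpenCL kernel
    }
    else
    {
        // bypassed: the activation layer stays in the graph and runs on its own
    }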


@@ -63,10 +63,10 @@ void normAssert(
         double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
 {
     double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
-    EXPECT_LE(normL1, l1) << comment;
+    EXPECT_LE(normL1, l1) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 
     double normInf = cvtest::norm(ref, test, cv::NORM_INF);
-    EXPECT_LE(normInf, lInf) << comment;
+    EXPECT_LE(normInf, lInf) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
 }
 
 std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
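
The appended stream term prints the infinity norm of the reference blob, so a tolerance failure can be judged against the magnitude of the expected data (an absolute error of 0.01 means something different for |ref| = 0.1 than for |ref| = 100). A hedged usage sketch with illustrative values:

    // void normAssert(ref, test, comment = "", l1 = 0.00001, lInf = 0.0001)
    normAssert(refBlob, outBlob, "conv+Power output", /*l1=*/1e-5, /*lInf=*/1e-4);
    // On failure, gtest appends the streamed message, e.g.:
    //   Expected: (normInf) <= (lInf), actual: 0.02 vs 0.0001 conv+Power output |ref| = 17.5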


@@ -2219,10 +2219,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
     Backend backendId = get<0>(get<2>(GetParam()));
     Target targetId = get<1>(get<2>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams);
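
The get<0>(get<2>(GetParam())) pattern above unpacks the backend/target pair from a nested test-parameter tuple. A minimal self-contained sketch of the idiom (type names are illustrative, not the real fixture):

    #include <string>
    #include <tuple>

    using BackendTarget = std::tuple<int, int>;  // (backendId, targetId)
    using FusionParam = std::tuple<std::string /*conv*/, std::string /*activation*/, BackendTarget>;

    int backendOf(const FusionParam& p) { return std::get<0>(std::get<2>(p)); }
    int targetOf(const FusionParam& p)  { return std::get<1>(std::get<2>(p)); }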
@@ -2235,7 +2231,7 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // all activations are fused
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId);
         }
     }
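
Power is commented out of the expected-fusion list (here and in the two tests below) rather than deleted: with the bypass in place, whether a Power activation fuses on OpenCL targets now depends on its scale parameter, and the parameterized runs can include scale != 1 variants, so fusion can no longer be asserted unconditionally. A sketch of the condition the expectation would need instead (power_scale is a hypothetical stand-in for the test's activation parameter):

    // Fusion of Power on OpenCL targets is now conditional:
    bool powerFuses = (power_scale == 1.0f);  // hypothetical variable
    if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6"
        || actType == "TanH" || (actType == "Power" && powerFuses))
        expectedFusedLayers.push_back(activId);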
@@ -2349,10 +2345,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
     if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
@@ -2369,7 +2361,7 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
         {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
             {
                 expectedFusedLayers.push_back(eltwiseId);
                 expectedFusedLayers.push_back(activId);
@@ -2431,10 +2423,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
 
-    // bug: https://github.com/opencv/opencv/issues/17964
-    if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
-
     Net net;
     int convId = net.addLayer(convParams.name, convParams.type, convParams);
     int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);
@@ -2451,7 +2439,7 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
             expectedFusedLayers.push_back(activId); // activation fused with convolution
         else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
        {
-            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
+            if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
                 expectedFusedLayers.push_back(activId); // activation fused with convolution
         }
     }