diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index c408921c04..41fe0df70f 100644
--- a/modules/dnn/include/opencv2/dnn/all_layers.hpp
+++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -291,7 +291,7 @@ CV__DNN_INLINE_NS_BEGIN
         static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
         bool fusedActivation = false;
         bool fusedAdd = false;
-        bool useWinograd = false; // Flag whether to use Winograd to speed up 3x3 convolution.
+        bool useWinograd = true; // Flag whether to use Winograd to speed up 3x3 convolution.
     };

     class CV_EXPORTS ConvolutionLayerInt8 : public BaseConvolutionLayer
diff --git a/modules/dnn/src/layers/layers_common.cpp b/modules/dnn/src/layers/layers_common.cpp
index 48401893f0..3b3a007b06 100644
--- a/modules/dnn/src/layers/layers_common.cpp
+++ b/modules/dnn/src/layers/layers_common.cpp
@@ -195,7 +195,7 @@ void getConvolutionKernelParams(const LayerParams &params, std::vector<size_t>&
     util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode, kernel.size());
     util::getParameter(params, "dilation", "dilation", dilations, true, std::vector<size_t>(kernel.size(), 1));
     util::getParameter(params, "adj", "adj", adjust_pads, true, std::vector<size_t>(kernel.size(), 0));
-    useWinograd = params.get("use_winograd", true);
+    useWinograd = params.get("use_winograd", useWinograd);

     for (int i = 0; i < dilations.size(); i++)
         CV_Assert(dilations[i] > 0);
diff --git a/modules/dnn/src/net_impl_backend.cpp b/modules/dnn/src/net_impl_backend.cpp
index 9896153a82..b53908f8ec 100644
--- a/modules/dnn/src/net_impl_backend.cpp
+++ b/modules/dnn/src/net_impl_backend.cpp
@@ -251,6 +251,14 @@ void Net::Impl::setPreferableTarget(int targetId)
 #endif
         clear();
+
+        if (targetId == DNN_TARGET_CPU_FP16)
+        {
+            if (useWinograd) {
+                CV_LOG_INFO(NULL, "DNN: DNN_TARGET_CPU_FP16 is set => Winograd convolution is disabled by default to preserve accuracy. If needed, enable it explicitly using enableWinograd(true).");
+                enableWinograd(false);
+            }
+        }
     }
 }
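
For reference, a minimal usage sketch of the behaviour this patch introduces: selecting DNN_TARGET_CPU_FP16 now disables Winograd by default, and callers who accept the accuracy trade-off can opt back in through Net::enableWinograd(true). The model path below is a placeholder, not part of the patch.

#include <opencv2/dnn.hpp>

int main()
{
    // "model.onnx" is a placeholder model path for illustration only.
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");

    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);

    // With this patch, selecting the FP16 CPU target logs a notice and
    // turns Winograd off to preserve accuracy.
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU_FP16);

    // Explicitly re-enable the faster 3x3 Winograd convolution if the
    // FP16 accuracy trade-off is acceptable for the given model.
    net.enableWinograd(true);

    return 0;
}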