From fa4871b0131d3a5afaf82293091e40fa333d2597 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Wed, 5 Feb 2020 19:22:37 +0000
Subject: [PATCH 1/8] dnn: don't require setInput in .dump()

---
 modules/dnn/src/dnn.cpp | 271 ++++++++++++++++++++++++----------------
 1 file changed, 165 insertions(+), 106 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 719b0afd50..9e81af604b 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -86,6 +86,7 @@ using std::vector;
 using std::map;
 using std::make_pair;
 using std::set;
+using std::string;
 
 //==================================================================================================
 
@@ -3496,20 +3497,26 @@ int Net::getLayerId(const String &layer)
     return impl->getLayerId(layer);
 }
 
-String parseLayerParams(const String& name, const LayerParams& lp) {
+static
+string dumpLayerParameterSize(const string& name, const LayerParams& lp)
+{
+    std::ostringstream out(name, std::ios::ate);
     DictValue param = lp.get(name);
-    std::ostringstream out;
-    out << name << " ";
-    switch (param.size()) {
-        case 1: out << ": "; break;
-        case 2: out << "(HxW): "; break;
-        case 3: out << "(DxHxW): "; break;
-        default: CV_Error(Error::StsNotImplemented, format("Unsupported %s size = %d", name.c_str(), param.size()));
+    switch (param.size())
+    {
+        case 1: out << " : "; break;
+        case 2: out << " (HxW): "; break;
+        case 3: out << " (DxHxW): "; break;
+        default:
+            CV_LOG_INFO(NULL, format("DNN/dumpLayerParameterSize(): Unsupported '%s' size = %d", name.c_str(), param.size()));
+            out << ": ";
     }
-    for (size_t i = 0; i < param.size() - 1; i++) {
-        out << param.get(i) << " x ";
+    for (size_t i = 0; i < param.size(); i++)
+    {
+        if (i > 0)
+            out << " x ";
+        out << param.get(i);
     }
-    out << param.get(param.size() - 1) << "\\l";
     return out.str();
 }
 
@@ -3517,23 +3524,26 @@ String Net::dump()
 {
     CV_Assert(!empty());
 
-    if (impl->netInputLayer->inputsData.empty())
-        CV_Error(Error::StsError, "Requested set input");
+    bool hasInput = !impl->netInputLayer->inputsData.empty();
 
-    if (!impl->netWasAllocated)
-        impl->setUpNet();
+    if (hasInput)
+    {
+        if (!impl->netWasAllocated)
+            impl->setUpNet();
+    }
 
     std::ostringstream out;
-    std::map<int, LayerData>& map = impl->layers;
-    int prefBackend = impl->preferableBackend;
+    const std::map<int, LayerData>& map = impl->layers;
+
+    Backend prefBackend = (Backend)impl->preferableBackend;
     std::vector<std::vector<int> > skippedLayers;
     std::vector<int> skipId;
     std::vector<int> allLayers(map.size(), -1);
     int idPrev = -1;
     Ptr<BackendNode> prevNode;
-    for (std::map<int, LayerData>::reverse_iterator rit = map.rbegin(); rit != map.rend(); ++rit)
+    for (std::map<int, LayerData>::const_reverse_iterator rit = map.rbegin(); rit != map.rend(); ++rit)
     {
-        std::map<int, Ptr<BackendNode> >::iterator itBackend = rit->second.backendNodes.find(prefBackend);
+        std::map<int, Ptr<BackendNode> >::const_iterator itBackend = rit->second.backendNodes.find(prefBackend);
         if (prefBackend == DNN_BACKEND_OPENCV || itBackend == rit->second.backendNodes.end()
                 || itBackend->second.empty())
         {
@@ -3572,154 +3582,203 @@ String Net::dump()
             prevNode = itBackend->second;
         }
     }
-    String colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462"};
-    String backend;
-    switch (prefBackend) {
+    string colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462"};
+    string backend;
+    switch (prefBackend)
+    {
         case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
         case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
         case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
        case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
        case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
        case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
+        // don't use default:
     }
-    out << "digraph G {" << '\n';
+    out << "digraph G {\n";
     // Add nodes
-    for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
+    for (std::map<int, LayerData>::const_iterator it = map.begin(); it != map.end(); ++it)
     {
-        String name = it->second.params.name;
-        if (allLayers[it->first] == -1 && !name.empty()) {
-            out << " " << "\"" << name << "\"" << " [label=\"";
-            skipId.clear();
-            skipId.push_back(it->first);
+        const LayerData& ld = it->second;
+        string name = ld.params.name;
+        std::vector<int> clusterIds(1, it->first);
+        if (allLayers[it->first] == -1 && !name.empty())
+        {
+            out << "\t\"" << name << "\" [label=\"";
         }
         else if (name.empty() || it->first != skippedLayers[allLayers[it->first]][0])
-            continue;
-        else { // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
-            int cluster = allLayers[it->first];
-            out << " " << "\"" << "cluster_" << cluster << "\"" << " [label=\"{";
-            skipId = skippedLayers[allLayers[it->first]]; // vertices in current cluster
-        }
-        for (int i = 0; i < skipId.size(); i++) {
-            LayerParams& lp = map[skipId[i]].params;
+        {
+            continue;
+        }
+        else // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
+        {
+            int cluster = allLayers[it->first];
+            out << "\t\"" << "cluster_" << cluster << "\" [label=\"{";
+            clusterIds = skippedLayers[allLayers[it->first]]; // vertices in current cluster
+        }
+        for (int i = 0; i < clusterIds.size(); i++)
+        {
+            CV_DbgAssert(map.find(clusterIds[i]) != map.end());
+            const LayerParams& lp = map.find(clusterIds[i])->second.params;
             if (!lp.name.empty()) {
                 if (i > 0) {
                     out << " | ";
                 }
-                out << lp.name << "\\n" << lp.type << "\\n";
-                if (lp.has("kernel_size")) {
-                    String kernel = parseLayerParams("kernel_size", lp);
+                out << lp.name << "\\n" << lp.type << "\\n"; // align center
+                if (lp.has("kernel_size"))
+                {
+                    string kernel = dumpLayerParameterSize("kernel_size", lp);
                     out << kernel;
+                    out << "\\l"; // align left
                 } else if (lp.has("kernel_h") && lp.has("kernel_w")) {
                     DictValue h = lp.get("kernel_h");
                     DictValue w = lp.get("kernel_w");
-                    out << "kernel (HxW): " << h << " x " << w << "\\l";
+                    out << "kernel (HxW): " << h << " x " << w;
+                    out << "\\l"; // align left
                 }
                 if (lp.has("stride")) {
-                    String stride = parseLayerParams("stride", lp);
+                    string stride = dumpLayerParameterSize("stride", lp);
                     out << stride;
+                    out << "\\l"; // align left
                 } else if (lp.has("stride_h") && lp.has("stride_w")) {
                     DictValue h = lp.get("stride_h");
                     DictValue w = lp.get("stride_w");
-                    out << "stride (HxW): " << h << " x " << w << "\\l";
+                    out << "stride (HxW): " << h << " x " << w;
+                    out << "\\l"; // align left
                 }
                 if (lp.has("dilation")) {
-                    String dilation = parseLayerParams("dilation", lp);
+                    string dilation = dumpLayerParameterSize("dilation", lp);
                     out << dilation;
+                    out << "\\l"; // align left
                 } else if (lp.has("dilation_h") && lp.has("dilation_w")) {
                     DictValue h = lp.get("dilation_h");
                     DictValue w = lp.get("dilation_w");
-                    out << "dilation (HxW): " << h << " x " << w << "\\l";
+                    out << "dilation (HxW): " << h << " x " << w;
+                    out << "\\l"; // align left
                 }
                 if (lp.has("pad")) {
                     DictValue pad = lp.get("pad");
                     out << "pad ";
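A side note on the helper introduced by this patch, since the construct is easy to misread: dumpLayerParameterSize() seeds its std::ostringstream with `name` and opens it with std::ios::ate, which is what makes later operator<< calls append after the seed instead of overwriting it from position zero. A minimal standalone sketch of just that idiom (plain C++11, no OpenCV dependencies; the strings are made-up examples):

    // Seeding an ostringstream: std::ios::ate positions the write pointer
    // at the end of the seed, so operator<< appends.
    #include <iostream>
    #include <sstream>

    int main()
    {
        std::ostringstream appended("kernel_size", std::ios::ate);
        appended << " (HxW): " << 3 << " x " << 3;
        std::cout << appended.str() << '\n';   // kernel_size (HxW): 3 x 3

        std::ostringstream clobbered("kernel_size");  // no std::ios::ate
        clobbered << " (HxW)";
        std::cout << clobbered.str() << '\n';  // " (HxW)_size": seed overwritten
        return 0;
    }

The patch also standardizes the Graphviz escapes in the emitted record labels: "\\n" centers a line inside a node and "\\l" left-justifies it, hence the recurring "align center" / "align left" comments.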
From 6eba1a4d4436c2c5ab8ec5988bce316f6e64b1f8 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Wed, 5 Feb 2020 21:20:10 +0000
Subject: [PATCH 2/8] dnn: auto network dump through parameter

---
 modules/dnn/src/dnn.cpp | 59 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 55 insertions(+), 4 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 9e81af604b..be3ce435b3 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -62,6 +62,8 @@ namespace cv {
 namespace dnn {
 CV__DNN_EXPERIMENTAL_NS_BEGIN
 
+static size_t DNN_NETWORK_DUMP = utils::getConfigurationParameterSizeT("OPENCV_DNN_NETWORK_DUMP", 0);
+
 // this option is useful to run valgrind memory errors detection
 static bool DNN_DISABLE_MEMORY_OPTIMIZATIONS = utils::getConfigurationParameterBool("OPENCV_DNN_DISABLE_MEMORY_OPTIMIZATIONS", false);
 
@@ -1055,12 +1057,19 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
     return Ptr<BackendWrapper>();  // TODO Error?
 }
 
+static int g_networkId = 0;
+
 struct Net::Impl
 {
     typedef std::map<int, LayerShapes> LayersShapesMap;
     typedef std::map<int, LayerData> MapIdToLayerData;
 
+    const int networkId; // network global identifier
+    int networkDumpCounter; // dump counter
+
     Impl()
+        : networkId(CV_XADD(&g_networkId, 1))
+        , networkDumpCounter(0)
     {
         //allocate fake net input layer
         netInputLayer = Ptr<DataLayer>(new DataLayer());
@@ -1224,6 +1233,11 @@ struct Net::Impl
     {
         CV_TRACE_FUNCTION();
 
+        if (DNN_NETWORK_DUMP > 0 && networkDumpCounter == 0)
+        {
+            dumpNetworkToFile();
+        }
+
         if (preferableBackend == DNN_BACKEND_DEFAULT)
             preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
 #ifdef HAVE_INF_ENGINE
@@ -1300,6 +1314,11 @@ struct Net::Impl
 
             netWasAllocated = true;
             this->blobsToKeep = blobsToKeep_;
+
+            if (DNN_NETWORK_DUMP > 0)
+            {
+                dumpNetworkToFile();
+            }
         }
     }
 
@@ -2980,6 +2999,31 @@ struct Net::Impl
     static Net createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNet);
 #endif
 
+    string dump();
+
+    void dumpNetworkToFile()
+    {
+#ifndef OPENCV_DNN_DISABLE_NETWORK_AUTO_DUMP
+        String dumpFileName = cv::format("ocv_dnn_net_%05d_%02d.dot", networkId, networkDumpCounter++);
+        try
+        {
+            string dumpStr = dump();
+            std::ofstream out(dumpFileName.c_str(), std::ios::out | std::ios::binary);
+            out << dumpStr;
+        }
+        catch (const std::exception& e)
+        {
+            std::ofstream out((dumpFileName + ".error").c_str(), std::ios::out);
+            out << "Exception: " << e.what() << std::endl;
+        }
+        catch (...)
+        {
+            std::ofstream out((dumpFileName + ".error").c_str(), std::ios::out);
+            out << "Can't dump: unknown exception" << std::endl;
+        }
+#endif
+    }
 };
 
 Net::Net() : impl(new Net::Impl)
@@ -3532,10 +3576,17 @@ String Net::dump()
         impl->setUpNet();
     }
 
-    std::ostringstream out;
-    const std::map<int, LayerData>& map = impl->layers;
+    return impl->dump();
+}
 
-    Backend prefBackend = (Backend)impl->preferableBackend;
+string Net::Impl::dump()
+{
+    bool hasInput = !netInputLayer->inputsData.empty();
+
+    std::ostringstream out;
+    const std::map<int, LayerData>& map = layers;
+
+    Backend prefBackend = (Backend)preferableBackend;
     std::vector<std::vector<int> > skippedLayers;
     std::vector<int> skipId;
     std::vector<int> allLayers(map.size(), -1);
@@ -3736,7 +3787,7 @@ string Net::Impl::dump()
     }
     out << '\n';
     // Add edges
-    int inputsSize = hasInput ? impl->netInputLayer->outNames.size() : 0;
+    int inputsSize = hasInput ? netInputLayer->outNames.size() : 0;
     for (std::map<int, LayerData>::const_iterator it = map.begin(); it != map.end(); ++it)
     {
         const LayerData& ld = it->second;
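The whole feature rests on a process-wide counter plus a configuration parameter read once at startup. A minimal sketch of that mechanism in plain C++ (std::atomic stands in for OpenCV's CV_XADD, std::getenv for utils::getConfigurationParameterSizeT; the names mirror the patch, but nothing below is OpenCV code):

    #include <atomic>
    #include <cstdio>
    #include <cstdlib>
    #include <fstream>
    #include <string>

    static std::atomic<int> g_networkId(0);

    // Read once at startup, like the static DNN_NETWORK_DUMP above.
    static const long NETWORK_DUMP =
            std::getenv("OPENCV_DNN_NETWORK_DUMP")
            ? std::atol(std::getenv("OPENCV_DNN_NETWORK_DUMP")) : 0;

    struct NetImpl
    {
        const int networkId;      // stable id, unique per constructed network
        int networkDumpCounter;   // increments on every dump of this network

        NetImpl() : networkId(g_networkId++), networkDumpCounter(0) {}

        void dumpNetworkToFile(const std::string& dotText)
        {
            if (NETWORK_DUMP == 0)
                return;
            char name[64];
            std::snprintf(name, sizeof(name), "ocv_dnn_net_%05d_%02d.dot",
                          networkId, networkDumpCounter++);
            std::ofstream(name, std::ios::binary) << dotText;
        }
    };

    int main()
    {
        NetImpl net;
        net.dumpNetworkToFile("digraph G {}\n");  // no-op unless the env var is set
        return 0;
    }

With OPENCV_DNN_NETWORK_DUMP=1 in the environment, the real code writes ocv_dnn_net_00000_00.dot at the start of setUpNet() and a numbered successor after each (re)allocation.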
"\\l"; break; - case 6: out << "(DxHxW): (" << pad.get(0) << ", " << pad.get(3) << ") x (" << pad.get(1) << ", " << pad.get(4) - << ") x (" << pad.get(2) << ", " << pad.get(5) << ")" << "\\l"; break; + switch (pad.size()) + { + case 1: out << ": " << pad; break; + case 2: + out << "(HxW): (" << pad.get(0) << " x " << pad.get(1) << ")"; + break; + case 4: + out << "(HxW): (" << pad.get(0) << ", " << pad.get(2) + << ") x (" << pad.get(1) << ", " << pad.get(3) << ")"; + break; + case 6: + out << "(DxHxW): (" << pad.get(0) << ", " << pad.get(3) + << ") x (" << pad.get(1) << ", " << pad.get(4) + << ") x (" << pad.get(2) << ", " << pad.get(5) << ")"; + break; default: CV_Error(Error::StsNotImplemented, format("Unsupported pad size = %d", pad.size())); } - } else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) { - DictValue l = lp.get("pad_l"); - DictValue t = lp.get("pad_t"); - DictValue r = lp.get("pad_r"); - DictValue b = lp.get("pad_b"); - out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")" << "\\l"; - } - else if (lp.has("pooled_w") || lp.has("pooled_h")) { - DictValue h = lp.get("pooled_h"); - DictValue w = lp.get("pooled_w"); - out << "pad (HxW): " << h << " x " << w << "\\l"; - } - if (lp.has("pool")) { - out << "pool: " << lp.get("pool") << "\\l"; - } - if (lp.has("global_pooling")) { - out << "global_pooling: " << lp.get("global_pooling") << "\\l"; - } - if (lp.has("group")) { - out << "group: " << lp.get("group") << "\\l"; - } - } - } - if (!it->second.outputBlobs.empty()) - out << "output: " << it->second.outputBlobs[0].size << "\\l"; + out << "\\l"; // align left + } else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) { + DictValue l = lp.get("pad_l"); + DictValue t = lp.get("pad_t"); + DictValue r = lp.get("pad_r"); + DictValue b = lp.get("pad_b"); + out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")"; + out << "\\l"; // align left + } + else if (lp.has("pooled_w") || lp.has("pooled_h")) { + DictValue h = lp.get("pooled_h"); + DictValue w = lp.get("pooled_w"); + out << "pad pooled (HxW): " << h << " x " << w; + out << "\\l"; // align left + } + if (lp.has("pool")) { + out << "pool: " << lp.get("pool"); + out << "\\l"; // align left + } + if (lp.has("global_pooling")) { + out << "global_pooling: " << lp.get("global_pooling"); + out << "\\l"; // align left + } + if (lp.has("group")) { + out << "group: " << lp.get("group"); + out << "\\l"; // align left + } + } + } + if (!ld.outputBlobs.empty()) + { + out << "output: " << ld.outputBlobs[0].size; + out << "\\l"; // align left + } - Ptr layerBackend = it->second.backendNodes[prefBackend]; - out << (!layerBackend.empty() ? backend : "OCV/"); - int colorId = 0; - switch (it->second.layerInstance->preferableTarget) { - case DNN_TARGET_CPU: out << "CPU\\n"; colorId = layerBackend.empty() ? 0 : 5; break; - case DNN_TARGET_OPENCL: out << "OCL\\n"; colorId = 1; break; - case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16\\n"; colorId = 2; break; - case DNN_TARGET_MYRIAD: out << "MYRIAD\\n"; colorId = 3; break; - case DNN_TARGET_FPGA: out << "FPGA\\n"; colorId = 4; break; - } - out << ((skipId.size() == 1)? "\" " : " }\" "); - out << "fillcolor=\"" << colors[colorId] << "\" "; - out << "style=filled "; - out << "shape=" << ((skipId.size() == 1)? 
"box" : "record") << "]" << '\n'; + Ptr layerBackend; + std::map >::const_iterator ibn = ld.backendNodes.find(prefBackend); + if (ibn != ld.backendNodes.end()) + layerBackend = ibn->second; + out << (!layerBackend.empty() ? backend : "OCV/"); + int colorId = 0; + const Target target = ld.layerInstance.empty() + ? DNN_TARGET_CPU + : (Target)(ld.layerInstance->preferableTarget); // TODO fix preferableTarget type + switch (target) + { + case DNN_TARGET_CPU: out << "CPU"; colorId = layerBackend.empty() ? 0 : 5; break; + case DNN_TARGET_OPENCL: out << "OCL"; colorId = 1; break; + case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16"; colorId = 2; break; + case DNN_TARGET_MYRIAD: out << "MYRIAD"; colorId = 3; break; + case DNN_TARGET_FPGA: out << "FPGA"; colorId = 4; break; + // don't use default: + } + out << "\\n"; // align center + out << ((clusterIds.size() == 1)? "\" " : " }\" "); + out << "fillcolor=\"" << colors[colorId] << "\" "; + out << "style=filled "; + out << "shape=" << ((clusterIds.size() == 1)? "box" : "record") << "]\n"; } out << '\n'; // Add edges - int inputsSize = impl->netInputLayer->outNames.size(); - for (std::map::iterator it = map.begin(); it != map.end(); ++it) + int inputsSize = hasInput ? impl->netInputLayer->outNames.size() : 0; + for (std::map::const_iterator it = map.begin(); it != map.end(); ++it) { + const LayerData& ld = it->second; if (allLayers[it->first] == -1) // node { - for (int i = 0; i < it->second.consumers.size(); i++) + for (int i = 0; i < ld.consumers.size(); i++) { - int outId = it->second.consumers[i].lid; + int outId = ld.consumers[i].lid; if (it == map.begin() && inputsSize > 1) - out << " " << "\"" << it->second.name << "_" << i << "\"" << " -> "; + out << "\t\"" << ld.name << "_" << i << "\"" << " -> "; else - out << " " << "\"" << it->second.name << "\"" << " -> "; + out << "\t\"" << ld.name << "\"" << " -> "; if (allLayers[outId] == -1) // node - out << "\"" << map[outId].name << "\"" << '\n'; + { + CV_DbgAssert(map.find(outId) != map.end()); + out << "\"" << map.find(outId)->second.name << "\"\n"; + } else // cluster - out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n'; + { + out << "\"" << "cluster_" << allLayers[outId] << "\"\n"; + } } } else if (it->first == skippedLayers[allLayers[it->first]].back()) // edges from last layer in cluster { - for (int i = 0; i < it->second.consumers.size(); i++) + for (int i = 0; i < ld.consumers.size(); i++) { - int outId = it->second.consumers[i].lid; - if (allLayers[outId] == -1) { // node - out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> "; - out << "\"" << map[outId].name << "\"" << '\n'; + int outId = ld.consumers[i].lid; + if (allLayers[outId] == -1) // node + { + CV_DbgAssert(map.find(outId) != map.end()); + out << "\t\"" << "cluster_" << allLayers[it->first] << "\"" << " -> "; + out << "\"" << map.find(outId)->second.name << "\"\n"; } else if (allLayers[outId] != allLayers[it->first]) { // another cluster - out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> "; - out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n'; + out << "\t\"" << "cluster_" << allLayers[it->first] << "\"" << " -> "; + out << "\"" << "cluster_" << allLayers[outId] << "\"\n"; } } } } - out << "}"; + out << "}\n"; return out.str(); } From 6eba1a4d4436c2c5ab8ec5988bce316f6e64b1f8 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 5 Feb 2020 21:20:10 +0000 Subject: [PATCH 2/8] dnn: auto network dump through parameter --- modules/dnn/src/dnn.cpp | 59 
From fb13b87285b7483101ea3c3af702a55febafc9b8 Mon Sep 17 00:00:00 2001
From: rayonnant14
Date: Mon, 10 Feb 2020 20:21:58 +0300
Subject: [PATCH 3/8] fix wrong memory allocation

---
 modules/objdetect/src/qrcode.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index f4a26fb93a..f99a46c922 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -1312,7 +1312,7 @@ protected:
     {
     public:
         ParallelSearch(vector< vector< Point2f > >& true_points_group_,
-                vector< vector< Point2f > >& loc_, int iter_, int* end_,
+                vector< vector< Point2f > >& loc_, int iter_, vector<int>& end_,
                 vector< vector< Vec3i > >& all_points_,
                 QRDetectMulti& cl_)
         :
@@ -1328,7 +1328,7 @@ protected:
         vector< vector< Point2f > >& true_points_group;
         vector< vector< Point2f > >& loc;
         int iter;
-        int* end;
+        vector<int>& end;
         vector< vector< Vec3i > >& all_points;
         QRDetectMulti& cl;
     };
@@ -1940,7 +1940,7 @@ bool QRDetectMulti::checkSets(vector< vector< Point2f > >& true_points_group, vecto
             return false;
 
-    int* set_size = new int[true_points_group.size()];
+    vector<int> set_size(true_points_group.size());
     for (size_t i = 0; i < true_points_group.size(); i++)
     {
         set_size[i] = int(0.5 * (true_points_group[i].size() - 2 ) * (true_points_group[i].size() - 1));
@@ -1978,7 +1978,7 @@ bool QRDetectMulti::checkSets(vector< vector< Point2f > >& true_points_group, vecto
     transformation_points.resize(iter + true_points_group.size());
     true_points_group_copy = true_points_group;
-    int* end = new int[true_points_group.size()];
+    vector<int> end(true_points_group.size());
     for (size_t i = 0; i < true_points_group.size(); i++)
         end[i] = iter + set_size[i];
     ParallelSearch parallelSearch(true_points_group,
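For context, the bug fixed here is a leak rather than an out-of-bounds write: checkSets() allocated set_size and end with new int[...] and had no matching delete[], so the buffers were dropped on every call. A hypothetical minimal repro of the pattern and its fix (not the actual qrcode.cpp control flow):

    #include <cstddef>
    #include <vector>

    bool leaky(std::size_t n)
    {
        int* end = new int[n]();     // heap buffer with manual lifetime
        if (n < 2)
            return false;            // early return: `end` leaks here
        bool ok = (end[0] == 0);
        delete[] end;                // the pre-patch code had no delete[] at all
        return ok;
    }

    bool fixed(std::size_t n)
    {
        std::vector<int> end(n);     // freed on every path, exceptions included
        if (n < 2)
            return false;
        return end[0] == 0;
    }

    int main() { return (leaky(5) && fixed(5)) ? 0 : 1; }

std::vector also keeps the element type and size together, so the ParallelSearch member changes from a bare int* to a reference whose extent is checkable.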
netInputLayer->outNames.size() : 0; for (std::map::const_iterator it = map.begin(); it != map.end(); ++it) { const LayerData& ld = it->second; From fb13b87285b7483101ea3c3af702a55febafc9b8 Mon Sep 17 00:00:00 2001 From: rayonnant14 Date: Mon, 10 Feb 2020 20:21:58 +0300 Subject: [PATCH 3/8] fix wrong memory allocation --- modules/objdetect/src/qrcode.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp index f4a26fb93a..f99a46c922 100644 --- a/modules/objdetect/src/qrcode.cpp +++ b/modules/objdetect/src/qrcode.cpp @@ -1312,7 +1312,7 @@ protected: { public: ParallelSearch(vector< vector< Point2f > >& true_points_group_, - vector< vector< Point2f > >& loc_, int iter_, int* end_, + vector< vector< Point2f > >& loc_, int iter_, vector& end_, vector< vector< Vec3i > >& all_points_, QRDetectMulti& cl_) : @@ -1328,7 +1328,7 @@ protected: vector< vector< Point2f > >& true_points_group; vector< vector< Point2f > >& loc; int iter; - int* end; + vector& end; vector< vector< Vec3i > >& all_points; QRDetectMulti& cl; }; @@ -1940,7 +1940,7 @@ bool QRDetectMulti::checkSets(vector >& true_points_group, vecto return false; - int* set_size = new int[true_points_group.size()]; + vector set_size(true_points_group.size()); for (size_t i = 0; i < true_points_group.size(); i++) { set_size[i] = int(0.5 * (true_points_group[i].size() - 2 ) * (true_points_group[i].size() - 1)); @@ -1978,7 +1978,7 @@ bool QRDetectMulti::checkSets(vector >& true_points_group, vecto transformation_points.resize(iter + true_points_group.size()); true_points_group_copy = true_points_group; - int* end = new int[true_points_group.size()]; + vector end(true_points_group.size()); for (size_t i = 0; i < true_points_group.size(); i++) end[i] = iter + set_size[i]; ParallelSearch parallelSearch(true_points_group, From 1159453139be593b200ab3c281bc7ee2c5c28dc7 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Tue, 11 Feb 2020 09:31:16 +0300 Subject: [PATCH 4/8] 3.4 docs for 3.4 branch --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a3933c7404..66c497e4a0 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ### Resources * Homepage: -* Docs: +* Docs: * Q&A forum: * Issue tracking: From ef93aea0da04a4151af23a396814cec0203d4d8b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 11 Feb 2020 17:24:52 +0300 Subject: [PATCH 5/8] cmake: update handling of MSVC /MP flag --- cmake/OpenCVCRTLinkage.cmake | 9 -------- cmake/OpenCVCompilerOptions.cmake | 35 +++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake index b87dfd3a7c..0e0a54ecf9 100644 --- a/cmake/OpenCVCRTLinkage.cmake +++ b/cmake/OpenCVCRTLinkage.cmake @@ -64,12 +64,3 @@ else() endif() endforeach(flag_var) endif() - -if(CMAKE_VERSION VERSION_GREATER "2.8.6") - include(ProcessorCount) - ProcessorCount(N) - if(NOT N EQUAL 0) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP${N} ") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP${N} ") - endif() -endif() diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index dd4dcc2296..eafca64068 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -442,3 +442,38 @@ if(OPENCV_EXTRA_RPATH_LINK_PATH) message(WARNING "OPENCV_EXTRA_RPATH_LINK_PATH may not work properly because CMAKE_EXECUTABLE_RPATH_LINK_CXX_FLAG is not defined (not supported)") 
From d81a0da3e0e8152965e9bbf08dee4bee5f1a8197 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Tue, 14 Jan 2020 16:20:12 +0300
Subject: [PATCH 6/8] dnn: use OpenVINO 2020.1 defines

---
 cmake/OpenCVDetectInferenceEngine.cmake | 4 ++--
 modules/dnn/src/op_inf_engine.hpp       | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake
index 5326eb9795..8121f471fa 100644
--- a/cmake/OpenCVDetectInferenceEngine.cmake
+++ b/cmake/OpenCVDetectInferenceEngine.cmake
@@ -99,9 +99,9 @@ endif()
 
 if(INF_ENGINE_TARGET)
   if(NOT INF_ENGINE_RELEASE)
-    message(WARNING "InferenceEngine version has not been set, 2019R3 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
+    message(WARNING "InferenceEngine version has not been set, 2020.1 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
   endif()
-  set(INF_ENGINE_RELEASE "2019030000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)")
+  set(INF_ENGINE_RELEASE "2020010000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
   set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
       INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
   )
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index 217eb9a008..408fe0c4b7 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -23,10 +23,11 @@
 #define INF_ENGINE_RELEASE_2019R1 2019010000
 #define INF_ENGINE_RELEASE_2019R2 2019020000
 #define INF_ENGINE_RELEASE_2019R3 2019030000
+#define INF_ENGINE_RELEASE_2020_1 2020010000
 
 #ifndef INF_ENGINE_RELEASE
-#warning("IE version has not been provided via command-line. Using 2019R3 by default")
-#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R3
+#warning("IE version has not been provided via command-line. Using 2020.1 by default")
+#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_1
 #endif
 
 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
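The encoding behind these defines is YYYYAABBCC: dividing by 10000 strips the BBCC (update/patch) digits and leaves a YYYYAA "major" value that the comparison macros operate on. A small self-contained illustration; INF_ENGINE_VER_MAJOR_GT is copied from op_inf_engine.hpp, the main() is just for show:

    #include <cstdio>

    #define INF_ENGINE_RELEASE_2019R3 2019030000
    #define INF_ENGINE_RELEASE_2020_1 2020010000
    #define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_1
    #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))

    int main()
    {
        std::printf("major = %d\n", INF_ENGINE_RELEASE / 10000);          // 202001
        std::printf("newer than 2019R3? %d\n",
                    INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2019R3));  // 1
        return 0;
    }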
Using 2019.1 by default") +#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_1 #endif #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000)) From 0a91261c7f21db40c8ef8a180d9e7c77a5dd81c9 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 12 Feb 2020 17:22:44 +0300 Subject: [PATCH 7/8] dnn: turn off visibility workaround for OpenVINO 2020.1 --- modules/dnn/src/op_inf_engine.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp index 408fe0c4b7..7cf8a324aa 100644 --- a/modules/dnn/src/op_inf_engine.hpp +++ b/modules/dnn/src/op_inf_engine.hpp @@ -50,7 +50,7 @@ #pragma warning(disable: 4996) // was declared deprecated #endif -#if defined(__GNUC__) +#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1) #pragma GCC visibility push(default) #endif @@ -58,7 +58,7 @@ #include -#if defined(__GNUC__) +#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1) #pragma GCC visibility pop #endif From 9a4cafa319d0a84d9301fa59826d032e6c58b2dd Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Fri, 14 Feb 2020 00:21:38 +0300 Subject: [PATCH 8/8] Resolve #14566 --- modules/dnn/src/layers/elementwise_layers.cpp | 100 +++++++----------- 1 file changed, 37 insertions(+), 63 deletions(-) diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 3459734a08..42b277838b 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -112,13 +112,18 @@ public: } }; - ElementWiseLayer(const Func &f=Func()) : run_parallel(false) { func = f; } + ElementWiseLayer(const Func &f=Func()) { func = f; } virtual bool supportBackend(int backendId) CV_OVERRIDE { return func.supportBackend(backendId, this->preferableTarget); } + virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE + { + func.finalize(); + } + virtual Ptr tryAttach(const Ptr& node) CV_OVERRIDE { switch (node->backendId) @@ -235,7 +240,6 @@ public: } Func func; - bool run_parallel; }; #ifdef HAVE_OPENCL @@ -250,7 +254,16 @@ static String oclGetTMacro(const UMat &m) } #endif -struct ReLUFunctor +struct BaseFunctor +{ + void finalize() {} + + bool tryFuse(Ptr&) { return false; } + + void getScaleShift(Mat&, Mat&) const {} +}; + +struct ReLUFunctor : public BaseFunctor { typedef ReLULayer Layer; float slope; @@ -376,14 +389,10 @@ struct ReLUFunctor } #endif // HAVE_DNN_NGRAPH - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } }; -struct ReLU6Functor +struct ReLU6Functor : public BaseFunctor { typedef ReLU6Layer Layer; float minValue, maxValue; @@ -486,14 +495,10 @@ struct ReLU6Functor } #endif // HAVE_DNN_NGRAPH - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 2; } }; -struct TanHFunctor +struct TanHFunctor : public BaseFunctor { typedef TanHLayer Layer; @@ -565,14 +570,10 @@ struct TanHFunctor } #endif // HAVE_DNN_NGRAPH - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } }; -struct SwishFunctor +struct SwishFunctor : public BaseFunctor { typedef SwishLayer Layer; @@ -645,15 +646,10 @@ struct SwishFunctor } #endif // HAVE_DNN_NGRAPH - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 
-
 };
 
-struct MishFunctor
+struct MishFunctor : public BaseFunctor
 {
     typedef MishLayer Layer;
 
@@ -731,15 +727,10 @@ struct MishFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 3; }
-
 };
 
-struct SigmoidFunctor
+struct SigmoidFunctor : public BaseFunctor
 {
     typedef SigmoidLayer Layer;
 
@@ -811,19 +802,13 @@ struct SigmoidFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 3; }
 };
 
-struct ELUFunctor
+struct ELUFunctor : public BaseFunctor
 {
     typedef ELULayer Layer;
 
-    explicit ELUFunctor() {}
-
     bool supportBackend(int backendId, int)
     {
         return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
@@ -892,14 +877,10 @@ struct ELUFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 2; }
 };
 
-struct AbsValFunctor
+struct AbsValFunctor : public BaseFunctor
 {
     typedef AbsLayer Layer;
 
@@ -977,14 +958,10 @@ struct AbsValFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 1; }
 };
 
-struct BNLLFunctor
+struct BNLLFunctor : public BaseFunctor
 {
     typedef BNLLLayer Layer;
 
@@ -1057,23 +1034,19 @@ struct BNLLFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 5; }
 };
 
-struct PowerFunctor
+struct PowerFunctor : public BaseFunctor
 {
     typedef PowerLayer Layer;
 
-    float power;
-    float scale;
-    float shift;
+    float power, scale, shift;
+    float originPower, originScale, originShift;
 
     explicit PowerFunctor(float power_ = 1.f, float scale_ = 1.f, float shift_ = 0.f)
-        : power(power_), scale(scale_), shift(shift_) {}
+        : power(power_), scale(scale_), shift(shift_),
+          originPower(power_), originScale(scale_), originShift(shift_) {}
 
     bool supportBackend(int backendId, int targetId)
     {
@@ -1083,6 +1056,13 @@ struct PowerFunctor
         return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
                backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
     }
 
+    void finalize()
+    {
+        power = originPower;
+        scale = originScale;
+        shift = originShift;
+    }
+
     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
     {
         float a = scale, b = shift, p = power;
@@ -1212,8 +1192,7 @@ struct PowerFunctor
     int64 getFLOPSPerElement() const { return power == 1 ? 2 : 10; }
 };
 
-
-struct ChannelsPReLUFunctor
+struct ChannelsPReLUFunctor : public BaseFunctor
 {
     typedef ChannelsPReLULayer Layer;
     Mat scale;
@@ -1330,11 +1309,6 @@ struct ChannelsPReLUFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
-
-    bool tryFuse(Ptr<dnn::Layer>&) { return false; }
-
-    void getScaleShift(Mat&, Mat&) const {}
-
     int64 getFLOPSPerElement() const { return 1; }
 };
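The shape of this last refactor: the functors previously carried identical no-op tryFuse()/getScaleShift() stubs, which they now inherit (together with a finalize() hook) from the new BaseFunctor. The behavioral change is in PowerFunctor, which keeps its original coefficients so that a second network setup can undo an earlier fusion; that is the #14566 scenario the subject line refers to. A minimal sketch of just that idea, with the OpenCV types stripped out:

    #include <cassert>

    struct BaseFunctor
    {
        void finalize() {}  // re-initialization hook; default is a no-op
    };

    struct PowerFunctor : public BaseFunctor
    {
        float power, scale, shift;
        float originPower, originScale, originShift;

        explicit PowerFunctor(float p = 1.f, float sc = 1.f, float sh = 0.f)
            : power(p), scale(sc), shift(sh),
              originPower(p), originScale(sc), originShift(sh) {}

        void finalize()  // invoked via ElementWiseLayer::finalize() above
        {
            power = originPower;
            scale = originScale;
            shift = originShift;
        }
    };

    int main()
    {
        PowerFunctor f(2.f, 1.f, 0.f);
        f.scale = 0.5f;  // pretend a fused layer folded its scale in here
        f.finalize();    // the net is set up again: coefficients are pristine
        assert(f.scale == 1.f && f.power == 2.f);
        return 0;
    }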