Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in:
Alexander Alekhin 2021-10-07 04:27:22 +00:00
commit 03a08435e2
7 changed files with 70 additions and 14 deletions

View File

@@ -1786,6 +1786,13 @@ static void WINAPI opencv_fls_destructor(void* pData)
#endif // CV_USE_FLS #endif // CV_USE_FLS
#endif // _WIN32 #endif // _WIN32
static TlsAbstraction* const g_force_initialization_of_TlsAbstraction
#if defined __GNUC__
__attribute__((unused))
#endif
= getTlsAbstraction();
#else // OPENCV_DISABLE_THREAD_SUPPORT #else // OPENCV_DISABLE_THREAD_SUPPORT
// no threading (OPENCV_DISABLE_THREAD_SUPPORT=ON) // no threading (OPENCV_DISABLE_THREAD_SUPPORT=ON)

View File

@@ -391,20 +391,22 @@ class dnn_test(NewOpenCVTests):
raise unittest.SkipTest("Missing DNN test files (dnn/onnx/data/{input/output}_hidden_lstm.npy). " raise unittest.SkipTest("Missing DNN test files (dnn/onnx/data/{input/output}_hidden_lstm.npy). "
"Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.") "Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")
net = cv.dnn.readNet(model)
input = np.load(input_file) input = np.load(input_file)
# we have to expand the shape of input tensor because Python bindings cut 3D tensors to 2D # we have to expand the shape of input tensor because Python bindings cut 3D tensors to 2D
# it should be fixed in future. see : https://github.com/opencv/opencv/issues/19091 # it should be fixed in future. see : https://github.com/opencv/opencv/issues/19091
# please remove `expand_dims` after that # please remove `expand_dims` after that
input = np.expand_dims(input, axis=3) input = np.expand_dims(input, axis=3)
gold_output = np.load(output_file) gold_output = np.load(output_file)
net.setInput(input)
for backend, target in self.dnnBackendsAndTargets: for backend, target in self.dnnBackendsAndTargets:
printParams(backend, target) printParams(backend, target)
net = cv.dnn.readNet(model)
net.setPreferableBackend(backend) net.setPreferableBackend(backend)
net.setPreferableTarget(target) net.setPreferableTarget(target)
net.setInput(input)
real_output = net.forward() real_output = net.forward()
normAssert(self, real_output, gold_output, "", getDefaultThreshold(target)) normAssert(self, real_output, gold_output, "", getDefaultThreshold(target))

View File

@@ -19,6 +19,16 @@ CV__DNN_INLINE_NS_BEGIN
using ::google::protobuf::RepeatedField; using ::google::protobuf::RepeatedField;
using ::google::protobuf::MapPair; using ::google::protobuf::MapPair;
static Mat getTensorContentRef_(const tensorflow::TensorProto& tensor);
static inline
// Returns true when the Mat's data pointer is aligned to its per-element
// size, i.e. safe for direct element-typed access.
bool isAlignedMat(const Mat& m)
{
// Required alignment equals the element size for the Mat's depth;
// CV_ELEM_SIZE1 yields 1, 2, 4 or 8 bytes — always a power of two.
int depth = m.depth();
int alignment = CV_ELEM_SIZE1(depth);
// Power-of-two alignment test via bit mask: the pointer is aligned when
// its low-order bits (below 'alignment') are all zero.
return (((size_t)m.data) & (alignment - 1)) == 0;
}
class TFNodeWrapper : public ImportNodeWrapper class TFNodeWrapper : public ImportNodeWrapper
{ {
public: public:
@@ -719,8 +729,19 @@ public:
{ {
if (!negativeScales) if (!negativeScales)
{ {
Mat scales = getTensorContent(inputNodes[1]->attr().at("value").tensor(), /*copy*/false); Mat scalesRef = getTensorContentRef_(inputNodes[1]->attr().at("value").tensor());
scales *= -1; // FIXME: This breaks the const guarantees of tensor() by writing to scalesRef
if (isAlignedMat(scalesRef))
{
scalesRef *= -1;
}
else
{
Mat scales = scalesRef.clone() * -1;
CV_Assert(scalesRef.isContinuous());
CV_Assert(scales.isContinuous());
memcpy(scalesRef.data, scales.data, scales.total() * scales.elemSize());
}
} }
} }
@@ -832,7 +853,8 @@ void RemoveIdentityOps(tensorflow::GraphDef& net)
} }
} }
Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy) // NB: returned Mat::data pointer may be unaligned
Mat getTensorContentRef_(const tensorflow::TensorProto& tensor)
{ {
const std::string& content = tensor.tensor_content(); const std::string& content = tensor.tensor_content();
Mat m; Mat m;
@@ -904,7 +926,18 @@ Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy)
CV_Error(Error::StsError, "Tensor's data type is not supported"); CV_Error(Error::StsError, "Tensor's data type is not supported");
break; break;
} }
return copy ? m.clone() : m;
return m;
}
// Public accessor for a TensorProto's content as a cv::Mat.
// Unlike getTensorContentRef_ (whose returned Mat::data pointer may be
// unaligned — see its NB comment), the Mat returned here is always safe to
// use directly: it is cloned when the caller requests a copy (forceCopy) or
// when the zero-copy view is misaligned for its element type.
Mat getTensorContent(const tensorflow::TensorProto& tensor, bool forceCopy)
{
// If necessary clone m to have aligned data pointer
Mat m = getTensorContentRef_(tensor);
if (forceCopy || !isAlignedMat(m))
return m.clone();
else
return m;
} }
void releaseTensor(tensorflow::TensorProto* tensor) void releaseTensor(tensorflow::TensorProto* tensor)

View File

@@ -21,7 +21,7 @@ void RemoveIdentityOps(tensorflow::GraphDef& net);
void simplifySubgraphs(tensorflow::GraphDef& net); void simplifySubgraphs(tensorflow::GraphDef& net);
Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy = true); Mat getTensorContent(const tensorflow::TensorProto& tensor, bool forceCopy = true);
void releaseTensor(tensorflow::TensorProto* tensor); void releaseTensor(tensorflow::TensorProto* tensor);

View File

@@ -124,8 +124,10 @@ void parseTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
} }
dstBlob.create(shape, CV_32F); dstBlob.create(shape, CV_32F);
CV_Assert(dstBlob.isContinuous());
Mat tensorContent = getTensorContent(tensor, /*no copy*/false); Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
CV_Assert(tensorContent.isContinuous());
int size = tensorContent.total(); int size = tensorContent.total();
CV_Assert(size == (int)dstBlob.total()); CV_Assert(size == (int)dstBlob.total());
@@ -2671,8 +2673,10 @@ void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Mat &ds
out_c = shape[0]; input_c = shape[1]; out_c = shape[0]; input_c = shape[1];
dstBlob.create(shape, CV_32F); dstBlob.create(shape, CV_32F);
CV_Assert(dstBlob.isContinuous());
Mat tensorContent = getTensorContent(tensor, /*no copy*/false); Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
CV_Assert(tensorContent.isContinuous());
int size = tensorContent.total(); int size = tensorContent.total();
CV_Assert(size == (int)dstBlob.total()); CV_Assert(size == (int)dstBlob.total());

View File

@@ -44,6 +44,8 @@
#include <iterator> #include <iterator>
#include <limits> #include <limits>
#include <opencv2/core/utils/logger.hpp>
// Requires CMake flag: DEBUG_opencv_features2d=ON // Requires CMake flag: DEBUG_opencv_features2d=ON
//#define DEBUG_BLOB_DETECTOR //#define DEBUG_BLOB_DETECTOR
@@ -317,6 +319,19 @@ void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>&
CV_Error(Error::StsUnsupportedFormat, "Blob detector only supports 8-bit images!"); CV_Error(Error::StsUnsupportedFormat, "Blob detector only supports 8-bit images!");
} }
CV_CheckGT(params.thresholdStep, 0.0f, "");
if (params.minThreshold + params.thresholdStep >= params.maxThreshold)
{
// https://github.com/opencv/opencv/issues/6667
CV_LOG_ONCE_INFO(NULL, "SimpleBlobDetector: params.minDistBetweenBlobs is ignored for case with single threshold");
#if 0 // OpenCV 5.0
CV_CheckEQ(params.minRepeatability, 1u, "Incompatible parameters for case with single threshold");
#else
if (params.minRepeatability != 1)
CV_LOG_WARNING(NULL, "SimpleBlobDetector: params.minRepeatability=" << params.minRepeatability << " is incompatible for case with single threshold. Empty result is expected.");
#endif
}
std::vector < std::vector<Center> > centers; std::vector < std::vector<Center> > centers;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep) for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
{ {
@@ -325,19 +340,13 @@ void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>&
std::vector < Center > curCenters; std::vector < Center > curCenters;
findBlobs(grayscaleImage, binarizedImage, curCenters); findBlobs(grayscaleImage, binarizedImage, curCenters);
if(params.maxThreshold - params.minThreshold <= params.thresholdStep) {
// if the difference between min and max threshold is less than the threshold step
// we're only going to enter the loop once, so we need to add curCenters
// to ensure we still use minDistBetweenBlobs
centers.push_back(curCenters);
}
std::vector < std::vector<Center> > newCenters; std::vector < std::vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++) for (size_t i = 0; i < curCenters.size(); i++)
{ {
bool isNew = true; bool isNew = true;
for (size_t j = 0; j < centers.size(); j++) for (size_t j = 0; j < centers.size(); j++)
{ {
double dist = norm(centers[j][centers[j].size() / 2 ].location - curCenters[i].location); double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius; isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
if (!isNew) if (!isNew)
{ {

View File

@@ -12,6 +12,7 @@ TEST(Features2d_BlobDetector, bug_6667)
SimpleBlobDetector::Params params; SimpleBlobDetector::Params params;
params.minThreshold = 250; params.minThreshold = 250;
params.maxThreshold = 260; params.maxThreshold = 260;
params.minRepeatability = 1; // https://github.com/opencv/opencv/issues/6667
std::vector<KeyPoint> keypoints; std::vector<KeyPoint> keypoints;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params); Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);