Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in:
Alexander Alekhin 2019-12-19 14:01:24 +03:00
commit 4c86fc13cb
16 changed files with 559 additions and 77 deletions

View File

@ -5,10 +5,11 @@ if (WIN32 AND NOT ARM)
message(FATAL_ERROR "BUILD_TBB option supports Windows on ARM only!\nUse regular official TBB build instead of the BUILD_TBB option!")
endif()
ocv_update(OPENCV_TBB_RELEASE "2019_U8")
ocv_update(OPENCV_TBB_RELEASE_MD5 "7c371d0f62726154d2c568a85697a0ad")
ocv_update(OPENCV_TBB_RELEASE "v2020.0")
ocv_update(OPENCV_TBB_RELEASE_MD5 "5858dd01ec007c139d5d178b21e06dae")
ocv_update(OPENCV_TBB_FILENAME "${OPENCV_TBB_RELEASE}.tar.gz")
ocv_update(OPENCV_TBB_SUBDIR "tbb-${OPENCV_TBB_RELEASE}")
string(REGEX REPLACE "^v" "" OPENCV_TBB_RELEASE_ "${OPENCV_TBB_RELEASE}")
ocv_update(OPENCV_TBB_SUBDIR "tbb-${OPENCV_TBB_RELEASE_}")
set(tbb_src_dir "${OpenCV_BINARY_DIR}/3rdparty/tbb")
ocv_download(FILENAME ${OPENCV_TBB_FILENAME}
@ -34,10 +35,12 @@ ocv_include_directories("${tbb_src_dir}/include"
file(GLOB lib_srcs "${tbb_src_dir}/src/tbb/*.cpp")
file(GLOB lib_hdrs "${tbb_src_dir}/src/tbb/*.h")
list(APPEND lib_srcs "${tbb_src_dir}/src/rml/client/rml_tbb.cpp")
ocv_list_filterout(lib_srcs "${tbb_src_dir}/src/tbb/tbbbind.cpp") # hwloc.h requirement
if (WIN32)
add_definitions(/D__TBB_DYNAMIC_LOAD_ENABLED=0
/D__TBB_BUILD=1
/DTBB_SUPPRESS_DEPRECATED_MESSAGES=1
/DTBB_NO_LEGACY=1
/D_UNICODE
/DUNICODE

View File

@ -3327,28 +3327,30 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
Mat& npoints )
{
int nimages = (int)objectPoints.total();
int i, j = 0, ni = 0, total = 0;
CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
(!imgPtMat2 || nimages == (int)imagePoints2.total()));
int total = 0;
CV_Assert(nimages > 0);
CV_CheckEQ(nimages, (int)imagePoints1.total(), "");
if (imgPtMat2)
CV_CheckEQ(nimages, (int)imagePoints2.total(), "");
for( i = 0; i < nimages; i++ )
for (int i = 0; i < nimages; i++)
{
Mat objectPoint = objectPoints.getMat(i);
if (objectPoint.empty())
CV_Error(CV_StsBadSize, "objectPoints should not contain empty vector of vectors of points");
ni = objectPoint.checkVector(3, CV_32F);
if( ni <= 0 )
int numberOfObjectPoints = objectPoint.checkVector(3, CV_32F);
if (numberOfObjectPoints <= 0)
CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
Mat imagePoint1 = imagePoints1.getMat(i);
if (imagePoint1.empty())
CV_Error(CV_StsBadSize, "imagePoints1 should not contain empty vector of vectors of points");
int ni1 = imagePoint1.checkVector(2, CV_32F);
if( ni1 <= 0 )
int numberOfImagePoints = imagePoint1.checkVector(2, CV_32F);
if (numberOfImagePoints <= 0)
CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
CV_Assert( ni == ni1 );
CV_CheckEQ(numberOfObjectPoints, numberOfImagePoints, "Number of object and image points must be equal");
total += ni;
total += numberOfObjectPoints;
}
npoints.create(1, (int)nimages, CV_32S);
@ -3356,7 +3358,7 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
imgPtMat1.create(1, (int)total, CV_32FC2);
Point2f* imgPtData2 = 0;
if( imgPtMat2 )
if (imgPtMat2)
{
imgPtMat2->create(1, (int)total, CV_32FC2);
imgPtData2 = imgPtMat2->ptr<Point2f>();
@ -3365,36 +3367,38 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
for( i = 0; i < nimages; i++, j += ni )
for (int i = 0, j = 0; i < nimages; i++)
{
Mat objpt = objectPoints.getMat(i);
Mat imgpt1 = imagePoints1.getMat(i);
ni = objpt.checkVector(3, CV_32F);
npoints.at<int>(i) = ni;
for (int n = 0; n < ni; ++n)
int numberOfObjectPoints = objpt.checkVector(3, CV_32F);
npoints.at<int>(i) = numberOfObjectPoints;
for (int n = 0; n < numberOfObjectPoints; ++n)
{
objPtData[j + n] = objpt.ptr<Point3f>()[n];
imgPtData1[j + n] = imgpt1.ptr<Point2f>()[n];
}
if( imgPtData2 )
if (imgPtData2)
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
for (int n = 0; n < ni2; ++n)
int numberOfImage2Points = imgpt2.checkVector(2, CV_32F);
CV_CheckEQ(numberOfObjectPoints, numberOfImage2Points, "Number of object and image(2) points must be equal");
for (int n = 0; n < numberOfImage2Points; ++n)
{
imgPtData2[j + n] = imgpt2.ptr<Point2f>()[n];
}
}
j += numberOfObjectPoints;
}
ni = npoints.at<int>(0);
int ni = npoints.at<int>(0);
bool releaseObject = iFixedPoint > 0 && iFixedPoint < ni - 1;
// check object points. If not qualified, report errors.
if( releaseObject )
{
for( i = 1; i < nimages; i++ )
for (int i = 1; i < nimages; i++)
{
if( npoints.at<int>(i) != ni )
{

View File

@ -488,13 +488,13 @@ public:
for(j = 0; j < i; ++j)
{
Point3f d1 = ptr[j] - ptr[i];
float n1 = d1.x*d1.x + d1.y*d1.y;
float n1 = d1.x*d1.x + d1.y*d1.y + d1.z*d1.z;
for(k = 0; k < j; ++k)
{
Point3f d2 = ptr[k] - ptr[i];
float denom = (d2.x*d2.x + d2.y*d2.y)*n1;
float num = d1.x*d2.x + d1.y*d2.y;
float denom = (d2.x*d2.x + d2.y*d2.y + d2.z*d2.z)*n1;
float num = d1.x*d2.x + d1.y*d2.y + d1.z*d2.z;
if( num*num > threshold*threshold*denom )
return false;

View File

@ -187,4 +187,18 @@ void CV_Affine3D_EstTest::run( int /* start_from */)
// Smoke test: runs the randomized CV_Affine3D_EstTest scenario defined earlier in this file.
TEST(Calib3d_EstimateAffine3D, accuracy) { CV_Affine3D_EstTest test; test.safe_run(); }
// Regression test for issue #16007: every source point lies in the y == 0 plane and
// every destination point in the y == 1 plane (planar but perfectly valid input);
// estimateAffine3D must still succeed (return value 1) instead of rejecting the set.
TEST(Calib3d_EstimateAffine3D, regression_16007)
{
std::vector<cv::Point3f> m1, m2;
m1.push_back(Point3f(1.0f, 0.0f, 0.0f)); m2.push_back(Point3f(1.0f, 1.0f, 0.0f));
m1.push_back(Point3f(1.0f, 0.0f, 1.0f)); m2.push_back(Point3f(1.0f, 1.0f, 1.0f));
m1.push_back(Point3f(0.5f, 0.0f, 0.5f)); m2.push_back(Point3f(0.5f, 1.0f, 0.5f));
m1.push_back(Point3f(2.5f, 0.0f, 2.5f)); m2.push_back(Point3f(2.5f, 1.0f, 2.5f));
m1.push_back(Point3f(2.0f, 0.0f, 1.0f)); m2.push_back(Point3f(2.0f, 1.0f, 1.0f));
cv::Mat m3D, inl;
// res == 1 signals that a model was found (inl receives the inlier mask).
int res = cv::estimateAffine3D(m1, m2, m3D, inl);
EXPECT_EQ(1, res);
}
}} // namespace

View File

@ -516,6 +516,43 @@ static inline size_t roundUp(size_t a, unsigned int b)
return a + b - 1 - (a + b - 1) % b;
}
/** @brief Alignment check of passed values
Usage: `isAligned<sizeof(int)>(...)`
@note Alignment(N) must be a positive power of 2 (2**k, 2^k)
*/
template<int N, typename T> static inline
bool isAligned(const T& data)
{
    // N == 0 would pass the classic (N & (N - 1)) == 0 power-of-two test but
    // produce a meaningless all-ones mask below, so require N > 0 explicitly.
    // (static_assert is what CV_StaticAssert expands to on C++11 builds.)
    static_assert(N > 0 && (N & (N - 1)) == 0, "isAligned: N must be a positive power of 2");
    // C-style cast on purpose: T may be an integral type or a pointer type here.
    return (((size_t)data) & (N - 1)) == 0;
}
/** @overload */
template<int N> static inline
bool isAligned(const void* p1)
{
    return isAligned<N>((size_t)p1);
}
/** @overload True only if both pointers are N-byte aligned. */
template<int N> static inline
bool isAligned(const void* p1, const void* p2)
{
    // OR-ing the addresses: the union of their low bits must be zero.
    return isAligned<N>(((size_t)p1)|((size_t)p2));
}
/** @overload True only if all three pointers are N-byte aligned. */
template<int N> static inline
bool isAligned(const void* p1, const void* p2, const void* p3)
{
    return isAligned<N>(((size_t)p1)|((size_t)p2)|((size_t)p3));
}
/** @overload True only if all four pointers are N-byte aligned. */
template<int N> static inline
bool isAligned(const void* p1, const void* p2, const void* p3, const void* p4)
{
    return isAligned<N>(((size_t)p1)|((size_t)p2)|((size_t)p3)|((size_t)p4));
}
/** @brief Enables or disables the optimized code.
The function can be used to dynamically turn on and off optimized dispatched code (code that uses SSE4.2, AVX/AVX2,

View File

@ -563,6 +563,12 @@ Mat& Mat::setTo(InputArray _value, InputArray _mask)
return *this;
}
#if CV_NEON && !defined(__aarch64__)
#define CV_CHECK_ALIGNMENT 1
#else
#define CV_CHECK_ALIGNMENT 0
#endif
#if CV_SIMD128
template<typename V> CV_ALWAYS_INLINE void flipHoriz_single( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
{
@ -572,6 +578,10 @@ template<typename V> CV_ALWAYS_INLINE void flipHoriz_single( const uchar* src, s
int width_1 = width & -v_uint8x16::nlanes;
int i, j;
#if CV_CHECK_ALIGNMENT
CV_Assert(isAligned<sizeof(T)>(src, dst));
#endif
for( ; size.height--; src += sstep, dst += dstep )
{
for( i = 0, j = end; i < width_1; i += v_uint8x16::nlanes, j -= v_uint8x16::nlanes )
@ -585,7 +595,7 @@ template<typename V> CV_ALWAYS_INLINE void flipHoriz_single( const uchar* src, s
v_store((T*)(dst + j - v_uint8x16::nlanes), t0);
v_store((T*)(dst + i), t1);
}
if (((size_t)src|(size_t)dst) % sizeof(T) == 0)
if (isAligned<sizeof(T)>(src, dst))
{
for ( ; i < width; i += sizeof(T), j -= sizeof(T) )
{
@ -620,6 +630,11 @@ template<typename T1, typename T2> CV_ALWAYS_INLINE void flipHoriz_double( const
int end = (int)(size.width*esz);
int width = (end + 1)/2;
#if CV_CHECK_ALIGNMENT
CV_Assert(isAligned<sizeof(T1)>(src, dst));
CV_Assert(isAligned<sizeof(T2)>(src, dst));
#endif
for( ; size.height--; src += sstep, dst += dstep )
{
for ( int i = 0, j = end; i < width; i += sizeof(T1) + sizeof(T2), j -= sizeof(T1) + sizeof(T2) )
@ -644,6 +659,9 @@ static void
flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
{
#if CV_SIMD
#if CV_CHECK_ALIGNMENT
size_t alignmentMark = ((size_t)src)|((size_t)dst)|sstep|dstep;
#endif
if (esz == 2 * v_uint8x16::nlanes)
{
int end = (int)(size.width*esz);
@ -693,15 +711,27 @@ flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size,
}
}
}
else if (esz == 8)
else if (esz == 8
#if CV_CHECK_ALIGNMENT
&& isAligned<sizeof(uint64)>(alignmentMark)
#endif
)
{
flipHoriz_single<v_uint64x2>(src, sstep, dst, dstep, size, esz);
}
else if (esz == 4)
else if (esz == 4
#if CV_CHECK_ALIGNMENT
&& isAligned<sizeof(unsigned)>(alignmentMark)
#endif
)
{
flipHoriz_single<v_uint32x4>(src, sstep, dst, dstep, size, esz);
}
else if (esz == 2)
else if (esz == 2
#if CV_CHECK_ALIGNMENT
&& isAligned<sizeof(ushort)>(alignmentMark)
#endif
)
{
flipHoriz_single<v_uint16x8>(src, sstep, dst, dstep, size, esz);
}
@ -709,7 +739,11 @@ flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size,
{
flipHoriz_single<v_uint8x16>(src, sstep, dst, dstep, size, esz);
}
else if (esz == 24)
else if (esz == 24
#if CV_CHECK_ALIGNMENT
&& isAligned<sizeof(uint64_t)>(alignmentMark)
#endif
)
{
int end = (int)(size.width*esz);
int width = (end + 1)/2;
@ -732,6 +766,7 @@ flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size,
}
}
}
#if !CV_CHECK_ALIGNMENT
else if (esz == 12)
{
flipHoriz_double<uint64_t,uint>(src, sstep, dst, dstep, size, esz);
@ -744,8 +779,9 @@ flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size,
{
flipHoriz_double<ushort,uchar>(src, sstep, dst, dstep, size, esz);
}
else
#endif
else
#endif // CV_SIMD
{
int i, j, limit = (int)(((size.width + 1)/2)*esz);
AutoBuffer<int> _tab(size.width*esz);
@ -779,16 +815,33 @@ flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size,
{
int i = 0;
#if CV_SIMD
for( ; i <= size.width - (v_int32::nlanes * 4); i += v_int32::nlanes * 4 )
#if CV_CHECK_ALIGNMENT
if (isAligned<sizeof(int)>(src0, src1, dst0, dst1))
#endif
{
v_int32 t0 = vx_load((int*)(src0 + i));
v_int32 t1 = vx_load((int*)(src1 + i));
vx_store((int*)(dst0 + i), t1);
vx_store((int*)(dst1 + i), t0);
for (; i <= size.width - CV_SIMD_WIDTH; i += CV_SIMD_WIDTH)
{
v_int32 t0 = vx_load((int*)(src0 + i));
v_int32 t1 = vx_load((int*)(src1 + i));
vx_store((int*)(dst0 + i), t1);
vx_store((int*)(dst1 + i), t0);
}
}
#if CV_CHECK_ALIGNMENT
else
{
for (; i <= size.width - CV_SIMD_WIDTH; i += CV_SIMD_WIDTH)
{
v_uint8 t0 = vx_load(src0 + i);
v_uint8 t1 = vx_load(src1 + i);
vx_store(dst0 + i, t1);
vx_store(dst1 + i, t0);
}
}
#endif
#endif
if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
if (isAligned<sizeof(int)>(src0, src1, dst0, dst1))
{
for( ; i <= size.width - 16; i += 16 )
{

View File

@ -0,0 +1,37 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CUDA_MOG2_H
#define OPENCV_CUDA_MOG2_H
#include "opencv2/core/cuda.hpp"
// Minimal forward declaration of the CUDA stream handle so this header can be
// included without pulling in the CUDA runtime headers.
struct CUstream_st;
typedef struct CUstream_st *cudaStream_t;
namespace cv { namespace cuda {
class Stream;
namespace device { namespace mog2 {
// Parameter block handed to the MOG2 CUDA kernels below via `constants`.
// NOTE(review): field semantics are inferred from their names (MOG2 thresholds,
// variance bounds, shadow value) -- confirm against BackgroundSubtractorMOG2 docs.
typedef struct
{
float Tb_;
float TB_;
float Tg_;
float varInit_;
float varMin_;
float varMax_;
float tau_;
int nmixtures_;
unsigned char shadowVal_;
} Constants;
// Kernel launcher: updates the per-pixel mixture state (modesUsed/weight/variance/mean)
// from `frame` (cn = channel count) and writes `fgmask`; presumably implemented in the
// matching .cu file -- only the declaration is visible here.
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, const Constants *const constants, cudaStream_t stream);
// Kernel launcher: reconstructs the background image into `dst` from the learned model.
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, const Constants *const constants, cudaStream_t stream);
} } } }
#endif /* OPENCV_CUDA_MOG2_H */

View File

@ -394,7 +394,7 @@ CV__DNN_INLINE_NS_BEGIN
CV_WRAP Net(); //!< Default constructor.
CV_WRAP ~Net(); //!< Destructor frees the net only if there aren't references to the net anymore.
/** @brief Create a network from Intel's Model Optimizer intermediate representation.
/** @brief Create a network from Intel's Model Optimizer intermediate representation (IR).
* @param[in] xml XML configuration file with network's topology.
* @param[in] bin Binary file with trained weights.
* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
@ -402,6 +402,25 @@ CV__DNN_INLINE_NS_BEGIN
*/
CV_WRAP static Net readFromModelOptimizer(const String& xml, const String& bin);
/** @brief Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
* @param[in] bufferModelConfig buffer with model's configuration.
* @param[in] bufferWeights buffer with model's trained weights.
* @returns Net object.
*/
CV_WRAP static
Net readFromModelOptimizer(const std::vector<uchar>& bufferModelConfig, const std::vector<uchar>& bufferWeights);
/** @brief Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
* @param[in] bufferModelConfigPtr buffer pointer of model's configuration.
* @param[in] bufferModelConfigSize buffer size of model's configuration.
* @param[in] bufferWeightsPtr buffer pointer of model's trained weights.
* @param[in] bufferWeightsSize buffer size of model's trained weights.
* @returns Net object.
*/
static
Net readFromModelOptimizer(const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize,
const uchar* bufferWeightsPtr, size_t bufferWeightsSize);
/** Returns true if there are no layers in the network. */
CV_WRAP bool empty() const;
@ -869,7 +888,31 @@ CV__DNN_INLINE_NS_BEGIN
* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_EXPORTS_W Net readNetFromModelOptimizer(const String &xml, const String &bin);
CV_EXPORTS_W
Net readNetFromModelOptimizer(const String &xml, const String &bin);
/** @brief Load a network from Intel's Model Optimizer intermediate representation.
* @param[in] bufferModelConfig Buffer contains XML configuration with network's topology.
* @param[in] bufferWeights Buffer contains binary data with trained weights.
* @returns Net object.
* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_EXPORTS_W
Net readNetFromModelOptimizer(const std::vector<uchar>& bufferModelConfig, const std::vector<uchar>& bufferWeights);
/** @brief Load a network from Intel's Model Optimizer intermediate representation.
* @param[in] bufferModelConfigPtr Pointer to buffer which contains XML configuration with network's topology.
* @param[in] bufferModelConfigSize Binary size of XML configuration data.
* @param[in] bufferWeightsPtr Pointer to buffer which contains binary data with trained weights.
* @param[in] bufferWeightsSize Binary size of trained weights data.
* @returns Net object.
* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_EXPORTS
Net readNetFromModelOptimizer(const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize,
const uchar* bufferWeightsPtr, size_t bufferWeightsSize);
/** @brief Reads a network model <a href="https://onnx.ai/">ONNX</a>.
* @param onnxFile path to the .onnx file with text description of the network architecture.

View File

@ -3192,28 +3192,22 @@ struct Net::Impl
return getBlobAsync(getPinByAlias(outputName));
}
#endif // CV_CXX11
#ifdef HAVE_INF_ENGINE
static
Net createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNet);
#endif
};
Net::Net() : impl(new Net::Impl)
{
}
Net Net::readFromModelOptimizer(const String& xml, const String& bin)
#ifdef HAVE_INF_ENGINE
/*static*/
Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNet)
{
#ifndef HAVE_INF_ENGINE
CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
#else
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
InferenceEngine::CNNNetReader reader;
reader.ReadNetwork(xml);
reader.ReadWeights(bin);
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
#else
InferenceEngine::Core& ie = getCore();
InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
#endif
CV_TRACE_FUNCTION();
std::vector<String> inputsNames;
std::vector<MatShape> inp_shapes;
@ -3293,9 +3287,95 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
cvNet.impl->skipInfEngineInit = true;
return cvNet;
}
#endif // HAVE_INF_ENGINE
// Builds a Net from Model Optimizer IR files (XML topology + BIN weights).
// Without Inference Engine support this always throws StsError; otherwise the IR is
// parsed with the reader that matches the IE release and converted via the shared
// Impl::createNetworkFromModelOptimizer() path.
Net Net::readFromModelOptimizer(const String& xml, const String& bin)
{
CV_TRACE_FUNCTION();
#ifndef HAVE_INF_ENGINE
CV_UNUSED(xml); CV_UNUSED(bin);
CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
#else
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
// Releases up to 2019R3: file-based CNNNetReader API.
InferenceEngine::CNNNetReader reader;
reader.ReadNetwork(xml);
reader.ReadWeights(bin);
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
#else
// Newer releases: Core::ReadNetwork reads both files directly.
InferenceEngine::Core& ie = getCore();
InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
#endif
return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
}
// Builds a Net from Model Optimizer IR held in std::vector buffers.
// Both buffers must be non-empty; delegates to the raw-pointer overload.
Net Net::readFromModelOptimizer(const std::vector<uchar>& bufferModelConfig, const std::vector<uchar>& bufferWeights)
{
CV_TRACE_FUNCTION();
CV_Assert(!bufferModelConfig.empty());
CV_Assert(!bufferWeights.empty());
return readFromModelOptimizer(bufferModelConfig.data(), bufferModelConfig.size(),
bufferWeights.data(), bufferWeights.size());
}
// Builds a Net from Model Optimizer IR held in raw memory buffers.
// @param bufferModelConfigPtr  pointer to the XML topology bytes
// @param bufferModelConfigSize size of the XML topology data, in bytes
// @param bufferWeightsPtr      pointer to the binary weights blob
// @param bufferWeightsSize     size of the weights blob, in bytes
// Without Inference Engine support this always throws StsError.
Net Net::readFromModelOptimizer(
        const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize,
        const uchar* bufferWeightsPtr, size_t bufferWeightsSize
)
{
    CV_TRACE_FUNCTION();
#ifndef HAVE_INF_ENGINE
    CV_UNUSED(bufferModelConfigPtr); CV_UNUSED(bufferWeightsPtr);
    // Fix: the second macro previously repeated bufferModelConfigSize, so
    // bufferWeightsSize stayed genuinely unused (compiler warning) in IE-less builds.
    CV_UNUSED(bufferModelConfigSize); CV_UNUSED(bufferWeightsSize);
    CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
#else
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
    InferenceEngine::CNNNetReader reader;
    try
    {
        // Older IE: parse the XML from memory, then hand the weights over as a
        // freshly allocated U8 blob (the reader keeps ownership of the copy).
        reader.ReadNetwork(bufferModelConfigPtr, bufferModelConfigSize);

        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
        InferenceEngine::TBlob<uint8_t>::Ptr weightsBlobPtr(new InferenceEngine::TBlob<uint8_t>(tensorDesc));
        weightsBlobPtr->allocate();
        std::memcpy(weightsBlobPtr->buffer(), (uchar*)bufferWeightsPtr, bufferWeightsSize);
        reader.SetWeights(weightsBlobPtr);
    }
    catch (const std::exception& e)
    {
        CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
    }
    InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
#else
    InferenceEngine::Core& ie = getCore();

    std::string model; model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);

    InferenceEngine::CNNNetwork ieNet;
    try
    {
        // Newer IE: Core::ReadNetwork takes the XML as a string plus a weights Blob
        // that wraps the caller's buffer without copying.
        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
        InferenceEngine::Blob::CPtr weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, (uint8_t*)bufferWeightsPtr, bufferWeightsSize);

        ieNet = ie.ReadNetwork(model, weights_blob);
    }
    catch (const std::exception& e)
    {
        CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
    }
#endif
    return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
}
// Destructor. Intentionally empty: per the header comment ("frees the net only if
// there aren't references to the net anymore") impl is presumably reference-counted,
// so cleanup happens when the last holder releases it -- confirm impl's type in Net.
Net::~Net()
{
}
@ -4656,7 +4736,7 @@ Net readNet(const String& _framework, const std::vector<uchar>& bufferModel,
else if (framework == "torch")
CV_Error(Error::StsNotImplemented, "Reading Torch models from buffers");
else if (framework == "dldt")
CV_Error(Error::StsNotImplemented, "Reading Intel's Model Optimizer models from buffers");
return readNetFromModelOptimizer(bufferConfig, bufferModel);
CV_Error(Error::StsError, "Cannot determine an origin framework with a name " + framework);
}
@ -4665,5 +4745,21 @@ Net readNetFromModelOptimizer(const String &xml, const String &bin)
return Net::readFromModelOptimizer(xml, bin);
}
// Free-function convenience wrapper over Net::readFromModelOptimizer for IR held
// in std::vector buffers (XML config + binary weights).
Net readNetFromModelOptimizer(const std::vector<uchar>& bufferCfg, const std::vector<uchar>& bufferModel)
{
return Net::readFromModelOptimizer(bufferCfg, bufferModel);
}
// Free-function convenience wrapper over Net::readFromModelOptimizer for IR held
// in raw memory buffers (pointer + size for config and weights).
Net readNetFromModelOptimizer(
const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize,
const uchar* bufferWeightsPtr, size_t bufferWeightsSize
)
{
return Net::readFromModelOptimizer(
bufferModelConfigPtr, bufferModelConfigSize,
bufferWeightsPtr, bufferWeightsSize
);
}
CV__DNN_INLINE_NS_END
}} // namespace

View File

@ -641,6 +641,60 @@ TEST_P(Test_Model_Optimizer, forward_two_nets)
normAssert(ref0, ref2, 0, 0);
}
// Verifies that a network loaded from in-memory XML/weights buffers produces output
// identical (zero tolerance in normAssert) to the same network loaded from files.
TEST_P(Test_Model_Optimizer, readFromBuffer)
{
const Backend backendId = get<0>(GetParam());
const Target targetId = get<1>(GetParam());
// NOTE(review): this skip message looks copy-pasted from an async-forward test;
// the condition itself (IE-only backends) is what matters for this test.
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
// FP16-capable targets use the "_fp16" flavor of the test model.
const std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
const std::string& modelFile = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
else
FAIL() << "Unknown backendId";
// Reference path: load the model from files.
Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
net1.setPreferableBackend(backendId);
net1.setPreferableTarget(targetId);
// Path under test: the same model passed as raw in-memory buffers.
std::vector<char> modelConfig;
readFileContent(modelFile, modelConfig);
std::vector<char> weights;
readFileContent(weightsFile, weights);
Net net2 = readNetFromModelOptimizer(
(const uchar*)modelConfig.data(), modelConfig.size(),
(const uchar*)weights.data(), weights.size()
);
net2.setPreferableBackend(backendId);
net2.setPreferableTarget(targetId);
// Feed both nets the identical random NCHW blob.
int blobSize[] = {2, 6, 75, 113};
Mat input(4, &blobSize[0], CV_32F);
randu(input, 0, 255);
Mat ref, actual;
{
net1.setInput(input);
ref = net1.forward();
}
{
net2.setInput(input);
actual = net2.forward();
}
// Zero tolerance: both loading paths must give bit-exact results.
normAssert(ref, actual, "", 0, 0);
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
dnnBackendsAndTargetsIE()
);

View File

@ -145,7 +145,7 @@ rgbe2float(float *red, float *green, float *blue, unsigned char rgbe[4])
/* default minimal header. modify if you want more information in header */
int RGBE_WriteHeader(FILE *fp, int width, int height, rgbe_header_info *info)
{
const char *programtype = "RGBE";
const char *programtype = "RADIANCE";
if (info && (info->valid & RGBE_VALID_PROGRAMTYPE))
programtype = info->programtype;

View File

@ -920,20 +920,23 @@ static inline void interpolateLanczos4( float x, float* coeffs )
static const double cs[][2]=
{{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}};
if( x < FLT_EPSILON )
{
for( int i = 0; i < 8; i++ )
coeffs[i] = 0;
coeffs[3] = 1;
return;
}
float sum = 0;
double y0=-(x+3)*CV_PI*0.25, s0 = std::sin(y0), c0= std::cos(y0);
for(int i = 0; i < 8; i++ )
{
double y = -(x+3-i)*CV_PI*0.25;
coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
float y0_ = (x+3-i);
if (fabs(y0_) >= 1e-6f)
{
double y = -y0_*CV_PI*0.25;
coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
}
else
{
// special handling for 'x' values:
// - ~0.0: 0 0 0 1 0 0 0 0
// - ~1.0: 0 0 0 0 1 0 0 0
coeffs[i] = 1e30f;
}
sum += coeffs[i];
}
@ -1605,13 +1608,14 @@ struct HResizeLinearVecU8_X4
for( dx = 0; dx < len0; dx += step )
{
int ofs[4] = { xofs[dx], xofs[dx + 2], xofs[dx + 4], xofs[dx + 6] };
v_int16x8 al = v_load(alpha+dx*2);
v_int16x8 ah = v_load(alpha+dx*2+8);
v_uint16x8 sl, sh;
v_expand(v_interleave_pairs(v_lut_quads(S0, xofs+dx)), sl, sh);
v_expand(v_interleave_pairs(v_lut_quads(S0, ofs)), sl, sh);
v_store(&D0[dx], v_dotprod(v_reinterpret_as_s16(sl), al));
v_store(&D0[dx+4], v_dotprod(v_reinterpret_as_s16(sh), ah));
v_expand(v_interleave_pairs(v_lut_pairs(S1, xofs+dx)), sl, sh);
v_expand(v_interleave_pairs(v_lut_quads(S1, ofs)), sl, sh);
v_store(&D1[dx], v_dotprod(v_reinterpret_as_s16(sl), al));
v_store(&D1[dx+4], v_dotprod(v_reinterpret_as_s16(sh), ah));
}
@ -1622,10 +1626,11 @@ struct HResizeLinearVecU8_X4
int *D = dst[k];
for( dx = 0; dx < len0; dx += step )
{
int ofs[4] = { xofs[dx], xofs[dx + 2], xofs[dx + 4], xofs[dx + 6] };
v_int16x8 al = v_load(alpha+dx*2);
v_int16x8 ah = v_load(alpha+dx*2+8);
v_uint16x8 sl, sh;
v_expand(v_interleave_pairs(v_lut_quads(S, xofs+dx)), sl, sh);
v_expand(v_interleave_pairs(v_lut_quads(S, ofs)), sl, sh);
v_store(&D[dx], v_dotprod(v_reinterpret_as_s16(sl), al));
v_store(&D[dx+4], v_dotprod(v_reinterpret_as_s16(sh), ah));
}

View File

@ -60,6 +60,29 @@ static void findCircle3pts(Point2f *pts, Point2f &center, float &radius)
Point2f midPoint2 = (pts[0] + pts[2]) / 2.0f;
float c2 = midPoint2.x * v2.x + midPoint2.y * v2.y;
float det = v1.x * v2.y - v1.y * v2.x;
if (fabs(det) <= EPS)
{
// v1 and v2 are colinear, so the longest distance between any 2 points
// is the diameter of the minimum enclosing circle.
float d1 = normL2Sqr<float>(pts[0] - pts[1]);
float d2 = normL2Sqr<float>(pts[0] - pts[2]);
float d3 = normL2Sqr<float>(pts[1] - pts[2]);
radius = sqrt(std::max(d1, std::max(d2, d3))) * 0.5f + EPS;
if (d1 >= d2 && d1 >= d3)
{
center = (pts[0] + pts[1]) * 0.5f;
}
else if (d2 >= d1 && d2 >= d3)
{
center = (pts[0] + pts[2]) * 0.5f;
}
else
{
CV_DbgAssert(d3 >= d1 && d3 >= d2);
center = (pts[1] + pts[2]) * 0.5f;
}
return;
}
float cx = (c1 * v2.y - c2 * v1.y) / det;
float cy = (v1.x * c2 - v2.x * c1) / det;
center.x = (float)cx;
@ -92,7 +115,13 @@ static void findThirdPoint(const PT *pts, int i, int j, Point2f &center, float &
ptsf[0] = (Point2f)pts[i];
ptsf[1] = (Point2f)pts[j];
ptsf[2] = (Point2f)pts[k];
findCircle3pts(ptsf, center, radius);
Point2f new_center; float new_radius = 0;
findCircle3pts(ptsf, new_center, new_radius);
if (new_radius > 0)
{
radius = new_radius;
center = new_center;
}
}
}
}
@ -117,7 +146,13 @@ void findSecondPoint(const PT *pts, int i, Point2f &center, float &radius)
}
else
{
findThirdPoint(pts, i, j, center, radius);
Point2f new_center; float new_radius = 0;
findThirdPoint(pts, i, j, new_center, new_radius);
if (new_radius > 0)
{
radius = new_radius;
center = new_center;
}
}
}
}
@ -143,7 +178,13 @@ static void findMinEnclosingCircle(const PT *pts, int count, Point2f &center, fl
}
else
{
findSecondPoint(pts, i, center, radius);
Point2f new_center; float new_radius = 0;
findSecondPoint(pts, i, new_center, new_radius);
if (new_radius > 0)
{
radius = new_radius;
center = new_center;
}
}
}
}

View File

@ -1084,6 +1084,87 @@ int CV_MinCircleTest2::validate_test_results( int test_case_idx )
return code;
}
/****************************************************************************************\
* minEnclosingCircle Test 3 *
\****************************************************************************************/
// Exercises minEnclosingCircle on hand-built 3-point configurations: obtuse,
// right and acute triangles, collinear points, and duplicated points, pinning
// the expected center and radius for each case.
TEST(Imgproc_minEnclosingCircle, basic_test)
{
vector<Point2f> pts;
pts.push_back(Point2f(0, 0));
pts.push_back(Point2f(10, 0));
pts.push_back(Point2f(5, 1));
const float EPS = 1.0e-3f;
Point2f center;
float radius;
// pts[2] is within the circle with diameter pts[0] - pts[1].
// 2
// 0 1
// NB: The triangle is obtuse, so the only pts[0] and pts[1] are on the circle.
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 5, EPS);
EXPECT_NEAR(center.y, 0, EPS);
EXPECT_NEAR(5, radius, EPS);
// pts[2] is on the circle with diameter pts[0] - pts[1].
// 2
// 0 1
pts[2] = Point2f(5, 5);
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 5, EPS);
EXPECT_NEAR(center.y, 0, EPS);
EXPECT_NEAR(5, radius, EPS);
// pts[2] is outside the circle with diameter pts[0] - pts[1].
// 2
//
//
// 0 1
// NB: The triangle is acute, so all 3 points are on the circle.
pts[2] = Point2f(5, 10);
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 5, EPS);
EXPECT_NEAR(center.y, 3.75, EPS);
EXPECT_NEAR(6.25f, radius, EPS);
// The 3 points are colinear.
// Degenerate case: the circle is determined by the two extreme points alone.
pts[2] = Point2f(3, 0);
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 5, EPS);
EXPECT_NEAR(center.y, 0, EPS);
EXPECT_NEAR(5, radius, EPS);
// 2 points are the same.
pts[2] = pts[1];
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 5, EPS);
EXPECT_NEAR(center.y, 0, EPS);
EXPECT_NEAR(5, radius, EPS);
// 3 points are the same.
// Fully degenerate case: a single distinct point gives a zero-radius circle.
pts[0] = pts[1];
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 10, EPS);
EXPECT_NEAR(center.y, 0, EPS);
EXPECT_NEAR(0, radius, EPS);
}
// Regression test for issue #16051: a small near-collinear point set containing a
// duplicated point; pins the expected circle so the collinear/duplicate handling
// path keeps returning a circle that covers all input points.
TEST(Imgproc_minEnclosingCircle, regression_16051) {
vector<Point2f> pts;
pts.push_back(Point2f(85, 1415));
pts.push_back(Point2f(87, 1415));
pts.push_back(Point2f(89, 1414));
pts.push_back(Point2f(89, 1414));
pts.push_back(Point2f(87, 1412));
Point2f center;
float radius;
minEnclosingCircle(pts, center, radius);
EXPECT_NEAR(center.x, 86.9f, 1e-3);
EXPECT_NEAR(center.y, 1414.1f, 1e-3);
EXPECT_NEAR(2.1024551f, radius, 1e-3);
}
/****************************************************************************************\
* Perimeter Test *
\****************************************************************************************/

View File

@ -1400,6 +1400,19 @@ TEST(Resize, Area_half)
}
}
// Regression test for issue #16192: resizing a constant (all-128) image with
// INTER_LANCZOS4 must produce a constant image (NORM_INF distance 0) -- this
// exercises the interpolateLanczos4 special handling near integer coordinates.
TEST(Resize, lanczos4_regression_16192)
{
Size src_size(11, 17);
Size dst_size(11, 153);
Mat src(src_size, CV_8UC3, Scalar::all(128));
Mat dst(dst_size, CV_8UC3, Scalar::all(255));
cv::resize(src, dst, dst_size, 0, 0, INTER_LANCZOS4);
Mat expected(dst_size, CV_8UC3, Scalar::all(128));
// On failure, print a corner of the result to aid debugging.
EXPECT_EQ(cvtest::norm(dst, expected, NORM_INF), 0) << dst(Rect(0,0,8,8));
}
TEST(Imgproc_Warp, multichannel)
{
static const int inter_types[] = {INTER_NEAREST, INTER_AREA, INTER_CUBIC,

View File

@ -230,7 +230,7 @@ public class JavaCamera2View extends CameraBridgeViewBase {
@Override
protected void disconnectCamera() {
Log.i(LOGTAG, "closeCamera");
Log.i(LOGTAG, "close camera");
try {
CameraDevice c = mCameraDevice;
mCameraDevice = null;
@ -241,13 +241,14 @@ public class JavaCamera2View extends CameraBridgeViewBase {
if (null != c) {
c.close();
}
} finally {
stopBackgroundThread();
if (null != mImageReader) {
mImageReader.close();
mImageReader = null;
}
} finally {
stopBackgroundThread();
}
Log.i(LOGTAG, "camera closed!");
}
public static class JavaCameraSizeAccessor implements ListItemAccessor {