mirror of https://github.com/opencv/opencv.git
synced 2024-12-15 09:49:13 +08:00
8cbdd0c833
C-API cleanup: apps, imgproc_c and some constants #25075

Merge with https://github.com/opencv/opencv_contrib/pull/3642

* Removed obsolete apps - traincascade and createsamples (please use older OpenCV versions if you need them). These apps relied heavily on the C-API.
* Removed all mentions of imgproc C-API headers (imgproc_c.h, types_c.h) - they were empty and only included core C-API headers.
* Replaced usage of several C constants with C++ ones (error codes, norm modes, RNG modes, PCA modes, ...) - the largest part of this PR (split into two parts - all modules and calib+3d - for easier backporting).
* Removed imgproc C-API headers (as a separate commit, so that the other changes could be backported to 4.x).

Most of these changes can be backported to 4.x.
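A typical constant replacement from this PR, as it appears in this file (exact call sites vary across the PR):

    CV_Error( CV_StsOutOfRange, "..." );          // before: C-API error code
    CV_Error( cv::Error::StsOutOfRange, "..." );  // after: C++ enum value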
1381 lines
38 KiB
C++
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "umatrix.hpp"

#include <opencv2/core/utils/tls.hpp>

///////////////////////////////// UMat implementation ///////////////////////////////

namespace cv {

// forward decls, implementation is below in this file
void setSize(UMat& m, int _dims, const int* _sz, const size_t* _steps,
             bool autoSteps = false);

void updateContinuityFlag(UMat& m);
void finalizeHdr(UMat& m);

UMatData::UMatData(const MatAllocator* allocator)
{
    prevAllocator = currAllocator = allocator;
    urefcount = refcount = mapcount = 0;
    data = origdata = 0;
    size = 0;
    flags = static_cast<UMatData::MemoryFlag>(0);
    handle = 0;
    userdata = 0;
    allocatorFlags_ = 0;
    originalUMatData = NULL;
}

UMatData::~UMatData()
{
    prevAllocator = currAllocator = 0;
    urefcount = refcount = 0;
    CV_Assert(mapcount == 0);
    data = origdata = 0;
    size = 0;
    bool isAsyncCleanup = !!(flags & UMatData::ASYNC_CLEANUP);
    flags = static_cast<UMatData::MemoryFlag>(0);
    handle = 0;
    userdata = 0;
    allocatorFlags_ = 0;
    if (originalUMatData)
    {
        bool showWarn = false;
        UMatData* u = originalUMatData;
        bool zero_Ref = CV_XADD(&(u->refcount), -1) == 1;
        if (zero_Ref)
        {
            // simulate Mat::deallocate
            if (u->mapcount != 0)
            {
                (u->currAllocator ? u->currAllocator : /* TODO allocator ? allocator :*/ Mat::getDefaultAllocator())->unmap(u);
            }
            else
            {
                // we don't do "map", so we can't do "unmap"
            }
        }
        bool zero_URef = CV_XADD(&(u->urefcount), -1) == 1;
        if (zero_Ref && !zero_URef)
            showWarn = true;
        if (zero_Ref && zero_URef) // oops, we need to free resources
        {
            showWarn = !isAsyncCleanup;
            // simulate UMat::deallocate
            u->currAllocator->deallocate(u);
        }
#ifndef NDEBUG
        if (showWarn)
        {
            static int warn_message_showed = 0;
            if (warn_message_showed++ < 100)
            {
                fflush(stdout);
                fprintf(stderr, "\n! OPENCV warning: getUMat()/getMat() call chain possible problem."
                                "\n! Base object is dead, while nested/derived object is still alive or processed."
                                "\n! Please check lifetime of UMat/Mat objects!\n");
                fflush(stderr);
            }
        }
#else
        CV_UNUSED(showWarn);
#endif
        originalUMatData = NULL;
    }
}

#ifndef OPENCV_DISABLE_THREAD_SUPPORT

// it should be a prime number for the best hash function
enum { UMAT_NLOCKS = 31 };
static Mutex umatLocks[UMAT_NLOCKS];

static size_t getUMatDataLockIndex(const UMatData* u)
{
    size_t idx = ((size_t)(void*)u) % UMAT_NLOCKS;
    return idx;
}

void UMatData::lock()
{
    size_t idx = getUMatDataLockIndex(this);
    //printf("%d lock(%d)\n", cv::utils::getThreadID(), (int)idx);
    umatLocks[idx].lock();
}

void UMatData::unlock()
{
    size_t idx = getUMatDataLockIndex(this);
    //printf("%d unlock(%d)\n", cv::utils::getThreadID(), (int)idx);
    umatLocks[idx].unlock();
}


// Do not allow several lock() calls with different UMatData objects.
struct UMatDataAutoLocker
{
    int usage_count;
    UMatData* locked_objects[2];
    UMatDataAutoLocker() : usage_count(0) { locked_objects[0] = NULL; locked_objects[1] = NULL; }

    void lock(UMatData*& u1)
    {
        bool locked_1 = (u1 == locked_objects[0] || u1 == locked_objects[1]);
        if (locked_1)
        {
            u1 = NULL;
            return;
        }
        CV_Assert(usage_count == 0);  // UMatDataAutoLock can't be used multiple times from the same thread
        usage_count = 1;
        locked_objects[0] = u1;
        u1->lock();
    }
    void lock(UMatData*& u1, UMatData*& u2)
    {
        bool locked_1 = (u1 == locked_objects[0] || u1 == locked_objects[1]);
        bool locked_2 = (u2 == locked_objects[0] || u2 == locked_objects[1]);
        if (locked_1)
            u1 = NULL;
        if (locked_2)
            u2 = NULL;
        if (locked_1 && locked_2)
            return;
        CV_Assert(usage_count == 0);  // UMatDataAutoLock can't be used multiple times from the same thread
        usage_count = 1;
        locked_objects[0] = u1;
        locked_objects[1] = u2;
        if (u1)
            u1->lock();
        if (u2)
            u2->lock();
    }
    void release(UMatData* u1, UMatData* u2)
    {
        if (u1 == NULL && u2 == NULL)
            return;
        CV_Assert(usage_count == 1);
        usage_count = 0;
        if (u1)
            u1->unlock();
        if (u2)
            u2->unlock();
        locked_objects[0] = NULL; locked_objects[1] = NULL;
    }
};
static TLSData<UMatDataAutoLocker>& getUMatDataAutoLockerTLS()
{
    CV_SINGLETON_LAZY_INIT_REF(TLSData<UMatDataAutoLocker>, new TLSData<UMatDataAutoLocker>());
}
static UMatDataAutoLocker& getUMatDataAutoLocker() { return getUMatDataAutoLockerTLS().getRef(); }


UMatDataAutoLock::UMatDataAutoLock(UMatData* u) : u1(u), u2(NULL)
{
    getUMatDataAutoLocker().lock(u1);
}
UMatDataAutoLock::UMatDataAutoLock(UMatData* u1_, UMatData* u2_) : u1(u1_), u2(u2_)
{
    if (getUMatDataLockIndex(u1) > getUMatDataLockIndex(u2))
    {
        std::swap(u1, u2);
    }
    getUMatDataAutoLocker().lock(u1, u2);
}
UMatDataAutoLock::~UMatDataAutoLock()
{
    getUMatDataAutoLocker().release(u1, u2);
}

#else

void UMatData::lock()
{
    // nothing in OPENCV_DISABLE_THREAD_SUPPORT mode
}

void UMatData::unlock()
{
    // nothing in OPENCV_DISABLE_THREAD_SUPPORT mode
}

UMatDataAutoLock::UMatDataAutoLock(UMatData* u) : u1(u), u2(NULL)
{
    // nothing in OPENCV_DISABLE_THREAD_SUPPORT mode
}
UMatDataAutoLock::UMatDataAutoLock(UMatData* u1_, UMatData* u2_) : u1(u1_), u2(u2_)
{
    // nothing in OPENCV_DISABLE_THREAD_SUPPORT mode
}
UMatDataAutoLock::~UMatDataAutoLock()
{
    // nothing in OPENCV_DISABLE_THREAD_SUPPORT mode
}

#endif // OPENCV_DISABLE_THREAD_SUPPORT
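
// Usage sketch (illustrative comment, not part of the upstream sources):
//     UMatDataAutoLock guard(a.u, b.u);  // locks both buffers, ordered by lock index
//     ... read/update fields of a.u and b.u ...
// Both locks are released automatically when `guard` leaves scope; the TLS
// locker above also collapses re-entrant locking of the same objects.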

//////////////////////////////// UMat ////////////////////////////////

UMat::UMat(UMatUsageFlags _usageFlags) CV_NOEXCEPT
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{}

UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create(_rows, _cols, _type);
}

UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create(_rows, _cols, _type);
    *this = _s;
}

UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create( _sz.height, _sz.width, _type );
}

UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create(_sz.height, _sz.width, _type);
    *this = _s;
}

UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create(_dims, _sz, _type);
}

UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows)
{
    create(_dims, _sz, _type);
    *this = _s;
}

UMat::UMat(const UMat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
{
    addref();
    if( m.dims <= 2 )
    {
        int _1d = dims <= 1;
        step.buf[0] = m.step.buf[0]; step.buf[1] = m.step.buf[1];
        step.p = &step.buf[_1d];
        size.p = &rows + _1d;
    }
    else
    {
        dims = 0;
        copySize(m);
    }
}

UMat& UMat::operator=(const UMat& m)
{
    if( this != &m )
    {
        const_cast<UMat&>(m).addref();
        release();
        flags = m.flags;
        if( dims <= 2 && m.dims <= 2 )
        {
            dims = m.dims;
            rows = m.rows;
            cols = m.cols;
            int _1d = dims <= 1;
            step.buf[0] = m.step.buf[0];
            step.buf[1] = m.step.buf[1];
            step.p = &step.buf[_1d];
            size.p = &rows + _1d;
        }
        else
            copySize(m);
        allocator = m.allocator;
        usageFlags = m.usageFlags;
        u = m.u;
        offset = m.offset;
    }
    return *this;
}

UMat UMat::clone() const
{
    UMat m;
    copyTo(m);
    return m;
}

void UMat::assignTo(UMat& m, int _type) const
{
    if( _type < 0 )
        m = *this;
    else
        convertTo(m, _type);
}

void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags)
{
    int sz[] = {_rows, _cols};
    create(2, sz, _type, _usageFlags);
}

void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags)
{
    create(_sz.height, _sz.width, _type, _usageFlags);
}

void UMat::createSameSize(InputArray arr, int _type, UMatUsageFlags _usageFlags)
{
    int arr_size[CV_MAX_DIM];
    int ndims = arr.sizend(arr_size);
    create(ndims, arr_size, _type, _usageFlags);
}

void UMat::addref()
{
    if( u )
        CV_XADD(&(u->urefcount), 1);
}

void UMat::release()
{
    if( u && CV_XADD(&(u->urefcount), -1) == 1 )
        deallocate();
    for(int i = 0; i < dims; i++)
        size.p[i] = 0;
    u = 0;
}

bool UMat::empty() const
{
    return u == 0 || total() == 0;
}

size_t UMat::total() const
{
    if( dims <= 2 )
        return (size_t)rows * cols;
    size_t p = 1;
    for( int i = 0; i < dims; i++ )
        p *= size[i];
    return p;
}
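
// Illustrative note (not part of the upstream sources): total() counts
// elements, not bytes or channels -- e.g. a 4x5 CV_8UC3 UMat reports
// total() == 20, while its buffer holds 60 bytes.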


UMat::UMat(UMat&& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
  usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows)
{
    if (m.dims <= 2) // move new step/size info
    {
        int _1d = m.dims <= 1;
        step.buf[0] = m.step.buf[0];
        step.buf[1] = m.step.buf[1];
        step.p = &step.buf[_1d];
        size.p = &rows + _1d;
    }
    else
    {
        CV_DbgAssert(m.step.p != m.step.buf && m.step.p != m.step.buf+1);
        step.p = m.step.p;
        size.p = m.size.p;
        m.step.p = m.step.buf;
        m.size.p = &m.rows;
    }
    m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0;
    m.allocator = NULL;
    m.u = NULL;
    m.offset = 0;
}

UMat& UMat::operator=(UMat&& m)
{
    if (this == &m)
        return *this;
    release();
    flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols;
    allocator = m.allocator; usageFlags = m.usageFlags;
    u = m.u;
    offset = m.offset;
    if (step.p != step.buf && step.p != step.buf+1) // release self step/size
    {
        fastFree(step.p);
    }
    step.p = step.buf;
    size.p = &rows;
    if (m.dims <= 2) // move new step/size info
    {
        int _1d = dims <= 1;
        step.buf[0] = m.step.buf[0];
        step.buf[1] = m.step.buf[1];
        step.p = &step.buf[_1d];
        size.p = &rows + _1d;
    }
    else
    {
        CV_DbgAssert(m.step.p != m.step.buf && m.step.p != m.step.buf+1);
        step.p = m.step.p;
        size.p = m.size.p;
    }
    m.step.p = m.step.buf;
    m.size.p = &m.rows;
    m.flags = MAGIC_VAL;
    m.usageFlags = USAGE_DEFAULT;
    m.dims = m.rows = m.cols = 0;
    m.allocator = NULL;
    m.u = NULL;
    m.offset = 0;
    return *this;
}


MatAllocator* UMat::getStdAllocator()
{
#ifdef HAVE_OPENCL
    if (ocl::useOpenCL())
        return ocl::getOpenCLAllocator();
#endif
    return Mat::getDefaultAllocator();
}
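
// Illustrative note (not part of the upstream sources): with OpenCL available
// and enabled, UMat buffers come from the OpenCL allocator (device memory);
// otherwise allocation transparently falls back to the default Mat allocator,
// so UMat code keeps working on CPU-only builds.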

void swap( UMat& a, UMat& b )
{
    std::swap(a.flags, b.flags);
    std::swap(a.dims, b.dims);
    std::swap(a.rows, b.rows);
    std::swap(a.cols, b.cols);
    std::swap(a.allocator, b.allocator);
    std::swap(a.u, b.u);
    std::swap(a.offset, b.offset);

    std::swap(a.size.p, b.size.p);
    std::swap(a.step.p, b.step.p);
    std::swap(a.step.buf[0], b.step.buf[0]);
    std::swap(a.step.buf[1], b.step.buf[1]);

    if(a.dims <= 2)
    {
        int a_1d = a.dims <= 1;
        a.step.p = &a.step.buf[a_1d];
        a.size.p = &a.rows + a_1d;
    }

    if( b.dims <= 2)
    {
        int b_1d = b.dims <= 1;
        b.step.p = &b.step.buf[b_1d];
        b.size.p = &b.rows + b_1d;
    }
}


void setSize( UMat& m, int _dims, const int* _sz,
              const size_t* _steps, bool autoSteps )
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        if( m.step.p != m.step.buf && m.step.p != m.step.buf+1 )
        {
            fastFree(m.step.p);
        }
        m.step.p = m.step.buf;
        m.size.p = &m.rows;
        if( _dims > 2 )
        {
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;
            m.rows = m.cols = -1;
        }
    }

    m.dims = _dims;

    size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
    if (_sz != 0) {
        int i;
        for( i = _dims-1; i >= 0; i-- )
        {
            int s = _sz[i];
            CV_Assert( s >= 0 );
            m.size.p[i] = s;

            if( _steps )
                m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
            else if( autoSteps )
            {
                m.step.p[i] = total;
                int64 total1 = (int64)total*s;
                if( (uint64)total1 != (size_t)total1 )
                    CV_Error( cv::Error::StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
                total = (size_t)total1;
            }
        }
    }

    if( _dims < 2 )
    {
        m.cols = _dims >= 1 && _sz ? _sz[0] : 1;
        m.rows = 1;
        m.size.p = &m.cols;
        m.step.buf[0] = m.cols*esz;
        m.step.buf[1] = esz;
        m.step.p = &m.step.buf[1];
    }
}


void UMat::updateContinuityFlag()
{
    flags = cv::updateContinuityFlag(flags, dims, size.p, step.p);
}


void finalizeHdr(UMat& m)
{
    m.updateContinuityFlag();
    int d = m.dims;
    if( d > 2 )
        m.rows = m.cols = -1;
}


UMat Mat::getUMat(AccessFlag accessFlags, UMatUsageFlags usageFlags) const
{
    UMat hdr;
    if(!data)
        return hdr;
    if (data != datastart)
    {
        Size wholeSize;
        Point ofs;
        locateROI(wholeSize, ofs);
        Size sz(cols, rows);
        if (ofs.x != 0 || ofs.y != 0)
        {
            Mat src = *this;
            int dtop = ofs.y;
            int dbottom = wholeSize.height - src.rows - ofs.y;
            int dleft = ofs.x;
            int dright = wholeSize.width - src.cols - ofs.x;
            src.adjustROI(dtop, dbottom, dleft, dright);
            return src.getUMat(accessFlags, usageFlags)(cv::Rect(ofs.x, ofs.y, sz.width, sz.height));
        }
    }
    CV_Assert(data == datastart);

    accessFlags |= ACCESS_RW;
    UMatData* new_u = NULL;
    {
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
        if(!a)
            a = a0;
        new_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags);
        new_u->originalUMatData = u;
    }
    bool allocated = false;
    try
    {
        allocated = UMat::getStdAllocator()->allocate(new_u, accessFlags, usageFlags);
    }
    catch (const cv::Exception& e)
    {
        fprintf(stderr, "Exception: %s\n", e.what());
    }
    if (!allocated)
    {
        allocated = getDefaultAllocator()->allocate(new_u, accessFlags, usageFlags);
        CV_Assert(allocated);
    }
    if (u != NULL)
    {
#ifdef HAVE_OPENCL
        if (ocl::useOpenCL() && new_u->currAllocator == ocl::getOpenCLAllocator())
        {
            CV_Assert(new_u->tempUMat());
        }
#endif
        CV_XADD(&(u->refcount), 1);
        CV_XADD(&(u->urefcount), 1);
    }
    try
    {
        hdr.flags = flags;
        hdr.usageFlags = usageFlags;
        setSize(hdr, dims, size.p, step.p);
        finalizeHdr(hdr);
        hdr.u = new_u;
        hdr.offset = 0; //data - datastart;
        hdr.addref();
        return hdr;
    }
    catch(...)
    {
        if (u != NULL)
        {
            CV_XADD(&(u->refcount), -1);
            CV_XADD(&(u->urefcount), -1);
        }
        new_u->currAllocator->deallocate(new_u);
        throw;
    }

}
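
// Usage sketch (illustrative, not part of the upstream sources):
//     Mat frame(480, 640, CV_8UC3);
//     UMat uframe = frame.getUMat(ACCESS_READ);  // wrap host data for OpenCL kernels
// Keep `frame` alive while `uframe` is in use; ~UMatData() above prints a
// warning when the base object dies before a derived one.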

void UMat::create(int d0, const int* _sizes, int _type, UMatUsageFlags _usageFlags)
{
    int sz1 = 1, d = d0;
    int i;
    if (d == 0) {
        d = 1;
        _sizes = (const int*)&sz1;
    }
    CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
    _type = CV_MAT_TYPE(_type);

    // if param value is USAGE_DEFAULT by implicit default param value -or- explicit value
    // ...then don't change the existing usageFlags
    // it is not possible to change usage from non-default to USAGE_DEFAULT through create()
    // ...instead must construct UMat()
    if (_usageFlags == cv::USAGE_DEFAULT)
    {
        _usageFlags = usageFlags;
    }

    if( u && d == dims && _type == type() && _usageFlags == usageFlags )
    {
        for( i = 0; i < d; i++ )
            if( size[i] != _sizes[i] )
                break;
        if( i == d )
            return;
    }

    int _sizes_backup[CV_MAX_DIM]; // #5991
    if (_sizes == (this->size.p))
    {
        for(i = 0; i < d; i++ )
            _sizes_backup[i] = _sizes[i];
        _sizes = _sizes_backup;
    }

    release();
    usageFlags = _usageFlags;
    if( d == 0 )
        return;
    flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
    setSize(*this, d, _sizes, 0, true);
    offset = 0;

    if( total() > 0 )
    {
        MatAllocator *a = allocator, *a0 = getStdAllocator();
        if (!a)
        {
            a = a0;
            a0 = Mat::getDefaultAllocator();
        }
        try
        {
            u = a->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, usageFlags);
            CV_Assert(u != 0);
        }
        catch(...)
        {
            if(a != a0)
                u = a0->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, usageFlags);
            CV_Assert(u != 0);
        }
        CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
    }

    finalizeHdr(*this);
    addref();
    dims = d0;
}

void UMat::create(const std::vector<int>& _sizes, int _type, UMatUsageFlags _usageFlags)
{
    create((int)_sizes.size(), _sizes.data(), _type, _usageFlags);
}

void UMat::copySize(const UMat& m)
{
    setSize(*this, m.dims, 0, 0);
    for( int i = 0; i < dims; i++ )
    {
        size[i] = m.size[i];
        step[i] = m.step[i];
    }
}


UMat::~UMat()
{
    release();
    if( step.p != step.buf && step.p != step.buf+1 )
        fastFree(step.p);
}

void UMat::deallocate()
{
    UMatData* u_ = u;
    u = NULL;
    u_->currAllocator->deallocate(u_);
}


UMat::UMat(const UMat& m, const Range& _rowRange, const Range& _colRange)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)
{
    CV_Assert( m.dims >= 2 );
    if( m.dims > 2 )
    {
        AutoBuffer<Range> rs(m.dims);
        rs[0] = _rowRange;
        rs[1] = _colRange;
        for( int i = 2; i < m.dims; i++ )
            rs[i] = Range::all();
        *this = m(rs.data());
        return;
    }

    *this = m;
    if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
    {
        CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
        rows = _rowRange.size();
        offset += step*_rowRange.start;
        flags |= SUBMATRIX_FLAG;
    }

    if( _colRange != Range::all() && _colRange != Range(0,cols) )
    {
        CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
        cols = _colRange.size();
        offset += _colRange.start*elemSize();
        flags |= SUBMATRIX_FLAG;
    }

    updateContinuityFlag();

    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}


UMat::UMat(const UMat& m, const Rect& roi)
: flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
  allocator(m.allocator), usageFlags(m.usageFlags), u(m.u), offset(m.offset + roi.y*m.step[0]), size(&rows)
{
    CV_Assert( m.dims <= 2 );

    size_t esz = CV_ELEM_SIZE(flags);
    offset += roi.x*esz;
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
               0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    if( roi.width < m.cols || roi.height < m.rows )
        flags |= SUBMATRIX_FLAG;

    step[0] = m.step[0]; step[1] = esz;
    updateContinuityFlag();

    addref();
    if( rows <= 0 || cols <= 0 )
    {
        rows = cols = 0;
        release();
    }
}


UMat::UMat(const UMat& m, const Range* ranges)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)
{
    int i, d = m.dims;

    CV_Assert(ranges);
    for( i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
    }
    *this = m;
    for( i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        if( r != Range::all() && r != Range(0, size.p[i]))
        {
            size.p[i] = r.end - r.start;
            offset += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}

UMat::UMat(const UMat& m, const std::vector<Range>& ranges)
: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows)
{
    int i, d = m.dims;

    CV_Assert((int)ranges.size() == d);
    for (i = 0; i < d; i++)
    {
        Range r = ranges[i];
        CV_Assert(r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]));
    }
    *this = m;
    for (i = 0; i < d; i++)
    {
        Range r = ranges[i];
        if (r != Range::all() && r != Range(0, size.p[i]))
        {
            size.p[i] = r.end - r.start;
            offset += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}

UMat UMat::diag(int d) const
{
    CV_Assert( dims <= 2 );
    UMat m = *this;
    size_t esz = elemSize();
    int len;

    if( d >= 0 )
    {
        len = std::min(cols - d, rows);
        m.offset += esz*d;
    }
    else
    {
        len = std::min(rows + d, cols);
        m.offset -= step[0]*d;
    }
    CV_DbgAssert( len > 0 );

    m.size[0] = m.rows = len;
    m.size[1] = m.cols = 1;
    m.step[0] += (len > 1 ? esz : 0);

    m.updateContinuityFlag();

    if( size() != Size(1,1) )
        m.flags |= SUBMATRIX_FLAG;

    return m;
}

void UMat::locateROI( Size& wholeSize, Point& ofs ) const
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    size_t esz = elemSize(), minstep;
    ptrdiff_t delta1 = (ptrdiff_t)offset, delta2 = (ptrdiff_t)u->size;

    if( delta1 == 0 )
        ofs.x = ofs.y = 0;
    else
    {
        ofs.y = (int)(delta1/step[0]);
        ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
        CV_DbgAssert( offset == (size_t)(ofs.y*step[0] + ofs.x*esz) );
    }
    minstep = (ofs.x + cols)*esz;
    wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
    wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
    wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
    wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
}


UMat& UMat::adjustROI( int dtop, int dbottom, int dleft, int dright )
{
    CV_Assert( dims == 2 && step[0] > 0 );
    Size wholeSize; Point ofs;
    size_t esz = elemSize();
    locateROI( wholeSize, ofs );
    int row1 = std::min(std::max(ofs.y - dtop, 0), wholeSize.height), row2 = std::max(0, std::min(ofs.y + rows + dbottom, wholeSize.height));
    int col1 = std::min(std::max(ofs.x - dleft, 0), wholeSize.width), col2 = std::max(0, std::min(ofs.x + cols + dright, wholeSize.width));
    if(row1 > row2)
        std::swap(row1, row2);
    if(col1 > col2)
        std::swap(col1, col2);

    offset += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
    rows = row2 - row1; cols = col2 - col1;
    size.p[0] = rows; size.p[1] = cols;
    updateContinuityFlag();
    return *this;
}


UMat UMat::reshape(int new_cn, int new_rows) const
{
    int cn = channels();
    UMat hdr = *this;

    if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
    {
        hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
        hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
        hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
        return hdr;
    }

    CV_Assert( dims <= 2 );

    if( new_cn == 0 )
        new_cn = cn;

    int total_width = cols * cn;

    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
    {
        int total_size = total_width * rows;
        if( !isContinuous() )
            CV_Error( cv::Error::BadStep,
                      "The matrix is not continuous, thus its number of rows can not be changed" );

        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( cv::Error::StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( cv::Error::StsBadArg, "The total number of matrix elements "
                                            "is not divisible by the new number of rows" );

        hdr.rows = new_rows;
        hdr.step.buf[0] = total_width * elemSize1();
    }

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( cv::Error::BadNumChannels,
                  "The total width is not divisible by the new number of channels" );

    hdr.dims = 2;
    hdr.cols = new_width;
    hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
    hdr.step.buf[1] = CV_ELEM_SIZE(hdr.flags);
    hdr.step.p = &hdr.step.buf[0];
    hdr.size.p = &hdr.rows;
    return hdr;
}
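
// Usage sketch (illustrative, not part of the upstream sources):
//     UMat rgb(480, 640, CV_8UC3);
//     UMat flat = rgb.reshape(1);           // 480x1920 CV_8UC1 header, same buffer
//     UMat col  = rgb.reshape(3, 480*640);  // 307200x1 CV_8UC3, one pixel per row
// reshape() only rewrites the header; no pixel data is copied or moved.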

UMat UMat::diag(const UMat& d, UMatUsageFlags usageFlags)
{
    CV_Assert( d.cols == 1 || d.rows == 1 );
    int len = d.rows + d.cols - 1;
    UMat m(len, len, d.type(), Scalar(0), usageFlags);
    UMat md = m.diag();
    if( d.cols == 1 )
        d.copyTo(md);
    else
        transpose(d, md);
    return m;
}

int UMat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
{
    return (depth() == _depth || _depth <= 0) &&
        (isContinuous() || !_requireContinuous) &&
        ((dims <= 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
                        (cols == _elemChannels && channels() == 1))) ||
        (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
         (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
    ? (int)(total()*channels()/_elemChannels) : -1;
}

UMat UMat::reshape(int _cn, int _newndims, const int* _newsz) const
{
    if(_newndims == dims)
    {
        if(_newsz == 0)
            return reshape(_cn);
        if(_newndims == 2)
            return reshape(_cn, _newsz[0]);
    }

    if (isContinuous())
    {
        CV_Assert(_cn >= 0 && _newndims > 0 && _newndims <= CV_MAX_DIM && _newsz);

        if (_cn == 0)
            _cn = this->channels();
        else
            CV_Assert(_cn <= CV_CN_MAX);

        size_t total_elem1_ref = this->total() * this->channels();
        size_t total_elem1 = _cn;

        AutoBuffer<int, 4> newsz_buf( (size_t)_newndims );

        for (int i = 0; i < _newndims; i++)
        {
            CV_Assert(_newsz[i] >= 0);

            if (_newsz[i] > 0)
                newsz_buf[i] = _newsz[i];
            else if (i < dims)
                newsz_buf[i] = this->size[i];
            else
                CV_Error(cv::Error::StsOutOfRange, "Copy dimension (which has zero size) is not present in source matrix");

            total_elem1 *= (size_t)newsz_buf[i];
        }

        if (total_elem1 != total_elem1_ref)
            CV_Error(cv::Error::StsUnmatchedSizes, "Requested and source matrices have different count of elements");

        UMat hdr = *this;
        hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((_cn-1) << CV_CN_SHIFT);
        setSize(hdr, _newndims, newsz_buf.data(), NULL, true);

        return hdr;
    }

    CV_Error(cv::Error::StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
}

Mat UMat::getMat(AccessFlag accessFlags) const
{
    if(!u)
        return Mat();
    // TODO Support ACCESS_READ (ACCESS_WRITE) without unnecessary data transfers
    accessFlags |= ACCESS_RW;
    UMatDataAutoLock autolock(u);
    try
    {
        if(CV_XADD(&u->refcount, 1) == 0)
            u->currAllocator->map(u, accessFlags);
        if (u->data != 0)
        {
            Mat hdr(dims, size.p, type(), u->data + offset, step.p);
            hdr.flags = flags;
            hdr.u = u;
            hdr.datastart = u->data;
            hdr.data = u->data + offset;
            hdr.datalimit = hdr.dataend = u->data + u->size;
            return hdr;
        }
    }
    catch(...)
    {
        CV_XADD(&u->refcount, -1);
        throw;
    }
    CV_XADD(&u->refcount, -1);
    CV_Assert(u->data != 0 && "Error mapping of UMat to host memory.");
    return Mat();
}
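
// Usage sketch (illustrative, not part of the upstream sources):
//     UMat u(100, 100, CV_32F);
//     {
//         Mat m = u.getMat(ACCESS_RW);  // maps the buffer to host memory
//         m.at<float>(0, 0) = 1.f;
//     } // let `m` be destroyed before using `u` on the device again
// The returned Mat holds a reference (u->refcount) that pins the mapping, so
// keep its lifetime short.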

void* UMat::handle(AccessFlag accessFlags) const
{
    if( !u )
        return 0;

    CV_Assert(u->refcount == 0);
    CV_Assert(!u->deviceCopyObsolete() || u->copyOnMap());
    if (u->deviceCopyObsolete())
    {
        u->currAllocator->unmap(u);
    }

    if (!!(accessFlags & ACCESS_WRITE))
        u->markHostCopyObsolete(true);

    return u->handle;
}

void UMat::ndoffset(size_t* ofs) const
{
    // offset = step[0]*ofs[0] + step[1]*ofs[1] + step[2]*ofs[2] + ...;
    size_t val = offset;
    for( int i = 0; i < dims; i++ )
    {
        size_t s = step.p[i];
        ofs[i] = val / s;
        val -= ofs[i]*s;
    }
}

void UMat::copyTo(OutputArray _dst) const
{
    CV_INSTRUMENT_REGION();

#ifdef HAVE_CUDA
    if (_dst.isGpuMat())
    {
        _dst.getGpuMat().upload(*this);
        return;
    }
#endif

    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        _dst.release();
        return;
    }

    size_t sz[CV_MAX_DIM] = {1}, srcofs[CV_MAX_DIM]={0}, dstofs[CV_MAX_DIM]={0};
    size_t esz = elemSize();
    int i, d = std::max(dims, 1);
    for( i = 0; i < d; i++ )
        sz[i] = size.p[i];
    sz[d-1] *= esz;
    ndoffset(srcofs);
    srcofs[d-1] *= esz;

    _dst.create( dims, size.p, type() );
    if( _dst.isUMat() )
    {
        UMat dst = _dst.getUMat();
        CV_Assert(dst.u);
        if( u == dst.u && dst.offset == offset )
            return;

        if (u->currAllocator == dst.u->currAllocator)
        {
            dst.ndoffset(dstofs);
            dstofs[dims-1] *= esz;
            u->currAllocator->copy(u, dst.u, d, sz, srcofs, step.p, dstofs, dst.step.p, false);
            return;
        }
    }

    Mat dst = _dst.getMat();
    u->currAllocator->download(u, dst.ptr(), d, sz, srcofs, step.p, dst.step.p);
}

void UMat::copyTo(OutputArray _dst, InputArray _mask) const
{
    CV_INSTRUMENT_REGION();

    if( _mask.empty() )
    {
        copyTo(_dst);
        return;
    }
#ifdef HAVE_OPENCL
    int cn = channels(), mtype = _mask.type(), mdepth = CV_MAT_DEPTH(mtype), mcn = CV_MAT_CN(mtype);
    CV_Assert( mdepth == CV_8U && (mcn == 1 || mcn == cn) );

    if (ocl::useOpenCL() && _dst.isUMat() && dims <= 2)
    {
        UMatData * prevu = _dst.getUMat().u;
        _dst.create( dims, size, type() );

        UMat dst = _dst.getUMat();

        bool haveDstUninit = false;
        if( prevu != dst.u ) // do not leave dst uninitialized
            haveDstUninit = true;

        String opts = format("-D COPY_TO_MASK -D T1=%s -D scn=%d -D mcn=%d%s",
                             ocl::memopTypeToStr(depth()), cn, mcn,
                             haveDstUninit ? " -D HAVE_DST_UNINIT" : "");

        ocl::Kernel k("copyToMask", ocl::core::copyset_oclsrc, opts);
        if (!k.empty())
        {
            k.args(ocl::KernelArg::ReadOnlyNoSize(*this),
                   ocl::KernelArg::ReadOnlyNoSize(_mask.getUMat()),
                   haveDstUninit ? ocl::KernelArg::WriteOnly(dst) :
                                   ocl::KernelArg::ReadWrite(dst));

            size_t globalsize[2] = { (size_t)cols, (size_t)rows };
            if (k.run(2, globalsize, NULL, false))
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return;
            }
        }
    }
#endif
    Mat src = getMat(ACCESS_READ);
    src.copyTo(_dst, _mask);
}


//
// void UMat::convertTo moved to convert.dispatch.cpp
//

UMat& UMat::setTo(InputArray _value, InputArray _mask)
{
    CV_INSTRUMENT_REGION();

    bool haveMask = !_mask.empty();
#ifdef HAVE_OPENCL
    int tp = type(), cn = CV_MAT_CN(tp), d = CV_MAT_DEPTH(tp);

    if( dims <= 2 && cn <= 4 && CV_MAT_DEPTH(tp) < CV_64F && ocl::useOpenCL() )
    {
        Mat value = _value.getMat();
        CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::UMAT) );
        int kercn = haveMask || cn == 3 ? cn : std::max(cn, ocl::predictOptimalVectorWidth(*this)),
            kertp = CV_MAKE_TYPE(d, kercn);

        double buf[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0 };
        convertAndUnrollScalar(value, tp, (uchar *)buf, kercn / cn);

        int scalarcn = kercn == 3 ? 4 : kercn, rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
        String opts = format("-D dstT=%s -D rowsPerWI=%d -D dstST=%s -D dstT1=%s -D cn=%d",
                             ocl::memopTypeToStr(kertp), rowsPerWI,
                             ocl::memopTypeToStr(CV_MAKETYPE(d, scalarcn)),
                             ocl::memopTypeToStr(d), kercn);

        ocl::Kernel setK(haveMask ? "setMask" : "set", ocl::core::copyset_oclsrc, opts);
        if( !setK.empty() )
        {
            ocl::KernelArg scalararg(ocl::KernelArg::CONSTANT, 0, 0, 0, buf, CV_ELEM_SIZE(d) * scalarcn);
            UMat mask;

            if( haveMask )
            {
                mask = _mask.getUMat();
                CV_Assert( mask.size() == size() && mask.type() == CV_8UC1 );
                ocl::KernelArg maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
                               dstarg = ocl::KernelArg::ReadWrite(*this);
                setK.args(maskarg, dstarg, scalararg);
            }
            else
            {
                ocl::KernelArg dstarg = ocl::KernelArg::WriteOnly(*this, cn, kercn);
                setK.args(dstarg, scalararg);
            }

            size_t globalsize[] = { (size_t)cols * cn / kercn, ((size_t)rows + rowsPerWI - 1) / rowsPerWI };
            if( setK.run(2, globalsize, NULL, false) )
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return *this;
            }
        }
    }
#endif
    Mat m = getMat(haveMask ? ACCESS_RW : ACCESS_WRITE);
    m.setTo(_value, _mask);
    return *this;
}

UMat& UMat::operator = (const Scalar& s)
{
    setTo(s);
    return *this;
}

UMat UMat::t() const
{
    UMat m;
    transpose(*this, m);
    return m;
}

UMat UMat::zeros(int rows, int cols, int type, UMatUsageFlags usageFlags)
{
    return UMat(rows, cols, type, Scalar::all(0), usageFlags);
}

UMat UMat::zeros(Size size, int type, UMatUsageFlags usageFlags)
{
    return UMat(size, type, Scalar::all(0), usageFlags);
}

UMat UMat::zeros(int ndims, const int* sz, int type, UMatUsageFlags usageFlags)
{
    return UMat(ndims, sz, type, Scalar::all(0), usageFlags);
}

UMat UMat::ones(int rows, int cols, int type, UMatUsageFlags usageFlags)
{
    return UMat(rows, cols, type, Scalar(1), usageFlags);
}

UMat UMat::ones(Size size, int type, UMatUsageFlags usageFlags)
{
    return UMat(size, type, Scalar(1), usageFlags);
}

UMat UMat::ones(int ndims, const int* sz, int type, UMatUsageFlags usageFlags)
{
    return UMat(ndims, sz, type, Scalar(1), usageFlags);
}
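
// Illustrative note (not part of the upstream sources): like Mat::ones, the
// multi-channel variants initialize only the first channel with 1 --
// UMat::ones(3, 3, CV_8UC3) fills elements with Scalar(1), i.e. (1, 0, 0) per
// pixel, while zeros() clears all channels via Scalar::all(0).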

}

/* End of file. */