// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "precomp.hpp"
#include "bufferpool.impl.hpp"

namespace cv {

void MatAllocator::map(UMatData*, AccessFlag) const
{
}

void MatAllocator::unmap(UMatData* u) const
{
    if(u->urefcount == 0 && u->refcount == 0)
    {
        deallocate(u);
    }
}

void MatAllocator::download(UMatData* u, void* dstptr,
                            int dims, const size_t sz[],
                            const size_t srcofs[], const size_t srcstep[],
                            const size_t dststep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* srcptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;
        if( srcofs )
            srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}

void MatAllocator::upload(UMatData* u, const void* srcptr, int dims, const size_t sz[],
                          const size_t dstofs[], const size_t dststep[],
                          const size_t srcstep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* dstptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;
        if( dstofs )
            dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}

void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t sz[],
                        const size_t srcofs[], const size_t srcstep[],
                        const size_t dstofs[], const size_t dststep[], bool /*sync*/) const
{
    CV_INSTRUMENT_REGION();

    if(!usrc || !udst)
        return;
    int isz[CV_MAX_DIM];
    uchar* srcptr = usrc->data;
    uchar* dstptr = udst->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;
        if( srcofs )
            srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
        if( dstofs )
            dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}

BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const
{
    CV_UNUSED(id);
    static DummyBufferPoolController dummy;
    return &dummy;
}
class StdMatAllocator CV_FINAL : public MatAllocator
{
public:
    UMatData* allocate(int dims, const int* sizes, int type,
                       void* data0, size_t* step, AccessFlag /*flags*/, UMatUsageFlags /*usageFlags*/) const CV_OVERRIDE
    {
        size_t total = CV_ELEM_SIZE(type);
        for( int i = dims-1; i >= 0; i-- )
        {
            if( step )
            {
                if( data0 && step[i] != CV_AUTOSTEP )
                {
                    CV_Assert(total <= step[i]);
                    total = step[i];
                }
                else
                    step[i] = total;
            }
            total *= sizes[i];
        }
        uchar* data = data0 ? (uchar*)data0 : (uchar*)fastMalloc(total);
        UMatData* u = new UMatData(this);
        u->data = u->origdata = data;
        u->size = total;
        if(data0)
            u->flags |= UMatData::USER_ALLOCATED;

        return u;
    }

    bool allocate(UMatData* u, AccessFlag /*accessFlags*/, UMatUsageFlags /*usageFlags*/) const CV_OVERRIDE
    {
        if(!u) return false;
        return true;
    }

    void deallocate(UMatData* u) const CV_OVERRIDE
    {
        if(!u)
            return;

        CV_Assert(u->urefcount == 0);
        CV_Assert(u->refcount == 0);
        if( !(u->flags & UMatData::USER_ALLOCATED) )
        {
            fastFree(u->origdata);
            u->origdata = 0;
        }

        delete u;
    }
};

namespace
{
    MatAllocator* volatile g_matAllocator = NULL;
}

MatAllocator* Mat::getDefaultAllocator()
{
    if (g_matAllocator == NULL)
    {
        cv::AutoLock lock(cv::getInitializationMutex());
        if (g_matAllocator == NULL)
        {
            g_matAllocator = getStdAllocator();
        }
    }
    return g_matAllocator;
}

void Mat::setDefaultAllocator(MatAllocator* allocator)
{
    g_matAllocator = allocator;
}

MatAllocator* Mat::getStdAllocator()
{
    CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator())
}
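// Usage note (illustrative sketch, not part of this file's original code): a custom
// allocator can be plugged in by deriving from cv::MatAllocator and installing it with
// Mat::setDefaultAllocator(). "TracingAllocator" below is a hypothetical name; it simply
// forwards to the standard allocator, which is one possible way to wrap it.
//
//     class TracingAllocator : public cv::MatAllocator
//     {
//     public:
//         cv::UMatData* allocate(int dims, const int* sizes, int type, void* data0,
//                                size_t* step, cv::AccessFlag flags,
//                                cv::UMatUsageFlags usageFlags) const CV_OVERRIDE
//         {
//             // a hook could log/track the request here before delegating
//             return cv::Mat::getStdAllocator()->allocate(dims, sizes, type, data0,
//                                                         step, flags, usageFlags);
//         }
//         bool allocate(cv::UMatData* u, cv::AccessFlag f, cv::UMatUsageFlags uf) const CV_OVERRIDE
//         { return cv::Mat::getStdAllocator()->allocate(u, f, uf); }
//         void deallocate(cv::UMatData* u) const CV_OVERRIDE
//         { cv::Mat::getStdAllocator()->deallocate(u); }
//     };
//
//     // cv::Mat::setDefaultAllocator(new TracingAllocator()); // affects later Mat::create() calls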
//==================================================================================================

void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps)
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        if( m.step.p != m.step.buf )
        {
            fastFree(m.step.p);
            m.step.p = m.step.buf;
            m.size.p = &m.rows;
        }
        if( _dims > 2 )
        {
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;
            m.rows = m.cols = -1;
        }
    }

    m.dims = _dims;
    if( !_sz )
        return;

    size_t esz = CV_ELEM_SIZE(m.flags), esz1 = CV_ELEM_SIZE1(m.flags), total = esz;
    for( int i = _dims-1; i >= 0; i-- )
    {
        int s = _sz[i];
        CV_Assert( s >= 0 );
        m.size.p[i] = s;

        if( _steps )
        {
            if (_steps[i] % esz1 != 0)
            {
                CV_Error(Error::BadStep, "Step must be a multiple of esz1");
            }

            m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
        }
        else if( autoSteps )
        {
            m.step.p[i] = total;
            int64 total1 = (int64)total*s;
            if( (uint64)total1 != (size_t)total1 )
                CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
            total = (size_t)total1;
        }
    }

    if( _dims == 1 )
    {
        m.dims = 2;
        m.cols = 1;
        m.step[1] = esz;
    }
}

int updateContinuityFlag(int flags, int dims, const int* size, const size_t* step)
{
    int i, j;
    for( i = 0; i < dims; i++ )
    {
        if( size[i] > 1 )
            break;
    }

    uint64 t = (uint64)size[std::min(i, dims-1)]*CV_MAT_CN(flags);
    for( j = dims-1; j > i; j-- )
    {
        t *= size[j];
        if( step[j]*size[j] < step[j-1] )
            break;
    }

    if( j <= i && t == (uint64)(int)t )
        return flags | Mat::CONTINUOUS_FLAG;
    return flags & ~Mat::CONTINUOUS_FLAG;
}

void Mat::updateContinuityFlag()
{
    flags = cv::updateContinuityFlag(flags, dims, size.p, step.p);
}

void finalizeHdr(Mat& m)
{
    m.updateContinuityFlag();
    int d = m.dims;
    if( d > 2 )
        m.rows = m.cols = -1;
    if(m.u)
        m.datastart = m.data = m.u->data;
    if( m.data )
    {
        m.datalimit = m.datastart + m.size[0]*m.step[0];
        if( m.size[0] > 0 )
        {
            m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
            for( int i = 0; i < d-1; i++ )
                m.dataend += (m.size[i] - 1)*m.step[i];
        }
        else
            m.dataend = m.datalimit;
    }
    else
        m.dataend = m.datalimit = 0;
}

//==================================================================================================

void Mat::create(int d, const int* _sizes, int _type)
{
    int i;
    CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
    _type = CV_MAT_TYPE(_type);

    if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
    {
        if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
            return;
        for( i = 0; i < d; i++ )
            if( size[i] != _sizes[i] )
                break;
        if( i == d && (d > 1 || size[1] == 1))
            return;
    }

    int _sizes_backup[CV_MAX_DIM]; // #5991
    if (_sizes == (this->size.p))
    {
        for(i = 0; i < d; i++ )
            _sizes_backup[i] = _sizes[i];
        _sizes = _sizes_backup;
    }

    release();
    if( d == 0 )
        return;
    flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
    setSize(*this, d, _sizes, 0, true);

    if( total() > 0 )
    {
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
#ifdef HAVE_TGPU
        if( !a || a == tegra::getAllocator() )
            a = tegra::getAllocator(d, _sizes, _type);
#endif
        if(!a)
            a = a0;
        try
        {
            u = a->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, USAGE_DEFAULT);
            CV_Assert(u != 0);
        }
        catch (...)
        {
            if (a == a0)
                throw;
            u = a0->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, USAGE_DEFAULT);
            CV_Assert(u != 0);
        }
        CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
    }

    addref();
    finalizeHdr(*this);
}

void Mat::create(const std::vector<int>& _sizes, int _type)
{
    create((int)_sizes.size(), _sizes.data(), _type);
}
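// Usage note (illustrative sketch, not part of this file's original code): Mat::create()
// is a no-op when the matrix already has the requested shape and type; otherwise it
// releases the current buffer and allocates a new one through the selected allocator.
//
//     cv::Mat m;
//     m.create(480, 640, CV_8UC3);   // allocates a 480x640 3-channel buffer
//     m.create(480, 640, CV_8UC3);   // same shape and type: keeps the existing buffer
//     m.create(240, 320, CV_8UC1);   // different shape/type: release + reallocate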
void Mat::copySize(const Mat& m)
{
    setSize(*this, m.dims, 0, 0);
    for( int i = 0; i < dims; i++ )
    {
        size[i] = m.size[i];
        step[i] = m.step[i];
    }
}

void Mat::deallocate()
{
    if(u)
    {
        UMatData* u_ = u;
        u = NULL;
        (u_->currAllocator ? u_->currAllocator : allocator ? allocator : getDefaultAllocator())->unmap(u_);
    }
}

Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    CV_Assert( m.dims >= 2 );
    if( m.dims > 2 )
    {
        AutoBuffer<Range> rs(m.dims);
        rs[0] = _rowRange;
        rs[1] = _colRange;
        for( int i = 2; i < m.dims; i++ )
            rs[i] = Range::all();
        *this = m(rs.data());
        return;
    }

    *this = m;
    try
    {
        if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
        {
            CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end
                       && _rowRange.end <= m.rows );
            rows = _rowRange.size();
            data += step*_rowRange.start;
            flags |= SUBMATRIX_FLAG;
        }

        if( _colRange != Range::all() && _colRange != Range(0,cols) )
        {
            CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end
                       && _colRange.end <= m.cols );
            cols = _colRange.size();
            data += _colRange.start*elemSize();
            flags |= SUBMATRIX_FLAG;
        }
    }
    catch(...)
    {
        release();
        throw;
    }

    updateContinuityFlag();

    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}

Mat::Mat(const Mat& m, const Rect& roi)
    : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
      data(m.data + roi.y*m.step[0]),
      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
      allocator(m.allocator), u(m.u), size(&rows)
{
    CV_Assert( m.dims <= 2 );

    size_t esz = CV_ELEM_SIZE(flags);
    data += roi.x*esz;
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
               0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    if( u )
        CV_XADD(&u->refcount, 1);
    if( roi.width < m.cols || roi.height < m.rows )
        flags |= SUBMATRIX_FLAG;

    step[0] = m.step[0]; step[1] = esz;
    updateContinuityFlag();

    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}

Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    flags |= CV_MAT_TYPE(_type);
    datastart = data = (uchar*)_data;
    setSize(*this, _dims, _sizes, _steps, true);
    finalizeHdr(*this);
}

Mat::Mat(const std::vector<int>& _sizes, int _type, void* _data, const size_t* _steps)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    flags |= CV_MAT_TYPE(_type);
    datastart = data = (uchar*)_data;
    setSize(*this, (int)_sizes.size(), _sizes.data(), _steps, true);
    finalizeHdr(*this);
}

Mat::Mat(const Mat& m, const Range* ranges)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    int d = m.dims;

    CV_Assert(ranges);
    for( int i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
    }
    *this = m;
    for( int i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        if( r != Range::all() && r != Range(0, size.p[i]))
        {
            size.p[i] = r.end - r.start;
            data += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}

Mat::Mat(const Mat& m, const std::vector<Range>& ranges)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    int d = m.dims;

    CV_Assert((int)ranges.size() == d);
    for (int i = 0; i < d; i++)
    {
        Range r = ranges[i];
        CV_Assert(r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]));
    }
    *this = m;
    for (int i = 0; i < d; i++)
    {
        Range r = ranges[i];
        if (r != Range::all() && r != Range(0, size.p[i]))
        {
            size.p[i] = r.end - r.start;
            data += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}
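// Usage note (illustrative sketch, not part of this file's original code): the ROI
// constructors above produce headers that share data with the parent matrix; no pixels
// are copied, and SUBMATRIX_FLAG is set for partial views.
//
//     cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(0));
//     cv::Mat roi1 = img(cv::Rect(10, 20, 100, 50));            // Rect-based view
//     cv::Mat roi2(img, cv::Range(20, 70), cv::Range(10, 110)); // row/column range view
//     roi1.setTo(cv::Scalar(255, 0, 0));                        // also modifies img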
Mat Mat::diag(int d) const
{
    CV_Assert( dims <= 2 );
    Mat m = *this;
    size_t esz = elemSize();
    int len;

    if( d >= 0 )
    {
        len = std::min(cols - d, rows);
        m.data += esz*d;
    }
    else
    {
        len = std::min(rows + d, cols);
        m.data -= step[0]*d;
    }
    CV_DbgAssert( len > 0 );

    m.size[0] = m.rows = len;
    m.size[1] = m.cols = 1;
    m.step[0] += (len > 1 ? esz : 0);

    m.updateContinuityFlag();

    if( size() != Size(1,1) )
        m.flags |= SUBMATRIX_FLAG;

    return m;
}

void Mat::pop_back(size_t nelems)
{
    CV_Assert( nelems <= (size_t)size.p[0] );

    if( isSubmatrix() )
        *this = rowRange(0, size.p[0] - (int)nelems);
    else
    {
        size.p[0] -= (int)nelems;
        dataend -= nelems*step.p[0];
    }
}

void Mat::push_back_(const void* elem)
{
    size_t r = size.p[0];
    if( isSubmatrix() || dataend + step.p[0] > datalimit )
        reserve( std::max(r + 1, (r*3+1)/2) );

    size_t esz = elemSize();
    memcpy(data + r*step.p[0], elem, esz);
    size.p[0] = int(r + 1);
    dataend += step.p[0];
    uint64 tsz = size.p[0];
    for( int i = 1; i < dims; i++ )
        tsz *= size.p[i];
    if( esz < step.p[0] || tsz != (uint64)(int)tsz )
        flags &= ~CONTINUOUS_FLAG;
}

void Mat::reserve(size_t nelems)
{
    const size_t MIN_SIZE = 64;

    CV_Assert( (int)nelems >= 0 );
    if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
        return;

    int r = size.p[0];

    if( (size_t)r >= nelems )
        return;

    size.p[0] = std::max((int)nelems, 1);
    size_t newsize = total()*elemSize();

    if( newsize < MIN_SIZE )
        size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);

    Mat m(dims, size.p, type());
    size.p[0] = r;
    if( r > 0 )
    {
        Mat mpart = m.rowRange(0, r);
        copyTo(mpart);
    }

    *this = m;
    size.p[0] = r;
    dataend = data + step.p[0]*r;
}
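// Usage note (illustrative sketch, not part of this file's original code): reserve() and
// the push_back() family grow a matrix along its first dimension, much like std::vector;
// push_back_() above is the low-level single-element workhorse.
//
//     cv::Mat samples(0, 3, CV_32F);     // empty matrix with 3 columns
//     samples.reserve(100);              // pre-allocate room for 100 rows
//     cv::Mat row = (cv::Mat_<float>(1, 3) << 1.f, 2.f, 3.f);
//     samples.push_back(row);            // appends one row; samples.rows becomes 1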
void Mat::reserveBuffer(size_t nbytes)
{
    size_t esz = 1;
    int mtype = CV_8UC1;
    if (!empty())
    {
        if (!isSubmatrix() && data + nbytes <= dataend)//Should it be datalimit?
            return;
        esz = elemSize();
        mtype = type();
    }

    size_t nelems = (nbytes - 1) / esz + 1;

#if SIZE_MAX > UINT_MAX
    CV_Assert(nelems <= size_t(INT_MAX)*size_t(INT_MAX));
    int newrows = nelems > size_t(INT_MAX) ? nelems > 0x400*size_t(INT_MAX) ? nelems > 0x100000 * size_t(INT_MAX) ? nelems > 0x40000000 * size_t(INT_MAX) ?
                  size_t(INT_MAX) : 0x40000000 : 0x100000 : 0x400 : 1;
#else
    int newrows = nelems > size_t(INT_MAX) ? 2 : 1;
#endif
    int newcols = (int)((nelems - 1) / newrows + 1);

    create(newrows, newcols, mtype);
}

void Mat::resize(size_t nelems)
{
    int saveRows = size.p[0];
    if( saveRows == (int)nelems )
        return;
    CV_Assert( (int)nelems >= 0 );

    if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
        reserve(nelems);

    size.p[0] = (int)nelems;
    dataend += (size.p[0] - saveRows)*step.p[0];

    //updateContinuityFlag(*this);
}

void Mat::resize(size_t nelems, const Scalar& s)
{
    int saveRows = size.p[0];
    resize(nelems);

    if( size.p[0] > saveRows )
    {
        Mat part = rowRange(saveRows, size.p[0]);
        part = s;
    }
}

void Mat::push_back(const Mat& elems)
{
    size_t r = size.p[0];
    size_t delta = elems.size.p[0];
    if( delta == 0 )
        return;
    if( this == &elems )
    {
        Mat tmp = elems;
        push_back(tmp);
        return;
    }
    if( !data )
    {
        *this = elems.clone();
        return;
    }

    size.p[0] = elems.size.p[0];
    bool eq = size == elems.size;
    size.p[0] = int(r);
    if( !eq )
        CV_Error(CV_StsUnmatchedSizes, "Pushed vector length is not equal to matrix row length");
    if( type() != elems.type() )
        CV_Error(CV_StsUnmatchedFormats, "Pushed vector type is not the same as matrix type");

    if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
        reserve( std::max(r + delta, (r*3+1)/2) );

    size.p[0] += int(delta);
    dataend += step.p[0]*delta;

    //updateContinuityFlag(*this);

    if( isContinuous() && elems.isContinuous() )
        memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
    else
    {
        Mat part = rowRange(int(r), int(r + delta));
        elems.copyTo(part);
    }
}

void Mat::locateROI( Size& wholeSize, Point& ofs ) const
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    size_t esz = elemSize(), minstep;
    ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;

    if( delta1 == 0 )
        ofs.x = ofs.y = 0;
    else
    {
        ofs.y = (int)(delta1/step[0]);
        ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
        CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
    }
    minstep = (ofs.x + cols)*esz;
    wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
    wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
    wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
    wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
}

Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    Size wholeSize; Point ofs;
    size_t esz = elemSize();
    locateROI( wholeSize, ofs );
    int row1 = std::min(std::max(ofs.y - dtop, 0), wholeSize.height),
        row2 = std::max(0, std::min(ofs.y + rows + dbottom, wholeSize.height));
    int col1 = std::min(std::max(ofs.x - dleft, 0), wholeSize.width),
        col2 = std::max(0, std::min(ofs.x + cols + dright, wholeSize.width));

    if(row1 > row2)
        std::swap(row1, row2);
    if(col1 > col2)
        std::swap(col1, col2);

    data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
    rows = row2 - row1; cols = col2 - col1;
    size.p[0] = rows; size.p[1] = cols;
    updateContinuityFlag();
    return *this;
}
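// Usage note (illustrative sketch, not part of this file's original code): locateROI()
// reports where a view sits inside its parent allocation, and adjustROI() moves the view
// boundaries within that allocation.
//
//     cv::Mat img(100, 100, CV_8U, cv::Scalar(0));
//     cv::Mat roi = img(cv::Rect(40, 40, 20, 20));
//     cv::Size wholeSize; cv::Point ofs;
//     roi.locateROI(wholeSize, ofs);   // wholeSize == 100x100, ofs == (40, 40)
//     roi.adjustROI(10, 10, 10, 10);   // grow the view by 10 pixels on each side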
Mat Mat::reshape(int new_cn, int new_rows) const
{
    int cn = channels();
    Mat hdr = *this;

    if( dims > 2 )
    {
        if( new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
        {
            hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
            hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
            hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
            return hdr;
        }
        if( new_rows > 0 )
        {
            int sz[] = { new_rows, (int)(total()/new_rows) };
            return reshape(new_cn, 2, sz);
        }
    }

    CV_Assert( dims <= 2 );

    if( new_cn == 0 )
        new_cn = cn;

    int total_width = cols * cn;

    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
    {
        int total_size = total_width * rows;
        if( !isContinuous() )
            CV_Error( CV_BadStep,
                "The matrix is not continuous, thus its number of rows can not be changed" );

        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( CV_StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( CV_StsBadArg, "The total number of matrix elements "
                                    "is not divisible by the new number of rows" );

        hdr.rows = new_rows;
        hdr.step[0] = total_width * elemSize1();
    }

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( CV_BadNumChannels,
            "The total width is not divisible by the new number of channels" );

    hdr.cols = new_width;
    hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
    hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
    return hdr;
}

Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
{
    if(_newndims == dims)
    {
        if(_newsz == 0)
            return reshape(_cn);
        if(_newndims == 2)
            return reshape(_cn, _newsz[0]);
    }

    if (isContinuous())
    {
        CV_Assert(_cn >= 0 && _newndims > 0 && _newndims <= CV_MAX_DIM && _newsz);

        if (_cn == 0)
            _cn = this->channels();
        else
            CV_Assert(_cn <= CV_CN_MAX);

        size_t total_elem1_ref = this->total() * this->channels();
        size_t total_elem1 = _cn;

        AutoBuffer<int> newsz_buf( (size_t)_newndims );

        for (int i = 0; i < _newndims; i++)
        {
            CV_Assert(_newsz[i] >= 0);

            if (_newsz[i] > 0)
                newsz_buf[i] = _newsz[i];
            else if (i < dims)
                newsz_buf[i] = this->size[i];
            else
                CV_Error(CV_StsOutOfRange, "Copy dimension (which has zero size) is not present in source matrix");

            total_elem1 *= (size_t)newsz_buf[i];
        }

        if (total_elem1 != total_elem1_ref)
            CV_Error(CV_StsUnmatchedSizes, "Requested and source matrices have different count of elements");

        Mat hdr = *this;
        hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((_cn-1) << CV_CN_SHIFT);
        setSize(hdr, _newndims, newsz_buf.data(), NULL, true);

        return hdr;
    }

    CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
    // TBD
}

Mat Mat::reshape(int _cn, const std::vector<int>& _newshape) const
{
    if(_newshape.empty())
    {
        CV_Assert(empty());
        return *this;
    }

    return reshape(_cn, (int)_newshape.size(), &_newshape[0]);
}
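// Usage note (illustrative sketch, not part of this file's original code): reshape()
// returns a new header over the same data with a different channel count and/or number
// of rows; no data is copied.
//
//     cv::Mat img(4, 6, CV_8UC3);
//     cv::Mat flat  = img.reshape(1);      // 4x18, 1 channel, same data
//     cv::Mat asRow = img.reshape(3, 1);   // 1x24, 3 channels, same data
//     CV_Assert(flat.data == img.data && asRow.data == img.data);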
Mat Mat::diag(const Mat& d)
{
    CV_Assert( d.cols == 1 || d.rows == 1 );
    int len = d.rows + d.cols - 1;
    Mat m(len, len, d.type(), Scalar(0));
    Mat md = m.diag();
    if( d.cols == 1 )
        d.copyTo(md);
    else
        transpose(d, md);
    return m;
}

int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
{
    return data && (depth() == _depth || _depth <= 0) &&
        (isContinuous() || !_requireContinuous) &&
        ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
                        (cols == _elemChannels && channels() == 1))) ||
        (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
         (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
    ? (int)(total()*channels()/_elemChannels) : -1;
}

static inline Size getContinuousSize_(int flags, int cols, int rows, int widthScale)
{
    int64 sz = (int64)cols * rows * widthScale;
    bool has_int_overflow = sz >= INT_MAX;
    bool isContiguous = (flags & Mat::CONTINUOUS_FLAG) != 0;
    return (isContiguous && !has_int_overflow)
        ? Size((int)sz, 1)
        : Size(cols * widthScale, rows);
}

Size getContinuousSize2D(Mat& m1, int widthScale)
{
    CV_CheckLE(m1.dims, 2, "");
    return getContinuousSize_(m1.flags, m1.cols, m1.rows, widthScale);
}

Size getContinuousSize2D(Mat& m1, Mat& m2, int widthScale)
{
    CV_CheckLE(m1.dims, 2, "");
    CV_CheckLE(m2.dims, 2, "");
    const Size sz1 = m1.size();
    if (sz1 != m2.size())  // reshape all matrixes to the same size (#4159)
    {
        size_t total_sz = m1.total();
        CV_CheckEQ(total_sz, m2.total(), "");
        bool is_m1_vector = m1.cols == 1 || m1.rows == 1;
        bool is_m2_vector = m2.cols == 1 || m2.rows == 1;
        CV_Assert(is_m1_vector); CV_Assert(is_m2_vector);
        int total = (int)total_sz;  // vector-column
        bool isContiguous = ((m1.flags & m2.flags) & Mat::CONTINUOUS_FLAG) != 0;
        bool has_int_overflow = ((int64)total_sz * widthScale) >= INT_MAX;
        if (isContiguous && !has_int_overflow)
            total = 1; // vector-row
        m1 = m1.reshape(0, total);
        m2 = m2.reshape(0, total);
        CV_Assert(m1.cols == m2.cols && m1.rows == m2.rows);
        return Size(m1.cols * widthScale, m1.rows);
    }
    return getContinuousSize_(m1.flags & m2.flags,
                              m1.cols, m1.rows, widthScale);
}

Size getContinuousSize2D(Mat& m1, Mat& m2, Mat& m3, int widthScale)
{
    CV_CheckLE(m1.dims, 2, "");
    CV_CheckLE(m2.dims, 2, "");
    CV_CheckLE(m3.dims, 2, "");
    const Size sz1 = m1.size();
    if (sz1 != m2.size() || sz1 != m3.size())  // reshape all matrixes to the same size (#4159)
    {
        size_t total_sz = m1.total();
        CV_CheckEQ(total_sz, m2.total(), "");
        CV_CheckEQ(total_sz, m3.total(), "");
        bool is_m1_vector = m1.cols == 1 || m1.rows == 1;
        bool is_m2_vector = m2.cols == 1 || m2.rows == 1;
        bool is_m3_vector = m3.cols == 1 || m3.rows == 1;
        CV_Assert(is_m1_vector); CV_Assert(is_m2_vector); CV_Assert(is_m3_vector);
        int total = (int)total_sz;  // vector-column
        bool isContiguous = ((m1.flags & m2.flags & m3.flags) & Mat::CONTINUOUS_FLAG) != 0;
        bool has_int_overflow = ((int64)total_sz * widthScale) >= INT_MAX;
        if (isContiguous && !has_int_overflow)
            total = 1; // vector-row
        m1 = m1.reshape(0, total);
        m2 = m2.reshape(0, total);
        m3 = m3.reshape(0, total);
        CV_Assert(m1.cols == m2.cols && m1.rows == m2.rows && m1.cols == m3.cols && m1.rows == m3.rows);
        return Size(m1.cols * widthScale, m1.rows);
    }
    return getContinuousSize_(m1.flags & m2.flags & m3.flags,
                              m1.cols, m1.rows, widthScale);
}

} // cv::