Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Alexander Alekhin 2021-12-03 12:32:49 +00:00
commit 8b4fa2605e
89 changed files with 1116 additions and 542 deletions

View File

@ -2329,7 +2329,7 @@ static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _i
if( solver.state == CvLevMarq::CALC_J )
{
int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
assert( JtJ && JtErr );
CV_Assert( JtJ && JtErr );
Mat _JtJ(cvarrToMat(JtJ)), _JtErr(cvarrToMat(JtErr));
@ -3140,7 +3140,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat Qx = cvMat(3, 3, CV_64F, _Qx);
cvMatMul(&M, &Qx, &R);
assert(fabs(matR[2][1]) < FLT_EPSILON);
CV_DbgAssert(fabs(matR[2][1]) < FLT_EPSILON);
matR[2][1] = 0;
/* Find Givens rotation for y axis. */
@ -3159,7 +3159,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat Qy = cvMat(3, 3, CV_64F, _Qy);
cvMatMul(&R, &Qy, &M);
assert(fabs(matM[2][0]) < FLT_EPSILON);
CV_DbgAssert(fabs(matM[2][0]) < FLT_EPSILON);
matM[2][0] = 0;
/* Find Givens rotation for z axis. */
@ -3179,7 +3179,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat Qz = cvMat(3, 3, CV_64F, _Qz);
cvMatMul(&M, &Qz, &R);
assert(fabs(matR[1][0]) < FLT_EPSILON);
CV_DbgAssert(fabs(matR[1][0]) < FLT_EPSILON);
matR[1][0] = 0;
// Solve the decomposition ambiguity.
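
The recurring change in this merge is replacing the C library assert() with OpenCV's own checks: CV_Assert() is always evaluated and raises a cv::Exception with file/line information, while CV_DbgAssert() behaves like assert() and is only active in debug builds. A minimal illustrative sketch of the difference (not part of the diff above):

#include <opencv2/core.hpp>
#include <cassert>

void checkInput(const cv::Mat& m)
{
    // Compiled out when NDEBUG is defined, i.e. in typical release builds:
    assert(!m.empty());

    // Always evaluated; throws cv::Exception on failure, even in release builds:
    CV_Assert(m.type() == CV_64FC1);

    // Checked only in debug builds, but reports through cv::error like CV_Assert:
    CV_DbgAssert(m.isContinuous());
}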

View File

@ -122,7 +122,7 @@ bool CvLevMarq::update( const CvMat*& _param, CvMat*& matJ, CvMat*& _err )
{
matJ = _err = 0;
assert( !err.empty() );
CV_Assert( !err.empty() );
if( state == DONE )
{
_param = param;
@ -155,7 +155,7 @@ bool CvLevMarq::update( const CvMat*& _param, CvMat*& matJ, CvMat*& _err )
return true;
}
assert( state == CHECK_ERR );
CV_Assert( state == CHECK_ERR );
errNorm = cvNorm( err, 0, CV_L2 );
if( errNorm > prevErrNorm )
{
@ -223,7 +223,7 @@ bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, d
return true;
}
assert( state == CHECK_ERR );
CV_Assert( state == CHECK_ERR );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
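
For context, CvLevMarq is driven as a small state machine: updateAlt() hands buffers back to the caller, who fills JtJ/JtErr when the solver is in the CALC_J state and the residual norm when it is in CHECK_ERR, looping until the solver reports completion. A hedged sketch of that driving loop, assuming the CvLevMarq declaration visible to these sources (the parameter count and the residual computation are placeholders, not taken from the diff):

void refineParams(int nparams)
{
    CvLevMarq solver(nparams, 0,
                     cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, DBL_EPSILON));
    // ... fill solver.param (an nparams x 1 CvMat) with the initial guess ...

    for (;;)
    {
        const CvMat* _param = 0;
        CvMat *_JtJ = 0, *_JtErr = 0;
        double* _errNorm = 0;

        if (!solver.updateAlt(_param, _JtJ, _JtErr, _errNorm))
            break;                 // state == DONE: _param holds the refined parameters

        if (_JtJ && _JtErr)
        {
            // state == CALC_J: accumulate J^T*J into _JtJ and J^T*err into _JtErr
        }
        if (_errNorm)
        {
            // state == CHECK_ERR: report the residual norm of the trial parameters
            *_errNorm = 0.0;       // placeholder for the real residual computation
        }
    }
}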

View File

@ -1072,7 +1072,7 @@ void CV_ProjectPointsTest::run(int)
imgPoints, dpdrot, dpdt, dpdf, dpdc, dpddist, 0 );
// calculate and check image points
assert( (int)imgPoints.size() == pointCount );
CV_Assert( (int)imgPoints.size() == pointCount );
vector<Point2f>::const_iterator it = imgPoints.begin();
for( int i = 0; i < pointCount; i++, ++it )
{

View File

@ -57,7 +57,7 @@ static int cvTsRodrigues( const CvMat* src, CvMat* dst, CvMat* jacobian )
if( jacobian )
{
assert( (jacobian->rows == 9 && jacobian->cols == 3) ||
CV_Assert( (jacobian->rows == 9 && jacobian->cols == 3) ||
(jacobian->rows == 3 && jacobian->cols == 9) );
}
@ -66,7 +66,7 @@ static int cvTsRodrigues( const CvMat* src, CvMat* dst, CvMat* jacobian )
double r[3], theta;
CvMat _r = cvMat( src->rows, src->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(src->type)), r);
assert( dst->rows == 3 && dst->cols == 3 );
CV_Assert( dst->rows == 3 && dst->cols == 3 );
cvConvert( src, &_r );
@ -321,7 +321,7 @@ static int cvTsRodrigues( const CvMat* src, CvMat* dst, CvMat* jacobian )
}
else
{
assert(0);
CV_Assert(0);
return 0;
}
@ -407,7 +407,7 @@ static void test_convertHomogeneous( const Mat& _src, Mat& _dst )
}
else
{
assert( count == dst.cols );
CV_Assert( count == dst.cols );
ddims = dst.channels()*dst.rows;
if( dst.rows == 1 )
{

View File

@ -406,7 +406,7 @@ void CV_StereoMatchingTest::run(int)
{
string dataPath = ts->get_data_path() + "cv/";
string algorithmName = name;
assert( !algorithmName.empty() );
CV_Assert( !algorithmName.empty() );
if( dataPath.empty() )
{
ts->printf( cvtest::TS::LOG, "dataPath is empty" );
@ -553,22 +553,22 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
{
// rightDisp is not used in the current test version
int code = cvtest::TS::OK;
assert( fs.isOpened() );
assert( trueLeftDisp.type() == CV_32FC1 );
assert( trueRightDisp.empty() || trueRightDisp.type() == CV_32FC1 );
assert( leftDisp.type() == CV_32FC1 && (rightDisp.empty() || rightDisp.type() == CV_32FC1) );
CV_Assert( fs.isOpened() );
CV_Assert( trueLeftDisp.type() == CV_32FC1 );
CV_Assert( trueRightDisp.empty() || trueRightDisp.type() == CV_32FC1 );
CV_Assert( leftDisp.type() == CV_32FC1 && (rightDisp.empty() || rightDisp.type() == CV_32FC1) );
// get masks for unknown ground truth disparity values
Mat leftUnknMask, rightUnknMask;
DatasetParams params = datasetsParams[caseDatasets[caseIdx]];
absdiff( trueLeftDisp, Scalar(params.dispUnknVal), leftUnknMask );
leftUnknMask = leftUnknMask < std::numeric_limits<float>::epsilon();
assert(leftUnknMask.type() == CV_8UC1);
CV_Assert(leftUnknMask.type() == CV_8UC1);
if( !trueRightDisp.empty() )
{
absdiff( trueRightDisp, Scalar(params.dispUnknVal), rightUnknMask );
rightUnknMask = rightUnknMask < std::numeric_limits<float>::epsilon();
assert(rightUnknMask.type() == CV_8UC1);
CV_Assert(rightUnknMask.type() == CV_8UC1);
}
// calculate errors
@ -623,7 +623,7 @@ int CV_StereoMatchingTest::readDatasetsParams( FileStorage& fs )
}
datasetsParams.clear();
FileNode fn = fs.getFirstTopLevelNode();
assert(fn.isSeq());
CV_Assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=3 )
{
String _name = fn[i];
@ -649,7 +649,7 @@ int CV_StereoMatchingTest::readRunParams( FileStorage& fs )
void CV_StereoMatchingTest::writeErrors( const string& errName, const vector<float>& errors, FileStorage* fs )
{
assert( (int)errors.size() == ERROR_KINDS_COUNT );
CV_Assert( (int)errors.size() == ERROR_KINDS_COUNT );
vector<float>::const_iterator it = errors.begin();
if( fs )
for( int i = 0; i < ERROR_KINDS_COUNT; i++, ++it )
@ -696,9 +696,9 @@ void CV_StereoMatchingTest::readROI( FileNode& fn, Rect& validROI )
int CV_StereoMatchingTest::compareErrors( const vector<float>& calcErrors, const vector<float>& validErrors,
const vector<float>& eps, const string& errName )
{
assert( (int)calcErrors.size() == ERROR_KINDS_COUNT );
assert( (int)validErrors.size() == ERROR_KINDS_COUNT );
assert( (int)eps.size() == ERROR_KINDS_COUNT );
CV_Assert( (int)calcErrors.size() == ERROR_KINDS_COUNT );
CV_Assert( (int)validErrors.size() == ERROR_KINDS_COUNT );
CV_Assert( (int)eps.size() == ERROR_KINDS_COUNT );
vector<float>::const_iterator calcIt = calcErrors.begin(),
validIt = validErrors.begin(),
epsIt = eps.begin();
@ -757,7 +757,7 @@ protected:
{
int code = CV_StereoMatchingTest::readRunParams( fs );
FileNode fn = fs.getFirstTopLevelNode();
assert(fn.isSeq());
CV_Assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=5 )
{
String caseName = fn[i], datasetName = fn[i+1];
@ -776,8 +776,8 @@ protected:
Rect& calcROI, Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
{
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
assert( _leftImg.type() == CV_8UC3 && _rightImg.type() == CV_8UC3 );
CV_Assert( params.ndisp%16 == 0 );
CV_Assert( _leftImg.type() == CV_8UC3 && _rightImg.type() == CV_8UC3 );
Mat leftImg; cvtColor( _leftImg, leftImg, COLOR_BGR2GRAY );
Mat rightImg; cvtColor( _rightImg, rightImg, COLOR_BGR2GRAY );
@ -883,7 +883,7 @@ protected:
{
int code = CV_StereoMatchingTest::readRunParams(fs);
FileNode fn = fs.getFirstTopLevelNode();
assert(fn.isSeq());
CV_Assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=5 )
{
String caseName = fn[i], datasetName = fn[i+1];
@ -902,7 +902,7 @@ protected:
Rect& calcROI, Mat& leftDisp, Mat& /*rightDisp*/, int caseIdx )
{
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
CV_Assert( params.ndisp%16 == 0 );
Ptr<StereoSGBM> sgbm = StereoSGBM::create( 0, params.ndisp, params.winSize,
10*params.winSize*params.winSize,
40*params.winSize*params.winSize,
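
Both matcher test cases now validate the dataset's ndisp with CV_Assert up front, matching the public requirement that numDisparities be a positive multiple of 16. A minimal usage sketch under that constraint, not taken from the test itself (left/right are assumed to be rectified 8-bit grayscale images):

#include <opencv2/calib3d.hpp>

cv::Mat computeDisparity(const cv::Mat& left, const cv::Mat& right)
{
    const int numDisparities = 64;       // must be divisible by 16
    const int blockSize = 5;             // odd window size
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
        0, numDisparities, blockSize,
        8 * blockSize * blockSize,       // P1 (example smoothness penalty)
        32 * blockSize * blockSize);     // P2
    cv::Mat disp16, disp32;
    sgbm->compute(left, right, disp16);  // fixed-point disparity, scaled by 16
    disp16.convertTo(disp32, CV_32F, 1.0 / 16.0);
    return disp32;
}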

View File

@ -1108,7 +1108,7 @@ static void test_remap( const Mat& src, Mat& dst, const Mat& mapx, const Mat& ma
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -1240,7 +1240,7 @@ int CV_ImgWarpBaseTest::prepare_test_case( int test_case_idx )
}
break;
default:
assert(0);
CV_Assert(0);
}
/*switch( depth )
@ -1258,7 +1258,7 @@ int CV_ImgWarpBaseTest::prepare_test_case( int test_case_idx )
((float*)ptr)[j] = (float)buffer[j];
break;
default:
assert(0);
CV_Assert(0);
}*/
cv::Mat src(1, cols*cn, CV_32F, &buffer[0]);
cv::Mat dst(1, cols*cn, depth, ptr);

View File

@ -497,7 +497,7 @@ cvInitNArrayIterator( int count, CvArr** arrs,
// returns zero value if iteration is finished, non-zero otherwise
CV_IMPL int cvNextNArraySlice( CvNArrayIterator* iterator )
{
assert( iterator != 0 );
CV_Assert( iterator != 0 );
int i, dims;
for( dims = iterator->dims; dims > 0; dims-- )
@ -648,7 +648,7 @@ icvGetNodePtr( CvSparseMat* mat, const int* idx, int* _type,
int i, tabidx;
unsigned hashval = 0;
CvSparseNode *node;
assert( CV_IS_SPARSE_MAT( mat ));
CV_Assert( CV_IS_SPARSE_MAT( mat ));
if( !precalc_hashval )
{
@ -697,7 +697,7 @@ icvGetNodePtr( CvSparseMat* mat, const int* idx, int* _type,
int newrawsize = newsize*sizeof(newtable[0]);
CvSparseMatIterator iterator;
assert( (newsize & (newsize - 1)) == 0 );
CV_Assert( (newsize & (newsize - 1)) == 0 );
// resize hash table
newtable = (void**)cvAlloc( newrawsize );
@ -742,7 +742,7 @@ icvDeleteNode( CvSparseMat* mat, const int* idx, unsigned* precalc_hashval )
int i, tabidx;
unsigned hashval = 0;
CvSparseNode *node, *prev = 0;
assert( CV_IS_SPARSE_MAT( mat ));
CV_Assert( CV_IS_SPARSE_MAT( mat ));
if( !precalc_hashval )
{
@ -1462,7 +1462,7 @@ cvScalarToRawData( const CvScalar* scalar, void* data, int type, int extend_to_1
int cn = CV_MAT_CN( type );
int depth = type & CV_MAT_DEPTH_MASK;
assert( scalar && data );
CV_Assert( scalar && data );
if( (unsigned)(cn - 1) >= 4 )
CV_Error( CV_StsOutOfRange, "The number of channels must be 1, 2, 3 or 4" );
@ -1509,7 +1509,7 @@ cvScalarToRawData( const CvScalar* scalar, void* data, int type, int extend_to_1
((double*)data)[cn] = (double)(scalar->val[cn]);
break;
default:
assert(0);
CV_Assert(0);
CV_Error( CV_BadDepth, "" );
}
@ -1534,7 +1534,7 @@ cvRawDataToScalar( const void* data, int flags, CvScalar* scalar )
{
int cn = CV_MAT_CN( flags );
assert( scalar && data );
CV_Assert( scalar && data );
if( (unsigned)(cn - 1) >= 4 )
CV_Error( CV_StsOutOfRange, "The number of channels must be 1, 2, 3 or 4" );
@ -1572,7 +1572,7 @@ cvRawDataToScalar( const void* data, int flags, CvScalar* scalar )
scalar->val[cn] = ((double*)data)[cn];
break;
default:
assert(0);
CV_Assert(0);
CV_Error( CV_BadDepth, "" );
}
}
@ -2623,7 +2623,7 @@ cvReshapeMatND( const CvArr* arr,
{
CvMatND* mat = (CvMatND*)arr;
assert( new_cn > 0 );
CV_Assert( new_cn > 0 );
int last_dim_size = mat->dim[mat->dims-1].size*CV_MAT_CN(mat->type);
int new_size = last_dim_size/new_cn;
@ -2901,7 +2901,7 @@ CV_IMPL IplImage *
cvCreateImage( CvSize size, int depth, int channels )
{
IplImage *img = cvCreateImageHeader( size, depth, channels );
assert( img );
CV_Assert( img );
cvCreateData( img );
return img;

View File

@ -97,7 +97,7 @@ icvInitMemStorage( CvMemStorage* storage, int block_size )
block_size = CV_STORAGE_BLOCK_SIZE;
block_size = cvAlign( block_size, CV_STRUCT_ALIGN );
assert( sizeof(CvMemBlock) % CV_STRUCT_ALIGN == 0 );
CV_Assert( sizeof(CvMemBlock) % CV_STRUCT_ALIGN == 0 );
memset( storage, 0, sizeof( *storage ));
storage->signature = CV_STORAGE_MAGIC_VAL;
@ -240,7 +240,7 @@ icvGoNextMemBlock( CvMemStorage * storage )
if( block == parent->top ) /* the single allocated block */
{
assert( parent->bottom == block );
CV_Assert( parent->bottom == block );
parent->top = parent->bottom = 0;
parent->free_space = 0;
}
@ -266,7 +266,7 @@ icvGoNextMemBlock( CvMemStorage * storage )
if( storage->top->next )
storage->top = storage->top->next;
storage->free_space = storage->block_size - sizeof(CvMemBlock);
assert( storage->free_space % CV_STRUCT_ALIGN == 0 );
CV_Assert( storage->free_space % CV_STRUCT_ALIGN == 0 );
}
@ -331,7 +331,7 @@ cvMemStorageAlloc( CvMemStorage* storage, size_t size )
if( size > INT_MAX )
CV_Error( CV_StsOutOfRange, "Too large memory block is requested" );
assert( storage->free_space % CV_STRUCT_ALIGN == 0 );
CV_Assert( storage->free_space % CV_STRUCT_ALIGN == 0 );
if( (size_t)storage->free_space < size )
{
@ -343,7 +343,7 @@ cvMemStorageAlloc( CvMemStorage* storage, size_t size )
}
ptr = ICV_FREE_PTR(storage);
assert( (size_t)ptr % CV_STRUCT_ALIGN == 0 );
CV_Assert( (size_t)ptr % CV_STRUCT_ALIGN == 0 );
storage->free_space = cvAlignLeft(storage->free_space - (int)size, CV_STRUCT_ALIGN );
return ptr;
@ -683,7 +683,7 @@ icvGrowSeq( CvSeq *seq, int in_front_of )
else
{
icvGoNextMemBlock( storage );
assert( storage->free_space >= delta );
CV_Assert( storage->free_space >= delta );
}
}
@ -716,7 +716,7 @@ icvGrowSeq( CvSeq *seq, int in_front_of )
* For used blocks it means current number
* of sequence elements in the block:
*/
assert( block->count % seq->elem_size == 0 && block->count > 0 );
CV_Assert( block->count % seq->elem_size == 0 && block->count > 0 );
if( !in_front_of )
{
@ -732,7 +732,7 @@ icvGrowSeq( CvSeq *seq, int in_front_of )
if( block != block->prev )
{
assert( seq->first->start_index == 0 );
CV_Assert( seq->first->start_index == 0 );
seq->first = block;
}
else
@ -760,7 +760,7 @@ icvFreeSeqBlock( CvSeq *seq, int in_front_of )
{
CvSeqBlock *block = seq->first;
assert( (in_front_of ? block : block->prev)->count == 0 );
CV_Assert( (in_front_of ? block : block->prev)->count == 0 );
if( block == block->prev ) /* single block case */
{
@ -775,7 +775,7 @@ icvFreeSeqBlock( CvSeq *seq, int in_front_of )
if( !in_front_of )
{
block = block->prev;
assert( seq->ptr == block->data );
CV_Assert( seq->ptr == block->data );
block->count = (int)(seq->block_max - seq->ptr);
seq->block_max = seq->ptr = block->prev->data +
@ -804,7 +804,7 @@ icvFreeSeqBlock( CvSeq *seq, int in_front_of )
block->next->prev = block->prev;
}
assert( block->count > 0 && block->count % seq->elem_size == 0 );
CV_Assert( block->count > 0 && block->count % seq->elem_size == 0 );
block->next = seq->free_blocks;
seq->free_blocks = block;
}
@ -861,7 +861,7 @@ cvFlushSeqWriter( CvSeqWriter * writer )
CvSeqBlock *block = first_block;
writer->block->count = (int)((writer->ptr - writer->block->data) / seq->elem_size);
assert( writer->block->count > 0 );
CV_Assert( writer->block->count > 0 );
do
{
@ -891,7 +891,7 @@ cvEndWriteSeq( CvSeqWriter * writer )
CvMemStorage *storage = seq->storage;
schar *storage_block_max = (schar *) storage->top + storage->block_size;
assert( writer->block->count > 0 );
CV_Assert( writer->block->count > 0 );
if( (unsigned)((storage_block_max - storage->free_space)
- seq->block_max) < CV_STRUCT_ALIGN )
@ -1147,7 +1147,7 @@ cvSeqPush( CvSeq *seq, const void *element )
icvGrowSeq( seq, 0 );
ptr = seq->ptr;
assert( ptr + elem_size <= seq->block_max /*&& ptr == seq->block_min */ );
CV_Assert( ptr + elem_size <= seq->block_max /*&& ptr == seq->block_min */ );
}
if( element )
@ -1183,7 +1183,7 @@ cvSeqPop( CvSeq *seq, void *element )
if( --(seq->first->prev->count) == 0 )
{
icvFreeSeqBlock( seq, 0 );
assert( seq->ptr == seq->block_max );
CV_Assert( seq->ptr == seq->block_max );
}
}
@ -1207,7 +1207,7 @@ cvSeqPushFront( CvSeq *seq, const void *element )
icvGrowSeq( seq, 1 );
block = seq->first;
assert( block->start_index > 0 );
CV_Assert( block->start_index > 0 );
}
ptr = block->data -= elem_size;
@ -1289,7 +1289,7 @@ cvSeqInsert( CvSeq *seq, int before_index, const void *element )
icvGrowSeq( seq, 0 );
ptr = seq->ptr + elem_size;
assert( ptr <= seq->block_max );
CV_Assert( ptr <= seq->block_max );
}
delta_index = seq->first->start_index;
@ -1307,7 +1307,7 @@ cvSeqInsert( CvSeq *seq, int before_index, const void *element )
block = prev_block;
/* Check that we don't fall into an infinite loop: */
assert( block != seq->first->prev );
CV_Assert( block != seq->first->prev );
}
before_index = (before_index - block->start_index + delta_index) * elem_size;
@ -1346,7 +1346,7 @@ cvSeqInsert( CvSeq *seq, int before_index, const void *element )
block = next_block;
/* Check that we don't fall into an infinite loop: */
assert( block != seq->first );
CV_Assert( block != seq->first );
}
before_index = (before_index - block->start_index + delta_index) * elem_size;
@ -1502,7 +1502,7 @@ cvSeqPushMulti( CvSeq *seq, const void *_elements, int count, int front )
icvGrowSeq( seq, 1 );
block = seq->first;
assert( block->start_index > 0 );
CV_Assert( block->start_index > 0 );
}
delta = MIN( block->start_index, count );
@ -1543,7 +1543,7 @@ cvSeqPopMulti( CvSeq *seq, void *_elements, int count, int front )
int delta = seq->first->prev->count;
delta = MIN( delta, count );
assert( delta > 0 );
CV_Assert( delta > 0 );
seq->first->prev->count -= delta;
seq->total -= delta;
@ -1568,7 +1568,7 @@ cvSeqPopMulti( CvSeq *seq, void *_elements, int count, int front )
int delta = seq->first->count;
delta = MIN( delta, count );
assert( delta > 0 );
CV_Assert( delta > 0 );
seq->first->count -= delta;
seq->total -= delta;
@ -2418,7 +2418,7 @@ cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, CvSeq** labels,
root2->rank += root->rank == root2->rank;
root = root2;
}
assert( root->parent == 0 );
CV_Assert( root->parent == 0 );
// Compress path from node2 to the root:
while( node2->parent )
@ -2521,7 +2521,7 @@ cvSetAdd( CvSet* set, CvSetElem* element, CvSetElem** inserted_element )
((CvSetElem*)ptr)->flags = count | CV_SET_ELEM_FREE_FLAG;
((CvSetElem*)ptr)->next_free = (CvSetElem*)(ptr + elem_size);
}
assert( count <= CV_SET_ELEM_IDX_MASK+1 );
CV_Assert( count <= CV_SET_ELEM_IDX_MASK+1 );
((CvSetElem*)(ptr - elem_size))->next_free = 0;
set->first->prev->count += count - set->total;
set->total = count;
@ -2720,7 +2720,7 @@ cvFindGraphEdgeByPtr( const CvGraph* graph,
for( ; edge; edge = edge->next[ofs] )
{
ofs = start_vtx == edge->vtx[1];
assert( ofs == 1 || start_vtx == edge->vtx[0] );
CV_Assert( ofs == 1 || start_vtx == edge->vtx[0] );
if( edge->vtx[1] == end_vtx )
break;
}
@ -2784,7 +2784,7 @@ cvGraphAddEdgeByPtr( CvGraph* graph,
"vertex pointers coincide (or set to NULL)" );
edge = (CvGraphEdge*)cvSetNew( (CvSet*)(graph->edges) );
assert( edge->flags >= 0 );
CV_Assert( edge->flags >= 0 );
edge->vtx[0] = start_vtx;
edge->vtx[1] = end_vtx;
@ -2861,7 +2861,7 @@ cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, CvGraphVtx* end_v
prev_ofs = ofs, prev_edge = edge, edge = edge->next[ofs] )
{
ofs = start_vtx == edge->vtx[1];
assert( ofs == 1 || start_vtx == edge->vtx[0] );
CV_Assert( ofs == 1 || start_vtx == edge->vtx[0] );
if( edge->vtx[1] == end_vtx )
break;
}
@ -2879,7 +2879,7 @@ cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, CvGraphVtx* end_v
prev_ofs = ofs, prev_edge = edge, edge = edge->next[ofs] )
{
ofs = end_vtx == edge->vtx[1];
assert( ofs == 1 || end_vtx == edge->vtx[0] );
CV_Assert( ofs == 1 || end_vtx == edge->vtx[0] );
if( edge->vtx[0] == start_vtx )
break;
}
@ -3396,7 +3396,7 @@ cvInsertNodeIntoTree( void* _node, void* _parent, void* _frame )
node->v_prev = _parent != _frame ? parent : 0;
node->h_next = parent->v_next;
assert( parent->v_next != node );
CV_Assert( parent->v_next != node );
if( parent->v_next )
parent->v_next->h_prev = node;
@ -3430,7 +3430,7 @@ cvRemoveNodeFromTree( void* _node, void* _frame )
if( parent )
{
assert( parent->v_next == node );
CV_Assert( parent->v_next == node );
parent->v_next = node->h_next;
}
}
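
These routines belong to the legacy C dynamic-structure API (CvMemStorage, CvSeq, CvSet, CvGraph); the converted assertions guard block bookkeeping inside the grow/free helpers. For orientation, a short sketch of the public calls whose internals are being checked (illustrative only, not part of the diff):

#include <opencv2/core/core_c.h>

void legacySeqDemo()
{
    CvMemStorage* storage = cvCreateMemStorage(0);   // 0 -> default block size
    CvSeq* seq = cvCreateSeq(CV_32SC1, sizeof(CvSeq), sizeof(int), storage);

    for (int i = 0; i < 100; i++)
        cvSeqPush(seq, &i);                          // may trigger icvGrowSeq internally

    int last = 0;
    cvSeqPop(seq, &last);                            // last == 99

    cvClearMemStorage(storage);                      // recycle the storage blocks
    cvReleaseMemStorage(&storage);                   // free everything
}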

View File

@ -238,7 +238,7 @@ DFTInit( int n0, int nf, const int* factors, int* itab, int elem_size, void* _wa
else
{
// radix[] is initialized from index 'nf' down to zero
assert (nf < 34);
CV_Assert (nf < 34);
radix[nf] = 1;
digits[nf] = 0;
for( i = 0; i < nf; i++ )
@ -374,7 +374,7 @@ DFTInit( int n0, int nf, const int* factors, int* itab, int elem_size, void* _wa
else
{
Complex<float>* wave = (Complex<float>*)_wave;
assert( elem_size == sizeof(Complex<float>) );
CV_Assert( elem_size == sizeof(Complex<float>) );
wave[0].re = 1.f;
wave[0].im = 0.f;
@ -874,13 +874,13 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
// 0. shuffle data
if( dst != src )
{
assert( !c.noPermute );
CV_Assert( !c.noPermute );
if( !inv )
{
for( i = 0; i <= n - 2; i += 2, itab += 2*tab_step )
{
int k0 = itab[0], k1 = itab[tab_step];
assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n );
CV_Assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n );
dst[i] = src[k0]; dst[i+1] = src[k1];
}
@ -892,7 +892,7 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
for( i = 0; i <= n - 2; i += 2, itab += 2*tab_step )
{
int k0 = itab[0], k1 = itab[tab_step];
assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n );
CV_Assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n );
t.re = src[k0].re; t.im = -src[k0].im;
dst[i] = t;
t.re = src[k1].re; t.im = -src[k1].im;
@ -921,7 +921,7 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
for( i = 0; i < n2; i += 2, itab += tab_step*2 )
{
j = itab[0];
assert( (unsigned)j < (unsigned)n2 );
CV_Assert( (unsigned)j < (unsigned)n2 );
CV_SWAP(dst[i+1], dsth[j], t);
if( j > i )
@ -938,7 +938,7 @@ DFT(const OcvDftOptions & c, const Complex<T>* src, Complex<T>* dst)
for( i = 0; i < n; i++, itab += tab_step )
{
j = itab[0];
assert( (unsigned)j < (unsigned)n );
CV_Assert( (unsigned)j < (unsigned)n );
if( j > i )
CV_SWAP(dst[i], dst[j], t);
}
@ -1218,7 +1218,7 @@ RealDFT(const OcvDftOptions & c, const T* src, T* dst)
setIppErrorStatus();
#endif
}
assert( c.tab_size == n );
CV_Assert( c.tab_size == n );
if( n == 1 )
{
@ -1338,11 +1338,11 @@ CCSIDFT(const OcvDftOptions & c, const T* src, T* dst)
T save_s1 = 0.;
T t0, t1, t2, t3, t;
assert( c.tab_size == n );
CV_Assert( c.tab_size == n );
if( complex_input )
{
assert( src != dst );
CV_Assert( src != dst );
save_s1 = src[1];
((T*)src)[1] = src[0];
src++;
@ -3177,7 +3177,7 @@ protected:
}
else
{
assert( !inv );
CV_Assert( !inv );
CopyColumn( dbuf0, complex_elem_size, dptr0,
dst_step, len, complex_elem_size );
if( even )
@ -3874,7 +3874,7 @@ DCTInit( int n, int elem_size, void* _wave, int inv )
if( n == 1 )
return;
assert( (n&1) == 0 );
CV_Assert( (n&1) == 0 );
if( (n & (n - 1)) == 0 )
{
@ -3912,7 +3912,7 @@ DCTInit( int n, int elem_size, void* _wave, int inv )
else
{
Complex<float>* wave = (Complex<float>*)_wave;
assert( elem_size == sizeof(Complex<float>) );
CV_Assert( elem_size == sizeof(Complex<float>) );
w.re = (float)scale;
w.im = 0.f;
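
DFTInit/DCTInit and the templated DFT/RealDFT/CCSIDFT routines are the internal radix bookkeeping behind cv::dft and cv::dct; the converted assertions check the permutation table indices (itab) and the wave-table element sizes. A minimal sketch of the public entry points this file serves (illustrative, not part of the diff):

#include <opencv2/core.hpp>

void dftRoundTrip(const cv::Mat& src32f, cv::Mat& recovered)
{
    cv::Mat freq;
    cv::dft(src32f, freq, cv::DFT_COMPLEX_OUTPUT);   // forward transform
    cv::dft(freq, recovered,
            cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
}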

View File

@ -1020,7 +1020,7 @@ double invert( InputArray _src, OutputArray _dst, int method )
}
else
{
assert( n == 1 );
CV_Assert( n == 1 );
if( type == CV_32FC1 )
{
@ -1208,7 +1208,7 @@ bool solve( InputArray _src, InputArray _src2arg, OutputArray _dst, int method )
}
else
{
assert( src.rows == 1 );
CV_Assert( src.rows == 1 );
if( type == CV_32FC1 )
{

View File

@ -169,7 +169,7 @@ GEMM_TransposeBlock( const uchar* src, size_t src_step,
}
break;
default:
assert(0);
CV_Assert(0);
return;
}
}
@ -2062,7 +2062,7 @@ MulTransposedR(const Mat& srcmat, const Mat& dstmat, const Mat& deltamat, double
if( delta && delta_cols < size.width )
{
assert( delta_cols == 1 );
CV_Assert( delta_cols == 1 );
buf_size *= 5;
}
buf.allocate(buf_size);

View File

@ -638,7 +638,7 @@ void SparseMat::resizeHashTab(size_t newsize)
uchar* SparseMat::newNode(const int* idx, size_t hashval)
{
const int HASH_MAX_FILL_FACTOR=3;
assert(hdr);
CV_Assert(hdr);
size_t hsize = hdr->hashtab.size();
if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
{

View File

@ -113,7 +113,7 @@ static void* openclamdblas_check_fn(int ID);
static void* openclamdblas_check_fn(int ID)
{
assert(ID >= 0 && ID < (int)(sizeof(openclamdblas_fn)/sizeof(openclamdblas_fn[0])));
CV_Assert(ID >= 0 && ID < (int)(sizeof(openclamdblas_fn)/sizeof(openclamdblas_fn[0])));
const struct DynamicFnEntry* e = openclamdblas_fn[ID];
void* func = CV_CL_GET_PROC_ADDRESS(e->fnName);
if (!func)

View File

@ -113,7 +113,7 @@ static void* openclamdfft_check_fn(int ID);
static void* openclamdfft_check_fn(int ID)
{
assert(ID >= 0 && ID < (int)(sizeof(openclamdfft_fn)/sizeof(openclamdfft_fn[0])));
CV_Assert(ID >= 0 && ID < (int)(sizeof(openclamdfft_fn)/sizeof(openclamdfft_fn[0])));
const struct DynamicFnEntry* e = openclamdfft_fn[ID];
void* func = CV_CL_GET_PROC_ADDRESS(e->fnName);
if (!func)

View File

@ -411,7 +411,7 @@ static void* opencl_gl_check_fn(int ID);
static void* opencl_gl_check_fn(int ID)
{
const struct DynamicFnEntry* e = NULL;
assert(ID >= 0 && ID < (int)(sizeof(opencl_gl_fn_list)/sizeof(opencl_gl_fn_list[0])));
CV_Assert(ID >= 0 && ID < (int)(sizeof(opencl_gl_fn_list)/sizeof(opencl_gl_fn_list[0])));
e = opencl_gl_fn_list[ID];
void* func = CV_CL_GET_PROC_ADDRESS(e->fnName);
if (!func)

View File

@ -175,7 +175,7 @@ int decodeFormat( const char* dt, int* fmt_pairs, int max_len )
if( !dt || !len )
return 0;
assert( fmt_pairs != 0 && max_len > 0 );
CV_Assert( fmt_pairs != 0 && max_len > 0 );
fmt_pairs[0] = 0;
max_len *= 2;

View File

@ -387,7 +387,7 @@ public:
if( c == '-' )
{
assert( ptr[1] == '-' && ptr[2] == '>' );
CV_Assert( ptr[1] == '-' && ptr[2] == '>' );
mode = 0;
ptr += 3;
}
@ -694,7 +694,7 @@ public:
else if( *ptr == '!' )
{
tag_type = CV_XML_DIRECTIVE_TAG;
assert( ptr[1] != '-' || ptr[2] != '-' );
CV_Assert( ptr[1] != '-' || ptr[2] != '-' );
ptr++;
}
else

View File

@ -98,7 +98,7 @@ public:
/*
if( !FileNode::isFlow(parent_flags) )
fs->struct_indent -= CV_YML_INDENT + FileNode::isFlow(struct_flags);
assert( fs->struct_indent >= 0 );*/
CV_Assert( fs->struct_indent >= 0 );*/
}
void write(const char* key, int value)

View File

@ -62,7 +62,6 @@
#include "opencv2/core/ocl.hpp"
#endif
#include <assert.h>
#include <ctype.h>
#include <float.h>
#include <limits.h>

View File

@ -33,7 +33,7 @@ static void cvTsReleaseSimpleSeq( CvTsSimpleSeq** seq )
static schar* cvTsSimpleSeqElem( CvTsSimpleSeq* seq, int index )
{
assert( 0 <= index && index < seq->count );
CV_Assert( 0 <= index && index < seq->count );
return seq->array + index * seq->elem_size;
}
@ -50,7 +50,11 @@ static void cvTsSimpleSeqShiftAndCopy( CvTsSimpleSeq* seq, int from_idx, int to_
if( from_idx == to_idx )
return;
assert( (from_idx > to_idx && !elem) || (from_idx < to_idx && elem) );
if (elem)
CV_Assert(from_idx < to_idx);
else
CV_Assert(from_idx > to_idx);
if( from_idx < seq->count )
{
@ -128,7 +132,7 @@ static void cvTsReleaseSimpleSet( CvTsSimpleSet** set_header )
static schar* cvTsSimpleSetFind( CvTsSimpleSet* set_header, int index )
{
int idx = index * set_header->elem_size;
assert( 0 <= index && index < set_header->max_count );
CV_Assert( 0 <= index && index < set_header->max_count );
return set_header->array[idx] ? set_header->array + idx + 1 : 0;
}
@ -136,11 +140,11 @@ static schar* cvTsSimpleSetFind( CvTsSimpleSet* set_header, int index )
static int cvTsSimpleSetAdd( CvTsSimpleSet* set_header, void* elem )
{
int idx, idx2;
assert( set_header->free_count > 0 );
CV_Assert( set_header->free_count > 0 );
idx = set_header->free_stack[--set_header->free_count];
idx2 = idx * set_header->elem_size;
assert( set_header->array[idx2] == 0 );
CV_Assert( set_header->array[idx2] == 0 );
set_header->array[idx2] = 1;
if( set_header->elem_size > 1 )
memcpy( set_header->array + idx2 + 1, elem, set_header->elem_size - 1 );
@ -152,9 +156,9 @@ static int cvTsSimpleSetAdd( CvTsSimpleSet* set_header, void* elem )
static void cvTsSimpleSetRemove( CvTsSimpleSet* set_header, int index )
{
assert( set_header->free_count < set_header->max_count &&
0 <= index && index < set_header->max_count );
assert( set_header->array[index * set_header->elem_size] == 1 );
CV_Assert( set_header->free_count < set_header->max_count &&
0 <= index && index < set_header->max_count );
CV_Assert( set_header->array[index * set_header->elem_size] == 1 );
set_header->free_stack[set_header->free_count++] = index;
set_header->array[index * set_header->elem_size] = 0;
@ -187,7 +191,7 @@ static CvTsSimpleGraph* cvTsCreateSimpleGraph( int max_vtx_count, int vtx_size,
{
CvTsSimpleGraph* graph;
assert( max_vtx_count > 1 && vtx_size >= 0 && edge_size >= 0 );
CV_Assert( max_vtx_count > 1 && vtx_size >= 0 && edge_size >= 0 );
graph = (CvTsSimpleGraph*)cvAlloc( sizeof(*graph) +
max_vtx_count * max_vtx_count * (edge_size + 1));
graph->vtx = cvTsCreateSimpleSet( max_vtx_count, vtx_size );
@ -235,13 +239,13 @@ static void cvTsSimpleGraphAddEdge( CvTsSimpleGraph* graph, int idx1, int idx2,
{
int i, t, n = graph->oriented ? 1 : 2;
assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
cvTsSimpleSetFind( graph->vtx, idx2 ));
CV_Assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
cvTsSimpleSetFind( graph->vtx, idx2 ));
for( i = 0; i < n; i++ )
{
int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
assert( graph->matrix[ofs] == 0 );
CV_Assert( graph->matrix[ofs] == 0 );
graph->matrix[ofs] = 1;
if( graph->edge_size > 1 )
memcpy( graph->matrix + ofs + 1, edge, graph->edge_size - 1 );
@ -255,13 +259,13 @@ static void cvTsSimpleGraphRemoveEdge( CvTsSimpleGraph* graph, int idx1, int id
{
int i, t, n = graph->oriented ? 1 : 2;
assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
CV_Assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
cvTsSimpleSetFind( graph->vtx, idx2 ));
for( i = 0; i < n; i++ )
{
int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
assert( graph->matrix[ofs] == 1 );
CV_Assert( graph->matrix[ofs] == 1 );
graph->matrix[ofs] = 0;
CV_SWAP( idx1, idx2, t );
}
@ -291,7 +295,7 @@ static int cvTsSimpleGraphVertexDegree( CvTsSimpleGraph* graph, int index )
int i, count = 0;
int edge_size = graph->edge_size;
int max_vtx_count = graph->vtx->max_count;
assert( cvTsSimpleGraphFindVertex( graph, index ) != 0 );
CV_Assert( cvTsSimpleGraphFindVertex( graph, index ) != 0 );
for( i = 0; i < max_vtx_count; i++ )
{
@ -301,7 +305,7 @@ static int cvTsSimpleGraphVertexDegree( CvTsSimpleGraph* graph, int index )
if( !graph->oriented )
{
assert( count % 2 == 0 );
CV_Assert( count % 2 == 0 );
count /= 2;
}
return count;
@ -609,7 +613,7 @@ int Core_SeqBaseTest::test_get_seq_elem( int _struct_idx, int iters )
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[_struct_idx];
struct_idx = _struct_idx;
assert( seq->total == sseq->count );
CV_Assert( seq->total == sseq->count );
if( sseq->count == 0 )
return 0;
@ -656,7 +660,7 @@ int Core_SeqBaseTest::test_get_seq_reading( int _struct_idx, int iters )
vector<schar> _elem(sseq->elem_size);
schar* elem = &_elem[0];
assert( total == sseq->count );
CV_Assert( total == sseq->count );
this->struct_idx = _struct_idx;
int pos = cvtest::randInt(rng) % 2;
@ -964,7 +968,7 @@ int Core_SeqBaseTest::test_seq_ops( int iters )
"The sequence doesn't become empty after clear" );
break;
default:
assert(0);
CV_Assert(0);
return -1;
}
@ -1903,7 +1907,7 @@ int Core_GraphScanTest::create_random_graph( int _struct_idx )
for( i = 0; i < vtx_count; i++ )
cvGraphAddVtx( graph );
assert( graph->active_count == vtx_count );
CV_Assert( graph->active_count == vtx_count );
for( i = 0; i < edge_count; i++ )
{
@ -1914,7 +1918,7 @@ int Core_GraphScanTest::create_random_graph( int _struct_idx )
cvGraphAddEdge( graph, j, k );
}
assert( graph->active_count == vtx_count && graph->edges->active_count <= edge_count );
CV_Assert( graph->active_count == vtx_count && graph->edges->active_count <= edge_count );
return 0;
}

View File

@ -204,7 +204,7 @@ static void DCT_1D( const Mat& _src, Mat& _dst, int flags, const Mat& _wave=Mat(
}
}
else
assert(0);
CV_Assert(0);
}

View File

@ -28,7 +28,7 @@ protected:
template<class Type>
void testReduce( const Mat& src, Mat& sum, Mat& avg, Mat& max, Mat& min, int dim )
{
assert( src.channels() == 1 );
CV_Assert( src.channels() == 1 );
if( dim == 0 ) // row
{
sum.create( 1, src.cols, CV_64FC1 );
@ -138,7 +138,7 @@ int Core_ReduceTest::checkOp( const Mat& src, int dstType, int opType, const Mat
eps = 0.6;
}
assert( opRes.type() == CV_64FC1 );
CV_Assert( opRes.type() == CV_64FC1 );
Mat _dst, dst, diff;
cv::reduce( src, _dst, dim, opType, dstType );
_dst.convertTo( dst, CV_64FC1 );
@ -192,7 +192,7 @@ int Core_ReduceTest::checkCase( int srcType, int dstType, int dim, Size sz )
else if( srcType == CV_64FC1 )
testReduce<double>( src, sum, avg, max, min, dim );
else
assert( 0 );
CV_Assert( 0 );
// 1. sum
tempCode = checkOp( src, dstType, CV_REDUCE_SUM, sum, dim );
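
The templated testReduce<> builds per-row/per-column sums, averages, maxima and minima that checkOp then compares against cv::reduce. A minimal sketch of the API being exercised (names and types are illustrative):

#include <opencv2/core.hpp>

void reduceDemo(const cv::Mat& src)      // single-channel input, as the test assumes
{
    cv::Mat colSums, rowMeans;
    // dim = 0: collapse every column into a single output row
    cv::reduce(src, colSums, 0, cv::REDUCE_SUM, CV_64FC1);
    // dim = 1: collapse every row into a single output column
    cv::reduce(src, rowMeans, 1, cv::REDUCE_AVG, CV_64FC1);
}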

View File

@ -1039,7 +1039,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
}
else
{
assert( mat_depth == CV_64F );
CV_Assert( mat_depth == CV_64F );
for( i = 0; i < transmat->rows; i++ )
for( j = 0; j < cols; j++ )
mat[i*cols + j] = ((double*)(transmat->data.ptr + transmat->step*i))[j];
@ -1065,7 +1065,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
buf[j] = ((double*)src)[j];
break;
default:
assert(0);
CV_Assert(0);
}
switch( cn )
@ -1095,7 +1095,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
}
break;
default:
assert(0);
CV_Assert(0);
}
switch( depth )
@ -1109,7 +1109,7 @@ static void cvTsPerspectiveTransform( const CvArr* _src, CvArr* _dst, const CvMa
((double*)dst)[j] = buf[j];
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -1458,8 +1458,8 @@ static double cvTsLU( CvMat* a, CvMat* b=NULL, CvMat* x=NULL, int* rank=0 )
double *a0 = a->data.db, *b0 = b ? b->data.db : 0;
double *x0 = x ? x->data.db : 0;
double t, det = 1.;
assert( CV_MAT_TYPE(a->type) == CV_64FC1 &&
(!b || CV_ARE_TYPES_EQ(a,b)) && (!x || CV_ARE_TYPES_EQ(a,x)));
CV_Assert( CV_MAT_TYPE(a->type) == CV_64FC1 &&
(!b || CV_ARE_TYPES_EQ(a,b)) && (!x || CV_ARE_TYPES_EQ(a,x)));
for( i = 0; i < Nm; i++ )
{
@ -1514,7 +1514,7 @@ static double cvTsLU( CvMat* a, CvMat* b=NULL, CvMat* x=NULL, int* rank=0 )
if( x )
{
assert( b );
CV_Assert( b );
for( i = N-1; i >= 0; i-- )
{

View File

@ -545,6 +545,8 @@ CV__DNN_INLINE_NS_BEGIN
class CV_EXPORTS ELULayer : public ActivationLayer
{
public:
float alpha;
static Ptr<ELULayer> create(const LayerParams &params);
};

View File

@ -0,0 +1,90 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "perf_precomp.hpp"
namespace opencv_test {
struct LstmParams {
// Batch size
int nrSamples;
// Size of the input vector
int inputSize;
// Size of the internal state vector
int hiddenSize;
// Number of timesteps for the LSTM
int nrSteps;
};
static inline void PrintTo(const LstmParams& params, ::std::ostream* os) {
(*os) << "BATCH=" << params.nrSamples
<< ", IN=" << params.inputSize
<< ", HIDDEN=" << params.hiddenSize
<< ", TS=" << params.nrSteps;
}
static const LstmParams testLstmConfigs[] = {
{1, 192, 192, 100},
{1, 1024, 192, 100},
{1, 64, 192, 100},
{1, 192, 512, 100},
{64, 192, 192, 2},
{64, 1024, 192, 2},
{64, 64, 192, 2},
{64, 192, 512, 2},
{128, 192, 192, 2},
{128, 1024, 192, 2},
{128, 64, 192, 2},
{128, 192, 512, 2}
};
class Layer_LSTM : public TestBaseWithParam<LstmParams> {};
PERF_TEST_P_(Layer_LSTM, lstm) {
const LstmParams& params = GetParam();
LayerParams lp;
lp.type = "LSTM";
lp.name = "testLstm";
lp.set("produce_cell_output", false);
lp.set("use_timestamp_dim", true);
Mat weightH(params.hiddenSize * 4, params.hiddenSize, CV_32FC1, cv::Scalar(0));
Mat weightX(params.hiddenSize * 4, params.inputSize, CV_32FC1, cv::Scalar(0));
Mat bias(params.hiddenSize * 4, 1, CV_32FC1, cv::Scalar(0));
Mat hInternal(params.nrSteps, params.hiddenSize, CV_32FC1, cv::Scalar(0));
Mat cInternal(params.nrSteps, params.hiddenSize, CV_32FC1, cv::Scalar(0));
lp.blobs.push_back(weightH);
lp.blobs.push_back(weightX);
lp.blobs.push_back(bias);
lp.blobs.push_back(hInternal);
lp.blobs.push_back(cInternal);
std::vector<int> inputDims;
inputDims.push_back(params.nrSamples);
inputDims.push_back(params.nrSteps);
inputDims.push_back(params.inputSize);
Mat input(inputDims.size(), inputDims.data(), CV_32FC1);
input = cv::Scalar(0);
Net net;
net.addLayerToPrev(lp.name, lp.type, lp);
net.setInput(input);
// Warm up
std::vector<Mat> outputs(2);
net.forward(outputs, "testLstm");
TEST_CYCLE()
{
net.forward(outputs, "testLstm");
}
SANITY_CHECK_NOTHING();
}
INSTANTIATE_TEST_CASE_P(/**/, Layer_LSTM, testing::ValuesIn(testLstmConfigs));
} // namespace

View File

@ -646,29 +646,26 @@ struct DataLayer : public Layer
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
// FIXIT: add wrapper without exception suppression
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
forward_ocl(inputs_arr, outputs_arr, internals_arr))
if (outputs_arr.depth() == CV_16S)
{
forward_fallback(inputs_arr, outputs_arr, internals_arr);
return;
}
bool isFP16 = outputs_arr.depth() == CV_16S;
std::vector<Mat> outputs, internals;
outputs_arr.getMatVector(outputs);
internals_arr.getMatVector(internals);
// Supported modes:
// | Input type | Output type |
// | fp32 | fp32 |
// | uint8 | fp32 |
for (int i = 0; i < inputsData.size(); ++i)
{
double scale = scaleFactors[i];
Scalar& mean = means[i];
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4);
CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, "");
if (isFP16)
CV_CheckTypeEQ(outputs[i].type(), CV_16SC1, "");
else
CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, "");
bool singleMean = true;
for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j)
@ -678,34 +675,49 @@ struct DataLayer : public Layer
if (singleMean)
{
inputsData[i].convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
if (isFP16)
{
Mat input_f32;
inputsData[i].convertTo(input_f32, CV_32F, scale, -mean[0] * scale);
convertFp16(input_f32, outputs[i]);
}
else
{
inputsData[i].convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
}
}
else
{
for (int n = 0; n < inputsData[i].size[0]; ++n)
{
for (int c = 0; c < inputsData[i].size[1]; ++c)
{
Mat inp = getPlane(inputsData[i], n, c);
Mat out = getPlane(outputs[i], n, c);
inp.convertTo(out, CV_32F, scale, -mean[c] * scale);
if (isFP16)
{
Mat input_f32;
inp.convertTo(input_f32, CV_32F, scale, -mean[c] * scale);
convertFp16(input_f32, out);
}
else
{
inp.convertTo(out, CV_32F, scale, -mean[c] * scale);
}
}
}
}
}
}
#ifdef HAVE_OPENCL
std::vector<Mat> tmp_expressions;
bool forward_ocl(InputArrayOfArrays, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
{
// Supported modes:
// | Input type | Output type |
// | fp32 | fp32 |
// | fp32 | fp16 |
// | uint8 | fp32 |
bool isFP16 = outputs_.depth() == CV_16S;
std::vector<UMat> outputs;
outputs_.getUMatVector(outputs);
tmp_expressions.clear();
for (int i = 0; i < inputsData.size(); ++i)
{
Mat inputData = inputsData[i];
@ -713,58 +725,55 @@ struct DataLayer : public Layer
double scale = scaleFactors[i];
Scalar& mean = means[i];
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4);
CV_Assert(mean == Scalar() || inputData.size[1] <= 4);
if (isFP16)
CV_CheckTypeEQ(outputs[i].type(), CV_16SC1, "");
else
CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, "");
bool singleMean = true;
for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j)
for (int j = 1; j < std::min(4, inputData.size[1]) && singleMean; ++j)
{
singleMean = mean[j] == mean[j - 1];
}
if (outputs_.depth() == CV_16S)
if (singleMean)
{
if (singleMean)
if (isFP16)
{
tmp_expressions.push_back(Mat(scale * (inputsData[i] - mean[0])));
convertFp16(tmp_expressions.back(), outputs[i]);
UMat input_i;
inputData.convertTo(input_i, CV_32F, scale, -mean[0] * scale);
convertFp16(input_i, outputs[i]);
}
else
{
for (int n = 0; n < inputsData[i].size[0]; ++n)
for (int c = 0; c < inputsData[i].size[1]; ++c)
{
Mat inp = getPlane(inputsData[i], n, c);
std::vector<cv::Range> plane(4, Range::all());
plane[0] = Range(n, n + 1);
plane[1] = Range(c, c + 1);
UMat out = outputs[i](plane).reshape(1, inp.dims, inp.size);
tmp_expressions.push_back(scale * (inp - mean[c]));
convertFp16(tmp_expressions.back(), out);
}
inputData.convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
}
}
else
{
CV_Assert(outputs_.depth() == CV_32F);
if (singleMean)
for (int n = 0; n < inputData.size[0]; ++n)
{
inputsData[i].convertTo(outputs[i], CV_32F, scale, -mean[0] * scale);
}
else
{
for (int n = 0; n < inputsData[i].size[0]; ++n)
for (int c = 0; c < inputsData[i].size[1]; ++c)
for (int c = 0; c < inputData.size[1]; ++c)
{
Mat inp = getPlane(inputData, n, c);
std::vector<cv::Range> plane(4, Range::all());
plane[0] = Range(n, n + 1);
plane[1] = Range(c, c + 1);
UMat out = outputs[i](plane).reshape(1, inp.dims, inp.size);
if (isFP16)
{
UMat input_i;
inp.convertTo(input_i, CV_32F, scale, -mean[c] * scale);
convertFp16(input_i, out);
}
else
{
Mat inp = getPlane(inputsData[i], n, c);
std::vector<cv::Range> plane(4, Range::all());
plane[0] = Range(n, n + 1);
plane[1] = Range(c, c + 1);
UMat out = outputs[i](plane).reshape(1, inp.dims, inp.size);
inp.convertTo(out, CV_32F, scale, -mean[c] * scale);
}
}
}
}
}
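
Both the CPU and OpenCL paths of DataLayer now follow the same scheme: normalize into fp32 first, then convert to fp16 only when the output tensor is CV_16S. A hedged sketch of that conversion step in isolation (the scale and mean values are examples, not taken from the code above):

#include <opencv2/core.hpp>

cv::Mat normalizeToFp16(const cv::Mat& input)
{
    const double scale = 1.0 / 255.0;     // example scale factor
    const double meanVal = 127.5;         // example single-channel mean

    cv::Mat asFp32, asFp16;
    input.convertTo(asFp32, CV_32F, scale, -meanVal * scale);
    cv::convertFp16(asFp32, asFp16);      // fp16 payload stored in a CV_16S Mat
    return asFp16;
}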

View File

@ -987,6 +987,9 @@ const char* const SigmoidFunctor::BaseDefaultFunctor<SigmoidFunctor>::ocl_kernel
struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
{
typedef ELULayer Layer;
float alpha;
explicit ELUFunctor(float alpha_ = 1.f) : alpha(alpha_) {}
bool supportBackend(int backendId, int)
{
@ -998,7 +1001,12 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
inline float calculate(float x) const
{
return x >= 0.f ? x : exp(x) - 1.f;
return x >= 0.f ? x : alpha * (exp(x) - 1.f);
}
inline void setKernelParams(ocl::Kernel& kernel) const
{
kernel.set(3, alpha);
}
#ifdef HAVE_CUDA
@ -1012,7 +1020,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
void attachHalide(const Halide::Expr& input, Halide::Func& top)
{
Halide::Var x("x"), y("y"), c("c"), n("n");
top(x, y, c, n) = select(input >= 0.0f, input, exp(input) - 1);
top(x, y, c, n) = select(input >= 0.0f, input, alpha * (exp(input) - 1));
}
#endif // HAVE_HALIDE
@ -1026,7 +1034,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
return std::make_shared<ngraph::op::Elu>(node, 1.0);
return std::make_shared<ngraph::op::Elu>(node, alpha);
}
#endif // HAVE_DNN_NGRAPH
@ -1856,8 +1864,10 @@ Ptr<SigmoidLayer> SigmoidLayer::create(const LayerParams& params)
Ptr<ELULayer> ELULayer::create(const LayerParams& params)
{
Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor()));
float alpha = params.get<float>("alpha", 1.0f);
Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor(alpha)));
l->setParamsFrom(params);
l->alpha = alpha;
return l;
}
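
With this change ELU honours the alpha attribute instead of hard-coding 1.0: f(x) = x for x >= 0 and alpha * (exp(x) - 1) otherwise, across the CPU, OpenCL, Halide and nGraph paths. A small sketch of constructing the layer with a non-default alpha (the value is illustrative):

#include <opencv2/dnn.hpp>

cv::Ptr<cv::dnn::ELULayer> makeElu()
{
    cv::dnn::LayerParams lp;
    lp.type = "ELU";
    lp.name = "elu_example";
    lp.set("alpha", 0.5f);                 // omitted -> default of 1.0
    return cv::dnn::ELULayer::create(lp);  // the created layer exposes alpha == 0.5f
}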

View File

@ -230,17 +230,17 @@ public:
#if CV_TRY_AVX512_SKX
if( useAVX512 )
opt_AVX512_SKX::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize);
opt_AVX512_SKX::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize_aligned);
else
#endif
#if CV_TRY_AVX2
if( useAVX2 )
opt_AVX2::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize);
opt_AVX2::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize_aligned);
else
#endif
#if CV_TRY_AVX
if( useAVX )
opt_AVX::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize);
opt_AVX::fastGEMM1T( sptr, wptr, wstep, biasptr, dptr, nw, vecsize_aligned);
else
#endif
#if CV_TRY_RVV

View File

@ -550,13 +550,24 @@ void fastDepthwiseConv( const float* wptr,
_mm256_zeroupper();
}
// Source values for the mask used when handling the vector tail
static const uint32_t tailMaskArray[15] = {
0, 0, 0, 0, 0, 0, 0, 0,
0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL
};
// dst = vec * weights^t + bias
// Requires that vecsize is at least 8 or equal to 0 to avoid memory access problems. Does not require alignment.
void fastGEMM1T( const float* vec, const float* weights,
size_t wstep, const float* bias,
float* dst, int nvecs, int vecsize )
{
int i = 0;
CV_Assert(vecsize >= 8 || vecsize == 0);
__m256 tailMask = _mm256_loadu_ps(reinterpret_cast<const float*>(tailMaskArray) + (vecsize % 8));
for( ; i <= nvecs - 8; i += 8 )
{
const float* wptr = weights + i*wstep;
@ -565,18 +576,36 @@ void fastGEMM1T( const float* vec, const float* weights,
vs4 = _mm256_setzero_ps(), vs5 = _mm256_setzero_ps(),
vs6 = _mm256_setzero_ps(), vs7 = _mm256_setzero_ps();
for( int k = 0; k < vecsize; k += 8, wptr += 8 )
int k = 0;
for( ; k <= vecsize-8; k += 8, wptr += 8 )
{
__m256 v = _mm256_load_ps(vec + k);
__m256 v = _mm256_loadu_ps(vec + k);
vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
vs1 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep), v, vs1);
vs2 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*2), v, vs2);
vs3 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*3), v, vs3);
vs4 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*4), v, vs4);
vs5 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*5), v, vs5);
vs6 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*6), v, vs6);
vs7 = _mm256_fmadd_ps(_mm256_load_ps(wptr + wstep*7), v, vs7);
vs0 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr), v, vs0);
vs1 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep), v, vs1);
vs2 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*2), v, vs2);
vs3 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*3), v, vs3);
vs4 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*4), v, vs4);
vs5 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*5), v, vs5);
vs6 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*6), v, vs6);
vs7 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr + wstep*7), v, vs7);
}
if (k != vecsize) {
// Tail
k = vecsize - 8;
wptr = weights + i * wstep + k;
__m256 v = _mm256_loadu_ps(vec + k);
v = _mm256_and_ps(v, tailMask);
vs0 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr), tailMask), v, vs0);
vs1 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep), tailMask), v, vs1);
vs2 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 2), tailMask), v, vs2);
vs3 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 3), tailMask), v, vs3);
vs4 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 4), tailMask), v, vs4);
vs5 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 5), tailMask), v, vs5);
vs6 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 6), tailMask), v, vs6);
vs7 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr + wstep * 7), tailMask), v, vs7);
}
__m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs1), _mm256_hadd_ps(vs2, vs3));
@ -598,10 +627,20 @@ void fastGEMM1T( const float* vec, const float* weights,
const float* wptr = weights + i*wstep;
__m256 vs0 = _mm256_setzero_ps();
for( int k = 0; k < vecsize; k += 8, wptr += 8 )
int k = 0;
for( ; k <= vecsize-8; k += 8, wptr += 8 )
{
__m256 v = _mm256_load_ps(vec + k);
vs0 = _mm256_fmadd_ps(_mm256_load_ps(wptr), v, vs0);
__m256 v = _mm256_loadu_ps(vec + k);
vs0 = _mm256_fmadd_ps(_mm256_loadu_ps(wptr), v, vs0);
}
if (k != vecsize) {
// Tail
k = vecsize - 8;
wptr = weights + i * wstep + k;
__m256 v = _mm256_loadu_ps(vec + k);
v = _mm256_and_ps(v, tailMask);
vs0 = _mm256_fmadd_ps(_mm256_and_ps(_mm256_loadu_ps(wptr), tailMask), v, vs0);
}
__m256 s0 = _mm256_hadd_ps(_mm256_hadd_ps(vs0, vs0), vs0);
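
The tail branch above re-reads the last eight floats of the vector whenever vecsize is not a multiple of 8, and tailMaskArray zeroes the lanes that the main loop already accumulated, so nothing is counted twice and no out-of-bounds load is needed (hence the vecsize >= 8 precondition). A scalar sketch of the same idea, not the SIMD code itself:

#include <cassert>

// Reference dot product using the "overlapping masked tail" scheme.
float dotWithMaskedTail(const float* a, const float* b, int n)
{
    assert(n == 0 || n >= 8);
    float acc = 0.f;
    int k = 0;
    for (; k <= n - 8; k += 8)             // full 8-wide blocks
        for (int j = 0; j < 8; j++)
            acc += a[k + j] * b[k + j];
    if (k != n)                            // 1..7 leftover elements
    {
        k = n - 8;                         // step back so the block stays in bounds
        for (int j = 0; j < 8; j++)
        {
            // lanes already covered by the main loop are masked to zero
            float mask = (k + j >= n - (n % 8)) ? 1.f : 0.f;
            acc += mask * a[k + j] * b[k + j];
        }
    }
    return acc;
}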

View File

@ -87,7 +87,7 @@ public:
if (size % 2 != 1 || size <= 0)
CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");
alpha = params.get<double>("alpha", 1);
alpha = params.get<double>("alpha", 0.0001);
beta = params.get<double>("beta", 0.75);
bias = params.get<double>("bias", 1);
normBySize = params.get<bool>("norm_by_size", true);

View File

@ -46,6 +46,8 @@
#include <cmath>
#include <opencv2/dnn/shape_utils.hpp>
#include "layers_common.hpp"
namespace cv
{
namespace dnn
@ -118,10 +120,23 @@ class LSTMLayerImpl CV_FINAL : public LSTMLayer
ActivationFunction g_activation;
ActivationFunction h_activation;
#if CV_TRY_AVX
bool useAVX;
#endif
#if CV_TRY_AVX2
bool useAVX2;
#endif
public:
LSTMLayerImpl(const LayerParams& params)
: numTimeStamps(0), numSamples(0)
#if CV_TRY_AVX
, useAVX(checkHardwareSupport(CPU_AVX))
#endif
#if CV_TRY_AVX2
, useAVX2(checkHardwareSupport(CPU_AVX2))
#endif
{
setParamsFrom(params);
@ -343,6 +358,15 @@ public:
hOutTs = hOutTs.colRange(i * hOutTs.cols / numDirs, (i + 1) * hOutTs.cols / numDirs);
Mat cOutTs = produceCellOutput ? output[1].reshape(1, numSamplesTotal) : Mat();
#if CV_TRY_AVX2 || CV_TRY_AVX
bool canUseAvx = gates.isContinuous() && bias.isContinuous()
&& Wx.depth() == CV_32F && gates.depth() == CV_32F
&& bias.depth() == CV_32F && Wx.cols >= 8;
bool canUseAvx_hInternal = hInternal.isContinuous() && gates.isContinuous() && bias.isContinuous()
&& Wh.depth() == CV_32F && hInternal.depth() == CV_32F && gates.depth() == CV_32F
&& Wh.cols >= 8;
#endif
int tsStart, tsEnd, tsInc;
if (reverse || i == 1) {
tsStart = numTimeStamps - 1;
@ -359,9 +383,82 @@ public:
Range curRowRange(ts*numSamples, (ts + 1)*numSamples);
Mat xCurr = xTs.rowRange(curRowRange);
gemm(xCurr, Wx, 1, gates, 0, gates, GEMM_2_T); // Wx * x_t
gemm(hInternal, Wh, 1, gates, 1, gates, GEMM_2_T); //+Wh * h_{t-1}
gemm(dummyOnes, bias, 1, gates, 1, gates); //+b
#if CV_TRY_AVX2
if (useAVX2 && canUseAvx && xCurr.isContinuous())
{
for (int n = 0; n < xCurr.rows; n++) {
opt_AVX2::fastGEMM1T(
xCurr.ptr<float>(n),
Wx.ptr<float>(),
Wx.step1(),
bias.ptr<float>(),
gates.ptr<float>(n),
Wx.rows,
Wx.cols
);
}
}
else
#endif
#if CV_TRY_AVX
if (useAVX && canUseAvx && xCurr.isContinuous())
{
for (int n = 0; n < xCurr.rows; n++) {
opt_AVX::fastGEMM1T(
xCurr.ptr<float>(n),
Wx.ptr<float>(),
Wx.step1(),
bias.ptr<float>(),
gates.ptr<float>(n),
Wx.rows,
Wx.cols
);
}
}
else
#endif
{
gemm(xCurr, Wx, 1, gates, 0, gates, GEMM_2_T); // Wx * x_t
gemm(dummyOnes, bias, 1, gates, 1, gates); //+b
}
#if CV_TRY_AVX2
if (useAVX2 && canUseAvx_hInternal)
{
for (int n = 0; n < hInternal.rows; n++) {
opt_AVX2::fastGEMM1T(
hInternal.ptr<float>(n),
Wh.ptr<float>(),
Wh.step1(),
gates.ptr<float>(n),
gates.ptr<float>(n),
Wh.rows,
Wh.cols
);
}
}
else
#endif
#if CV_TRY_AVX
if (useAVX && canUseAvx_hInternal)
{
for (int n = 0; n < hInternal.rows; n++) {
opt_AVX::fastGEMM1T(
hInternal.ptr<float>(n),
Wh.ptr<float>(),
Wh.step1(),
gates.ptr<float>(n),
gates.ptr<float>(n),
Wh.rows,
Wh.cols
);
}
}
else
#endif
{
gemm(hInternal, Wh, 1, gates, 1, gates, GEMM_2_T); //+Wh * h_{t-1}
}
Mat gateI = gates.colRange(0*numOut, 1*numOut);
Mat gateF = gates.colRange(1*numOut, 2*numOut);

View File

@ -48,7 +48,6 @@
#include <vector>
#include <fstream>
#include <sys/stat.h>
#include <assert.h>
#include "../include/common.hpp"
#include "../include/ocl4dnn.hpp"
#include "opencl_kernels_dnn.hpp"

View File

@ -110,17 +110,10 @@ private:
opencv_onnx::GraphProto& net;
};
class SoftMaxSubgraph : public Subgraph
class SoftMaxSubgraphBase : public Subgraph
{
public:
SoftMaxSubgraph() : axis(1)
{
int input = addNodeToMatch("");
int inpExp = addNodeToMatch("Exp", input);
int sum = addNodeToMatch("ReduceSum", inpExp);
addNodeToMatch("Div", inpExp, sum);
setFusedNode("Softmax", input);
}
SoftMaxSubgraphBase() : axis(1), id(-1) {}
virtual bool match(const Ptr<ImportGraphWrapper>& net, int nodeId,
std::vector<int>& matchedNodesIds,
@ -128,7 +121,8 @@ public:
{
if (Subgraph::match(net, nodeId, matchedNodesIds, targetNodesIds))
{
Ptr<ImportNodeWrapper> sum = net->getNode(matchedNodesIds[1]);
CV_Assert(id >= 0 && id < matchedNodesIds.size());
Ptr<ImportNodeWrapper> sum = net->getNode(matchedNodesIds[id]);
opencv_onnx::NodeProto* node = sum.dynamicCast<ONNXNodeWrapper>()->node;
for (int i = 0; i < node->attribute_size(); i++)
@ -156,8 +150,60 @@ public:
attr->set_i(axis);
}
private:
protected:
int axis;
int id;
};
class SoftMaxSubgraph : public SoftMaxSubgraphBase
{
public:
SoftMaxSubgraph()
{
int input = addNodeToMatch("");
int inpExp = addNodeToMatch("Exp", input);
int sum = addNodeToMatch("ReduceSum", inpExp);
id = 1;
addNodeToMatch("Div", inpExp, sum);
setFusedNode("Softmax", input);
}
};
class SoftMaxSubgraph2 : public SoftMaxSubgraphBase {
public:
SoftMaxSubgraph2() {
int input = addNodeToMatch("");
int reducemax = addNodeToMatch("ReduceMax", input);
id = 0;
int sub = addNodeToMatch("Sub", input, reducemax);
int exp = addNodeToMatch("Exp", sub);
int reducesum = addNodeToMatch("ReduceSum", exp, addNodeToMatch(""));
addNodeToMatch("Div", exp, reducesum);
setFusedNode("Softmax", input);
}
};
class LogSoftMaxSubgraph : public SoftMaxSubgraphBase
{
public:
LogSoftMaxSubgraph()
{
int input = addNodeToMatch("");
int reducemax = addNodeToMatch("ReduceMax", input);
id = 0;
int sub_1 = addNodeToMatch("Sub", input, reducemax);
int exp = addNodeToMatch("Exp", sub_1);
int reducesum = addNodeToMatch("ReduceSum", exp, addNodeToMatch(""));
int log = addNodeToMatch("Log", reducesum);
addNodeToMatch("Sub", sub_1, log);
setFusedNode("LogSoftmax", input);
}
};
class NormalizeSubgraphBase : public Subgraph
@ -577,6 +623,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net)
subgraphs.push_back(makePtr<ResizeSubgraph1>());
subgraphs.push_back(makePtr<ResizeSubgraph2>());
subgraphs.push_back(makePtr<SoftMaxSubgraph>());
subgraphs.push_back(makePtr<SoftMaxSubgraph2>());
subgraphs.push_back(makePtr<LogSoftMaxSubgraph>());
subgraphs.push_back(makePtr<NormalizeSubgraph1>());
subgraphs.push_back(makePtr<NormalizeSubgraph2>());
subgraphs.push_back(makePtr<NormalizeSubgraph2_2>());
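
SoftMaxSubgraph2 and LogSoftMaxSubgraph recognise the numerically stable expansion many exporters emit: subtract the per-axis maximum, exponentiate, normalise by the sum (and take the log for LogSoftmax), and collapse it into a single Softmax/LogSoftmax node. A scalar sketch of the pattern being matched (one row, illustrative only):

#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> stableSoftmax(const std::vector<float>& x)
{
    if (x.empty())
        return std::vector<float>();
    const float m = *std::max_element(x.begin(), x.end());   // ReduceMax
    std::vector<float> e(x.size());
    float sum = 0.f;
    for (size_t i = 0; i < x.size(); i++)
    {
        e[i] = std::exp(x[i] - m);                           // Sub + Exp
        sum += e[i];                                         // ReduceSum
    }
    for (size_t i = 0; i < e.size(); i++)
        e[i] /= sum;                                         // Div, fused into Softmax
    return e;
}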

View File

@ -768,12 +768,25 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto)
}
}
void setCeilMode(LayerParams& layerParams)
{
// the deprecated auto_pad attribute implies ceil mode
if (layerParams.has("pad_mode"))
{
layerParams.set("ceil_mode", true);
}
else if (!layerParams.has("ceil_mode"))
{
layerParams.set("ceil_mode", false);
}
}
void ONNXImporter::parseMaxPool(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
int depth = layerParams.get<int>("depth", CV_32F);
layerParams.type = (depth == CV_8S) ? "PoolingInt8" : "Pooling";
layerParams.set("pool", "MAX");
layerParams.set("ceil_mode", layerParams.has("pad_mode"));
setCeilMode(layerParams);
addLayer(layerParams, node_proto);
}
@ -781,7 +794,7 @@ void ONNXImporter::parseAveragePool(LayerParams& layerParams, const opencv_onnx:
{
layerParams.type = "Pooling";
layerParams.set("pool", "AVE");
layerParams.set("ceil_mode", layerParams.has("pad_mode"));
setCeilMode(layerParams);
layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
addLayer(layerParams, node_proto);
}
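
setCeilMode keeps ceil mode for the deprecated auto_pad attribute, honours an explicit ceil_mode attribute when present, and otherwise defaults to false, matching the ONNX default. The setting only affects the pooling output size; a small sketch of the two formulas along one spatial axis (dilation 1, hypothetical helper, not part of the importer):

#include <cmath>

// Output length of a pooling window along one axis.
int poolOutSize(int in, int kernel, int stride, int padBegin, int padEnd, bool ceilMode)
{
    const double span = in + padBegin + padEnd - kernel;
    return ceilMode ? (int)std::ceil(span / stride) + 1
                    : (int)std::floor(span / stride) + 1;
}

// Example: in = 8, kernel = 3, stride = 2, no padding -> 3 with floor, 4 with ceil_mode.
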
@ -1077,6 +1090,7 @@ void ONNXImporter::parseSplit(LayerParams& layerParams, const opencv_onnx::NodeP
}
int depth = layerParams.get<int>("depth", CV_32F);
layerParams.type = (depth == CV_8S) ? "SliceInt8" : "Slice";
layerParams.set("axis", layerParams.get<float>("axis", 0));
addLayer(layerParams, node_proto);
}
@ -1085,6 +1099,14 @@ void ONNXImporter::parseBias(LayerParams& layerParams, const opencv_onnx::NodePr
opencv_onnx::NodeProto node_proto = node_proto_;
const std::string& layer_type = node_proto.op_type();
bool isSub = layer_type == "Sub";
if (layer_type == "Sum" && node_proto.input_size() == 1)
{
layerParams.type = "Identity";
addLayer(layerParams, node_proto);
return;
}
CV_Assert((node_proto.input_size() == 2) || (layer_type == "Sum" && node_proto.input_size() > 2));
if (layer_type == "Sum" && node_proto.input_size() > 2)
@ -1382,15 +1404,15 @@ void ONNXImporter::parseImageScaler(LayerParams& layerParams, const opencv_onnx:
void ONNXImporter::parseClip(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
layerParams.type = "ReLU6";
replaceLayerParam(layerParams, "min", "min_value");
replaceLayerParam(layerParams, "max", "max_value");
layerParams.set("min_value", layerParams.get<float>("min", -FLT_MAX));
layerParams.set("max_value", layerParams.get<float>("max", FLT_MAX));
addLayer(layerParams, node_proto);
}
void ONNXImporter::parseLeakyRelu(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
layerParams.type = "ReLU";
replaceLayerParam(layerParams, "alpha", "negative_slope");
layerParams.set("negative_slope", layerParams.get<float>("alpha", 0.01));
addLayer(layerParams, node_proto);
}
@ -1875,6 +1897,16 @@ void ONNXImporter::parseTranspose(LayerParams& layerParams, const opencv_onnx::N
int depth = layerParams.get<int>("depth", CV_32F);
layerParams.type = (depth == CV_8S) ? "PermuteInt8" : "Permute";
replaceLayerParam(layerParams, "perm", "order");
if (!layerParams.has("order")) {
MatShape inpShape = outShapes[node_proto.input(0)];
size_t dims = inpShape.size();
std::vector<int> perm(dims);
for (size_t d = 0; d < dims; ++d)
{
perm[d] = static_cast<int>(dims - 1 - d);
}
layerParams.set("order", DictValue::arrayInt(perm.data(), perm.size()));
}
CV_Assert(node_proto.input_size() == 1);
if (constBlobs.find(node_proto.input(0)) != constBlobs.end())

View File

@ -131,13 +131,14 @@ __kernel void PowForward(const int n, __global const T* in, __global T* out,
out[index] = pow(shift + scale * in[index], power);
}
__kernel void ELUForward(const int n, __global const T* in, __global T* out)
__kernel void ELUForward(const int n, __global const T* in, __global T* out,
const KERNEL_ARG_DTYPE alpha)
{
int index = get_global_id(0);
if (index < n)
{
T src = in[index];
out[index] = (src >= 0.f) ? src : exp(src) - 1;
out[index] = (src >= 0.f) ? src : alpha * (exp(src) - 1);
}
}

View File

@ -274,7 +274,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
{
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
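Most of the test edits below tighten existing Inference Engine guards with INF_ENGINE_VER_MAJOR_LT(2021040000) or add INF_ENGINE_VER_MAJOR_EQ(2021040000) branches, i.e. they separate "before OpenVINO 2021.4" from "exactly 2021.4". Assuming the usual encoding of INF_ENGINE_RELEASE (e.g. 2021040000 for release 2021.4) and macro definitions of the form found in op_inf_engine.hpp, the guards expand roughly to:

// Sketch of how the version guards evaluate; the real macros live in
// modules/dnn/src/op_inf_engine.hpp and their exact form is assumed here.
#define INF_ENGINE_RELEASE 2021040000  // OpenVINO 2021.4
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
// legacy skip tags: compiled only for releases before 2021.4
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// new skip tags: compiled only for 2021.4 itself
#endif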

View File

@ -112,10 +112,12 @@ TEST(Test_Caffe, read_googlenet)
TEST_P(Test_Caffe_nets, Axpy)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
String proto = _tf("axpy.prototxt");
Net net = readNetFromCaffe(proto);
@ -156,7 +158,12 @@ TEST_P(Test_Caffe_nets, Axpy)
l1 = 2e-4;
lInf = 1e-3;
}
else if(target == DNN_TARGET_CUDA_FP16)
if (target == DNN_TARGET_MYRIAD)
{
l1 = 0.001;
lInf = 0.001;
}
if(target == DNN_TARGET_CUDA_FP16)
{
l1 = 0.0002;
lInf = 0.0007;
@ -688,7 +695,7 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
CV_TEST_TAG_DEBUG_VERYLONG
);
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@ -699,6 +706,19 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// IE exception: Ngraph operation Reshape with name rpn_cls_score_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
// Check 'backward_compatible_check || in_out_elements_equal' failed at core/src/op/reshape.cpp:390:
// While validating node 'v1::Reshape bbox_pred_reshape (bbox_pred[0]:f32{1,84}, Constant_241202[0]:i64{4}) -> (f32{?,?,?,?})' with friendly_name 'bbox_pred_reshape':
// Requested output shape {1,6300,4,1} is incompatible with input shape Shape{1, 84}
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849,
0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953,
0, 12, 0.993028, 133.221, 189.377, 350.994, 563.166);

View File

@ -121,7 +121,7 @@ public:
{
SCOPED_TRACE("batch size 2");
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (target == DNN_TARGET_MYRIAD && name == "shortcut")
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
@ -442,22 +442,31 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
{
Backend backendId = get<0>(get<1>(GetParam()));
Target targetId = get<1>(get<1>(GetParam()));
std::string prefix = get<0>(GetParam());
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (INF_ENGINE_VER_MAJOR_LT(2019020000) && backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
std::string prefix = get<0>(GetParam());
if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov4") // NC_OUT_OF_MEMORY
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_GE(2021040000)
if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov3") // NC_OUT_OF_MEMORY
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#else
if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov4") // NC_OUT_OF_MEMORY
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif
const int numInputs = 2;
std::vector<Mat> inputs(numInputs);
int blobSize[] = {1, 3, 416, 416};
@ -485,6 +494,34 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
netAsync.setPreferableBackend(backendId);
netAsync.setPreferableTarget(targetId);
double l1 = 0.0;
double lInf = 0.0;
#if defined(INF_ENGINE_RELEASE)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (targetId == DNN_TARGET_MYRIAD && prefix == "yolo-voc")
{
l1 = 0.02;
lInf = 0.15;
}
if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolo-voc")
{
l1 = 0.02;
lInf = 0.1;
}
if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolov3")
{
l1 = 0.001;
lInf = 0.007;
}
if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolov4")
{
l1 = 0.001;
lInf = 0.005;
}
}
#endif
// Run asynchronously. To make test more robust, process inputs in the reversed order.
for (int i = numInputs - 1; i >= 0; --i)
{
@ -494,7 +531,7 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
ASSERT_TRUE(out.valid());
Mat result;
EXPECT_TRUE(out.get(result, async_timeout));
normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
normAssert(refs[i], result, format("Index: %d", i).c_str(), l1, lInf);
}
}
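The async accuracy check above now compares against per-target l1/lInf budgets instead of exact equality, since FP16 and MYRIAD targets accumulate rounding error. In these tests l1 bounds the mean absolute difference and lInf the maximum absolute difference; a rough sketch of the shared helper they feed into (the real normAssert lives in the dnn test utilities) is:

#include <opencv2/ts.hpp>

// Approximate form of the dnn test helper: mean and max absolute error bounds.
static void normAssertSketch(cv::InputArray ref, cv::InputArray test,
                             const char* comment, double l1, double lInf)
{
    double normL1  = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
    EXPECT_LE(normL1, l1) << comment;
    EXPECT_LE(normInf, lInf) << comment;
}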
@ -870,10 +907,23 @@ TEST_P(Test_Darknet_layers, avgpool_softmax)
TEST_P(Test_Darknet_layers, region)
{
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && INF_ENGINE_VER_MAJOR_GE(2020020000))
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && INF_ENGINE_VER_MAJOR_GE(2020020000))
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// accuracy on CPU, OpenCL
// Expected: (normInf) <= (lInf), actual: 0.763223 vs 0.0001
// |ref| = 1.207319974899292
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
#endif
testDarknetLayer("region");
}

View File

@ -243,9 +243,11 @@ TEST_P(LRN, Accuracy)
Backend backendId = get<0>(get<5>(GetParam()));
Target targetId = get<1>(get<5>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
nrmType == "ACROSS_CHANNELS")
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
LayerParams lp;
lp.set("norm_region", nrmType);
@ -410,12 +412,14 @@ TEST_P(FullyConnected, Accuracy)
bool hasBias = get<3>(GetParam());
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && (targetId == DNN_TARGET_OPENCL_FP16 ||
(targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X))) {
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
}
#endif
Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
randu(weights, -1.0f, 1.0f);
@ -435,9 +439,22 @@ TEST_P(FullyConnected, Accuracy)
Mat input(4, &sz[0], CV_32F);
double l1 = 0.0;
double lInf = 0.0;
#if defined(INF_ENGINE_RELEASE)
if (targetId == DNN_TARGET_MYRIAD)
{
l1 = 0.015;
lInf = 0.025;
}
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16)
{
l1 = 0.01;
}
#endif
if (targetId == DNN_TARGET_CUDA_FP16)
l1 = 0.015;
test(lp, input, backendId, targetId, false, true, l1);
test(lp, input, backendId, targetId, false, l1, lInf);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
@ -819,18 +836,18 @@ TEST_P(Eltwise, Accuracy)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
op == "sum" && numConv == 1 && !weighted)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
#endif
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

View File

@ -196,13 +196,23 @@ TEST_P(Test_Caffe_layers, DeConvolution)
TEST_P(Test_Caffe_layers, InnerProduct)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// IE exception: Ngraph operation Reshape with name Reshape_4219609 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
#endif
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
testLayerUsingCaffeModels("layer_inner_product", true);
}
@ -300,10 +310,12 @@ TEST_P(Test_Caffe_layers, Concat)
CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH &&
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif
testLayerUsingCaffeModels("layer_concat");
@ -1434,62 +1446,6 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs_3dim, Combine(
testing::ValuesIn(list_sizes)
));
typedef testing::TestWithParam<tuple<int, int, tuple<Backend, Target> > > Test_DLDT_two_inputs;
TEST_P(Test_DLDT_two_inputs, as_backend)
{
static const float kScale = 0.5f;
static const float kScaleInv = 1.0f / kScale;
Backend backendId = get<0>(get<2>(GetParam()));
Target targetId = get<1>(get<2>(GetParam()));
Net net;
LayerParams lp;
lp.type = "Eltwise";
lp.name = "testLayer";
lp.set("operation", "sum");
int eltwiseId = net.addLayerToPrev(lp.name, lp.type, lp); // connect to a first input
net.connect(0, 1, eltwiseId, 1); // connect to a second input
int inpSize[] = {1, 2, 3, 4};
Mat firstInp(4, &inpSize[0], get<0>(GetParam()));
Mat secondInp(4, &inpSize[0], get<1>(GetParam()));
randu(firstInp, 0, 255);
randu(secondInp, 0, 255);
net.setInputsNames({"data", "second_input"});
net.setInput(firstInp, "data", kScale);
net.setInput(secondInp, "second_input", kScaleInv);
net.setPreferableBackend(backendId);
net.setPreferableTarget(targetId);
Mat out = net.forward();
Mat ref;
addWeighted(firstInp, kScale, secondInp, kScaleInv, 0, ref, CV_32F);
// Output values are in range [0, 637.5].
double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
if (targetId == DNN_TARGET_CUDA_FP16)
{
l1 = 0.06;
lInf = 0.3;
}
normAssert(out, ref, "", l1, lInf);
if (cvtest::debugLevel > 0 || HasFailure())
{
std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
std::cout << "out: " << out.reshape(1, 1) << std::endl;
}
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
dnnBackendsAndTargets()
));
class UnsupportedLayer : public Layer
{
public:

View File

@ -582,7 +582,8 @@ TEST_P(Async, create_layer_pipeline_set_and_forward_all)
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
// Exception: Default implementation fallbacks in asynchronous mode
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@ -832,4 +833,64 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
#endif // HAVE_INF_ENGINE
typedef testing::TestWithParam<tuple<MatDepth, MatDepth, tuple<Backend, Target> > > Test_two_inputs;
TEST_P(Test_two_inputs, basic)
{
static const float kScale = 0.5f;
static const float kScaleInv = 1.0f / kScale;
Backend backendId = get<0>(get<2>(GetParam()));
Target targetId = get<1>(get<2>(GetParam()));
Net net;
LayerParams lp;
lp.type = "Eltwise";
lp.name = "testLayer";
lp.set("operation", "sum");
int eltwiseId = net.addLayerToPrev(lp.name, lp.type, lp); // connect to a first input
net.connect(0, 1, eltwiseId, 1); // connect to a second input
int inpSize[] = {1, 2, 3, 4};
Mat firstInp(4, &inpSize[0], get<0>(GetParam()));
Mat secondInp(4, &inpSize[0], get<1>(GetParam()));
randu(firstInp, 0, 100);
randu(secondInp, 0, 100);
#ifndef CV_CXX11
std::vector<String> input_names;
input_names.push_back("data");
input_names.push_back("second_input");
net.setInputsNames(input_names);
#else
net.setInputsNames({"data", "second_input"});
#endif
net.setInput(firstInp, "data", kScale);
net.setInput(secondInp, "second_input", kScaleInv);
net.setPreferableBackend(backendId);
net.setPreferableTarget(targetId);
Mat out = net.forward();
Mat ref;
addWeighted(firstInp, kScale, secondInp, kScaleInv, 0, ref, CV_32F);
double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
normAssert(out, ref, "", l1, lInf);
if (cvtest::debugLevel > 0 || HasFailure())
{
std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
std::cout << "out: " << out.reshape(1, 1) << std::endl;
}
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_two_inputs, Combine(
Values(CV_32F, CV_8U),
Values(CV_32F, CV_8U),
dnnBackendsAndTargets()
));
}} // namespace

View File

@ -197,17 +197,11 @@ TEST_P(Test_ONNX_layers, Gather)
TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
testONNXModels("conv3d");
}
TEST_P(Test_ONNX_layers, Convolution3D_bias)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
testONNXModels("conv3d_bias");
}
@ -235,18 +229,73 @@ TEST_P(Test_ONNX_layers, Deconvolution)
TEST_P(Test_ONNX_layers, Deconvolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// ok
// [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
// [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV)
throw SkipTestException("OpenCV backend is not supported"); // FIXIT use tags
testONNXModels("deconv3d");
}
TEST_P(Test_ONNX_layers, Deconvolution3D_bias)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
// [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
if (backend == DNN_BACKEND_OPENCV)
throw SkipTestException("OpenCV backend is not supported"); // FIXIT use tags
testONNXModels("deconv3d_bias");
}
TEST_P(Test_ONNX_layers, Deconvolution3D_pad)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
// [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
if (backend == DNN_BACKEND_OPENCV)
throw SkipTestException("OpenCV backend is not supported"); // FIXIT use tags
testONNXModels("deconv3d_pad");
}
TEST_P(Test_ONNX_layers, Deconvolution3D_adjpad)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
// [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
if (backend == DNN_BACKEND_OPENCV)
throw SkipTestException("OpenCV backend is not supported"); // FIXIT use tags
testONNXModels("deconv3d_adjpad");
}
@ -317,16 +366,14 @@ TEST_P(Test_ONNX_layers, Scale)
TEST_P(Test_ONNX_layers, ReduceMean3D)
{
if (backend == DNN_BACKEND_CUDA)
{
// ok
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
testONNXModels("reduce_mean3d");
}
@ -443,13 +490,12 @@ TEST_P(Test_ONNX_layers, Concatenation)
TEST_P(Test_ONNX_layers, Eltwise3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
#endif
testONNXModels("eltwise3d");
}
@ -460,55 +506,56 @@ TEST_P(Test_ONNX_layers, AveragePooling)
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// ok
// accuracy
if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
// IE exception: [ GENERAL_ERROR ] AssertionFailed: !expired()
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
testONNXModels("max_pool3d", npy, 0, 0, false, false);
}
TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
{
// ok
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
testONNXModels("ave_pool3d");
}
TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
{
// ok
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
testONNXModels("pool_conv_3d");
}
@ -1011,6 +1058,7 @@ TEST_P(Test_ONNX_layers, DynamicAxes)
TEST_P(Test_ONNX_layers, MaxPool1d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@ -1019,11 +1067,20 @@ TEST_P(Test_ONNX_layers, MaxPool1d)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
{
// 2021.4: [ GENERAL_ERROR ] AssertionFailed: !expired()
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
testONNXModels("maxpooling_1d");
}
TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@ -1032,11 +1089,13 @@ TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
testONNXModels("maxpooling_sigmoid_1d");
}
TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@ -1045,11 +1104,13 @@ TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
testONNXModels("two_maxpooling_1d");
}
TEST_P(Test_ONNX_layers, AvePool1d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@ -1058,11 +1119,13 @@ TEST_P(Test_ONNX_layers, AvePool1d)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
testONNXModels("average_pooling_1d");
}
TEST_P(Test_ONNX_layers, PoolConv1d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@ -1071,6 +1134,7 @@ TEST_P(Test_ONNX_layers, PoolConv1d)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
#endif
testONNXModels("pool_conv_1d");
}
@ -1257,11 +1321,18 @@ TEST_P(Test_ONNX_nets, Squeezenet)
TEST_P(Test_ONNX_nets, Googlenet)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
// accuracy
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
const String model = _tf("models/googlenet.onnx", false);
@ -1523,7 +1594,7 @@ TEST_P(Test_ONNX_nets, DenseNet121)
TEST_P(Test_ONNX_nets, Inception_v1)
{
#if defined(INF_ENGINE_RELEASE)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
@ -1533,30 +1604,35 @@ TEST_P(Test_ONNX_nets, Inception_v1)
TEST_P(Test_ONNX_nets, Shufflenet)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
}
#endif
testONNXModels("shufflenet", pb);
}
TEST_P(Test_ONNX_nets, Resnet34_kinetics)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
{
// ok
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// IE exception: Function contains several inputs and outputs with one friendly name!
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
Mat image0 = imread(findDataFile("dnn/dog416.png"));
@ -1595,7 +1671,13 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
net.setPreferableTarget(target);
// output range [-5, 11]
float l1 = 0.0013, lInf = 0.009;
float l1 = 0.0013;
float lInf = 0.009;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
{
l1 = 0.02;
lInf = 0.07;
}
if (target == DNN_TARGET_CUDA_FP16)
{
l1 = 0.01;

View File

@ -216,13 +216,12 @@ TEST_P(Test_TensorFlow_layers, conv_pool_nchw)
TEST_P(Test_TensorFlow_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
#endif
runTensorFlowNet("conv3d");
}
@ -231,7 +230,7 @@ TEST_P(Test_TensorFlow_layers, padding)
runTensorFlowNet("padding_valid");
runTensorFlowNet("spatial_padding");
runTensorFlowNet("mirror_pad");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (target == DNN_TARGET_MYRIAD)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@ -344,6 +343,7 @@ TEST_P(Test_TensorFlow_layers, concat_axis_1)
TEST_P(Test_TensorFlow_layers, concat_3d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@ -353,6 +353,7 @@ TEST_P(Test_TensorFlow_layers, concat_3d)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
#endif
runTensorFlowNet("concat_3d");
}
@ -430,22 +431,32 @@ TEST_P(Test_TensorFlow_layers, batch_norm3D)
TEST_P(Test_TensorFlow_layers, slim_batch_norm)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
// Output values range: [-40.0597, 207.827]
double l1 = default_l1, lInf = default_lInf;
double l1 = default_l1;
double lInf = default_lInf;
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
{
l1 = 0.041;
lInf = 0.33;
}
#if defined(INF_ENGINE_RELEASE)
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
{
lInf = 0.0002;
}
#endif
else if (target == DNN_TARGET_CUDA_FP16)
{
l1 = 0.005;
lInf = 0.33;
}
runTensorFlowNet("slim_batch_norm", false, l1, lInf);
}
@ -572,7 +583,7 @@ TEST_P(Test_TensorFlow_layers, max_pool_grad)
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{
// Reference output values are in range [-0.519531, 0.112976]
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@ -586,38 +597,41 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
TEST_P(Test_TensorFlow_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// ok
// accuracy
if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
// IE exception: [ GENERAL_ERROR ] AssertionFailed: !expired()
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
runTensorFlowNet("max_pool3d");
}
TEST_P(Test_TensorFlow_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_CUDA)
{
// ok
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
#endif
if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported"); // FIXIT use tags
runTensorFlowNet("ave_pool3d");
}
@ -650,10 +664,12 @@ TEST_P(Test_TensorFlow_layers, matmul)
TEST_P(Test_TensorFlow_layers, reshape)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
runTensorFlowNet("shift_reshape_no_reorder");
runTensorFlowNet("reshape_no_reorder");
runTensorFlowNet("reshape_reduce");
@ -1319,18 +1335,35 @@ TEST_P(Test_TensorFlow_layers, lstm)
{
if(backend == DNN_BACKEND_CUDA)
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* not supported */
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// Exception: Ngraph operation Reshape with name Reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
// Xlink
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTensorFlowNet("lstm", true);
runTensorFlowNet("lstm", true, 0.0, 0.0, true);
}
TEST_P(Test_TensorFlow_layers, split)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
@ -1360,8 +1393,10 @@ TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor_align_corners)
TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor_half_pixel)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
runTensorFlowNet("resize_nearest_neighbor", false, 0.0, 0.0, false, "_half_pixel");
}
@ -1500,12 +1535,28 @@ TEST_P(Test_TensorFlow_layers, clip_by_value)
TEST_P(Test_TensorFlow_layers, tf2_prelu)
{
if (backend == DNN_BACKEND_CUDA)
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported; only across channels is supported
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backend == DNN_BACKEND_CUDA)
applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported; only across channels is supported
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
// IE exception: Input prelu:StatefulPartitionedCall/StatefulPartitionedCall/sequential/p_re_lu/add hasn't been found in primitiveIDs map
if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
);
// IE exception: Eltwise node with name `StatefulPartitionedCall/StatefulPartitionedCall/sequential/p_re_lu/add` has invalid input/output dims configuration
if (target == DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
}
#endif
runTensorFlowNet("tf2_prelu");
}

View File

@ -236,23 +236,32 @@ TEST_P(Test_Torch_layers, net_lp_pooling_square)
}
TEST_P(Test_Torch_layers, net_lp_pooling_power)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
runTorchNet("net_lp_pooling_power", "", false, true);
}
TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
double l1 = 0.0, lInf = 0.0;
if (target == DNN_TARGET_OPENCL_FP16)
{
l1 = 0.046;
lInf = 0.023;
}
else if (target == DNN_TARGET_MYRIAD)
{
l1 = 0.02;
lInf = 0.05;
}
else if (target == DNN_TARGET_CUDA_FP16)
{
l1 = 0.0042;

View File

@ -312,7 +312,7 @@ void CV_FlannSavedIndexTest::createModel(const cv::Mat &data)
case 1: createIndex( data, KDTreeIndexParams() ); break;
//case 2: createIndex( data, CompositeIndexParams() ); break; // nothing to save for linear search
//case 2: createIndex( data, AutotunedIndexParams() ); break; // possible linear index !
default: assert(0);
default: CV_Assert(0);
}
string filename = tempfile();
index->save( filename );

View File

@ -106,8 +106,8 @@ public:
fprintf(stderr, "I can only search one feature at a time for range search\n");
return -1;
}
assert(query.cols == veclen());
assert(indices.cols == dists.cols);
CV_Assert(query.cols == veclen());
CV_Assert(indices.cols == dists.cols);
int n = 0;
int* indices_ptr = NULL;

View File

@ -30,7 +30,6 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <opencv2\highgui.hpp>
#include <opencv2\highgui\highgui_winrt.hpp>
#include "window_winrt_bridge.hpp"

View File

@ -119,7 +119,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
}
}
//assert( pt.x == chain->origin.x && pt.y == chain->origin.y );
//CV_Assert( pt.x == chain->origin.x && pt.y == chain->origin.y );
if( method <= CV_CHAIN_APPROX_SIMPLE )
return cvEndWriteSeq( &writer );
@ -129,7 +129,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
len = i;
current = temp.next;
assert( current );
CV_Assert( current );
/* Pass 1.
Determines support region for all the remained points */
@ -148,7 +148,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
int dx, dy;
Cv32suf d;
assert( k <= len );
CV_Assert( k <= len );
/* calc indices */
i1 = i - k;
@ -205,7 +205,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
((double)dx2 * dx2 + (double)dy2 * dy2) ));
sk.f = (float) (temp_num + 1.1);
assert( 0 <= sk.f && sk.f <= 2.2 );
CV_Assert( 0 <= sk.f && sk.f <= 2.2 );
if( j < k && sk.i <= s )
break;
@ -258,7 +258,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
/* Pass 3.
Removes non-dominant points with 1-length support region */
current = temp.next;
assert( current );
CV_Assert( current );
prev_current = &temp;
do
@ -293,7 +293,7 @@ CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size,
/* Pass 4.
Cleans remained couples of points */
assert( temp.next );
CV_Assert( temp.next );
if( array[0].s != 0 && array[len - 1].s != 0 ) /* specific case */
{
@ -362,7 +362,7 @@ copy_vect:
// gather points
current = temp.next;
assert( current );
CV_Assert( current );
do
{
@ -439,7 +439,7 @@ cvApproxChains( CvSeq* src_seq,
if( src_seq->v_next && len >= minimal_perimeter )
{
assert( prev_contour != 0 );
CV_Assert( prev_contour != 0 );
parent = prev_contour;
prev_contour = 0;
src_seq = src_seq->v_next;
@ -590,7 +590,7 @@ approxPolyDP_( const Point_<T>* src_contour, int count0, Point_<T>* dst_contour,
dx = end_pt.x - start_pt.x;
dy = end_pt.y - start_pt.y;
assert( dx != 0 || dy != 0 );
CV_Assert( dx != 0 || dy != 0 );
while( pos != slice.end )
{
@ -815,7 +815,7 @@ cvApproxPoly( const void* array, int header_size,
CV_Error( CV_StsBadArg, "Invalid approximation method" );
}
assert( contour );
CV_Assert( contour );
if( header_size >= (int)sizeof(CvContour))
cvBoundingRect( contour, 1 );
@ -836,7 +836,7 @@ cvApproxPoly( const void* array, int header_size,
if( src_seq->v_next )
{
assert( prev_contour != 0 );
CV_Assert( prev_contour != 0 );
parent = prev_contour;
prev_contour = 0;
src_seq = src_seq->v_next;

View File

@ -205,7 +205,7 @@ public:
}
else
{
assert( cn == 3 );
CV_Assert( cn == 3 );
AutoBuffer<float> buf(alignSize(size.width, CV_SIMD_WIDTH)*3 + size.width + CV_SIMD_WIDTH - 1);
memset(buf.data(), 0, buf.size() * sizeof(float));
float *sum_b = alignPtr(buf.data(), CV_SIMD_WIDTH);

View File

@ -3567,7 +3567,7 @@ struct Luv2RGBinteger
long long int xv = ((int)up)*(long long)vp;
int x = (int)(xv/BASE);
x = y*x/BASE;
x = ((long long int)y)*x/BASE;
long long int vpl = LUVLUT.LvToVpl_b[LL*256+vv];
long long int zp = vpl - xv*(255/3);
@ -3689,6 +3689,7 @@ struct Luv2RGBinteger
vzm[i] = zm;
vx[i] = (int32_t)(xv >> base_shift);
vx[i] = (((int64_t)y_)*vx[i]) >> base_shift;
}
v_int32 zm[4];
for(int k = 0; k < 4; k++)
@ -3697,11 +3698,6 @@ struct Luv2RGBinteger
zm[k] = vx_load_aligned(vzm + k*vsize/4);
}
for(int k = 0; k < 4; k++)
{
x[k] = (y[k]*x[k]) >> base_shift;
}
// z = zm/256 + zm/65536;
for (int k = 0; k < 4; k++)
{
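The two Luv2RGBinteger edits above (scalar and SIMD paths) promote the y*x product to 64 bits before dividing by BASE / shifting by base_shift: both factors are fixed-point intermediates, and for some valid inputs their 32-bit product exceeds INT_MAX, which is the overflow exercised by the Overflow_21112 regression test added further down in this commit. A small illustration of why the widening matters (the magnitudes are illustrative, not taken from the LUTs, and BASE = 1 << 14 is an assumption about the scale used here):

#include <cstdio>

int main()
{
    const int BASE = 1 << 14;            // assumed fixed-point scale
    int y = 16000, x = 200000;           // illustrative fixed-point intermediates
    long long exact = (long long)y * x;  // 3.2e9, already above INT_MAX
    int scaled = (int)(exact / BASE);    // ~195312, fits comfortably in 32 bits
    std::printf("exact=%lld scaled=%d\n", exact, scaled);
    return 0;
}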

View File

@ -97,7 +97,7 @@ cvReadChainPoint( CvChainPtReader * reader )
reader->ptr = ptr;
reader->code = (schar)code;
assert( (code & ~7) == 0 );
CV_Assert( (code & ~7) == 0 );
reader->pt.x = pt.x + icvCodeDeltas[code].x;
reader->pt.y = pt.y + icvCodeDeltas[code].y;
}
@ -1187,7 +1187,7 @@ cvFindNextContour( CvContourScanner scanner )
}
/* hole flag of the parent must differ from the flag of the contour */
assert( par_info->is_hole != is_hole );
CV_Assert( par_info->is_hole != is_hole );
if( par_info->contour == 0 ) /* removed contour */
goto resume_scan;
}

View File

@ -716,7 +716,7 @@ CV_IMPL CvSeq* cvConvexityDefects( const CvArr* array,
dx0 = (double)hull_next->x - (double)hull_cur->x;
dy0 = (double)hull_next->y - (double)hull_cur->y;
assert( dx0 != 0 || dy0 != 0 );
CV_Assert( dx0 != 0 || dy0 != 0 );
scale = 1./std::sqrt(dx0*dx0 + dy0*dy0);
defect.start = hull_cur;

View File

@ -270,7 +270,7 @@ distanceTransformEx_5x5( const Mat& _src, Mat& _temp, Mat& _dist, Mat& _labels,
if( !s[j] )
{
tmp[j] = 0;
//assert( lls[j] != 0 );
//CV_Assert( lls[j] != 0 );
}
else
{

View File

@ -138,7 +138,7 @@ bool clipLine( Size2l img_size, Point2l& pt1, Point2l& pt2 )
}
}
assert( (c1 & c2) != 0 || (x1 | y1 | x2 | y2) >= 0 );
CV_Assert( (c1 & c2) != 0 || (x1 | y1 | x2 | y2) >= 0 );
}
return (c1 | c2) == 0;
@ -648,7 +648,7 @@ Line2( Mat& img, Point2l pt1, Point2l pt2, const void* color)
size_t step = img.step;
Size size = img.size();
//assert( img && (nch == 1 || nch == 3) && img.depth() == CV_8U );
//CV_Assert( img && (nch == 1 || nch == 3) && img.depth() == CV_8U );
Size2l sizeScaled(((int64)size.width) << XY_SHIFT, ((int64)size.height) << XY_SHIFT);
if( !clipLine( sizeScaled, pt1, pt2 ))
@ -1120,7 +1120,7 @@ FillConvexPoly( Mat& img, const Point2l* v, int npts, const void* color, int lin
p0.x <<= XY_SHIFT - shift;
p0.y <<= XY_SHIFT - shift;
assert( 0 <= shift && shift <= XY_SHIFT );
CV_Assert( 0 <= shift && shift <= XY_SHIFT );
xmin = xmax = v[0].x;
ymin = ymax = v[0].y;
@ -1340,7 +1340,7 @@ FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color )
for( i = 0; i < total; i++ )
{
PolyEdge& e1 = edges[i];
assert( e1.y0 < e1.y1 );
CV_Assert( e1.y0 < e1.y1 );
// Determine x-coordinate of the end of the edge.
// (This is not necessary x-coordinate of any vertex in the array.)
int64 x1 = e1.x + (e1.y1 - e1.y0) * e1.dx;
@ -2613,7 +2613,7 @@ cvDrawContours( void* _img, CvSeq* contour,
char code;
CV_READ_SEQ_ELEM( code, reader );
assert( (code & ~7) == 0 );
CV_Assert( (code & ~7) == 0 );
if( code != prev_code )
{

View File

@ -336,7 +336,7 @@ static int icvInitEMD( const float* signature1, int size1,
char *buffer, *buffer_end;
memset( state, 0, sizeof( *state ));
assert( cost_step % sizeof(float) == 0 );
CV_Assert( cost_step % sizeof(float) == 0 );
cost_step /= sizeof(float);
/* calculate buffer size */
@ -510,7 +510,7 @@ static int icvInitEMD( const float* signature1, int size1,
}
else
{
assert( cost );
CV_Assert( cost );
val = cost[cost_step*ci + cj];
}
state->cost[i][j] = val;
@ -552,7 +552,7 @@ static int icvInitEMD( const float* signature1, int size1,
buffer += dsize;
}
assert( buffer <= buffer_end );
CV_Assert( buffer <= buffer_end );
icvRussel( state );

View File

@ -340,8 +340,8 @@ HoughLinesSDiv( InputArray image, OutputArray lines, int type,
rv = r0 * std::cos( phi );
i = (int)rv * tn;
i += cvFloor( phi1 );
assert( i >= 0 );
assert( i < rn * tn );
CV_Assert( i >= 0 );
CV_Assert( i < rn * tn );
caccum[i] = (uchar) (caccum[i] + ((i ^ iprev) != 0));
iprev = i;
if( cmax < caccum[i] )
@ -405,8 +405,8 @@ HoughLinesSDiv( InputArray image, OutputArray lines, int type,
i = CV_IMAX( i, -1 );
i = CV_IMIN( i, sfn );
mcaccum[i]++;
assert( i >= -1 );
assert( i <= sfn );
CV_Assert( i >= -1 );
CV_Assert( i <= sfn );
}
}

View File

@ -463,7 +463,7 @@ medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
}
else
{
assert( cn == 4 );
CV_Assert( cn == 4 );
for( k = 0; k < m*4; k += 4 )
{
UPDATE_ACC01( src_top[k], 0, -- );

View File

@ -52,7 +52,7 @@ static void completeMomentState( Moments* moments )
double cx = 0, cy = 0;
double mu20, mu11, mu02;
double inv_m00 = 0.0;
assert( moments != 0 );
CV_Assert( moments != 0 );
if( fabs(moments->m00) > DBL_EPSILON )
{

View File

@ -54,7 +54,6 @@
#include "hal_replacement.hpp"
#include <math.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

View File

@ -3191,7 +3191,7 @@ static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, Dec
if( sx1 - fsx1 > 1e-3 )
{
assert( k < ssize*2 );
CV_Assert( k < ssize*2 );
tab[k].di = dx * cn;
tab[k].si = (sx1 - 1) * cn;
tab[k++].alpha = (float)((sx1 - fsx1) / cellWidth);
@ -3199,7 +3199,7 @@ static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, Dec
for(int sx = sx1; sx < sx2; sx++ )
{
assert( k < ssize*2 );
CV_Assert( k < ssize*2 );
tab[k].di = dx * cn;
tab[k].si = sx * cn;
tab[k++].alpha = float(1.0 / cellWidth);
@ -3207,7 +3207,7 @@ static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, Dec
if( fsx2 - sx2 > 1e-3 )
{
assert( k < ssize*2 );
CV_Assert( k < ssize*2 );
tab[k].di = dx * cn;
tab[k].si = sx2 * cn;
tab[k++].alpha = (float)(std::min(std::min(fsx2 - sx2, 1.), cellWidth) / cellWidth);
@ -3899,7 +3899,7 @@ void resize(int src_type,
{
if( k == 0 || ytab[k].di != ytab[k-1].di )
{
assert( ytab[k].di == dy );
CV_Assert( ytab[k].di == dy );
tabofs[dy++] = k;
}
}

View File

@ -74,7 +74,7 @@ adjustRect( const uchar* src, size_t src_step, int pix_size,
src += rect.width*pix_size;
rect.width = 0;
}
assert( rect.width <= win_size.width );
CV_Assert( rect.width <= win_size.width );
}
if( ip.y >= 0 )

View File

@ -155,7 +155,7 @@ void cv::watershed( InputArray _src, InputOutputArray _markers )
dr = std::abs((ptr1)[2] - (ptr2)[2]);\
diff = ws_max(db,dg); \
diff = ws_max(diff,dr); \
assert( 0 <= diff && diff <= 255 ); \
CV_Assert( 0 <= diff && diff <= 255 ); \
}
CV_Assert( src.type() == CV_8UC3 && dst.type() == CV_32SC1 );
@ -215,7 +215,7 @@ void cv::watershed( InputArray _src, InputOutputArray _markers )
}
// Add to according queue
assert( 0 <= idx && idx <= 255 );
CV_Assert( 0 <= idx && idx <= 255 );
ws_push( idx, i*mstep + j, i*istep + j*3 );
m[0] = IN_QUEUE;
}
@ -286,7 +286,7 @@ void cv::watershed( InputArray _src, InputOutputArray _markers )
}
// Set label to current pixel in marker image
assert( lab != 0 );
CV_Assert( lab != 0 );
m[0] = lab;
if( lab == WSHED )

View File

@ -436,7 +436,7 @@ int Subdiv2D::insert(Point2f pt)
else
CV_Error_(CV_StsError, ("Subdiv2D::locate returned invalid location = %d", location) );
assert( curr_edge != 0 );
CV_Assert( curr_edge != 0 );
validGeometry = false;
curr_point = newPoint(pt, false);

View File

@ -182,7 +182,7 @@ int CV_ApproxPolyTest::check_slice( CvPoint StartPt, CvPoint EndPt,
////////////////////////////////
if( SrcReader == NULL )
{
assert( false );
CV_Assert( false );
return 0;
}
@ -237,7 +237,7 @@ int CV_ApproxPolyTest::check( CvSeq* SrcSeq, CvSeq* DstSeq, float Eps )
int Count;
int i,j;
assert( SrcSeq && DstSeq );
CV_Assert( SrcSeq && DstSeq );
////////// init ////////////////////
Count = SrcSeq->total;

View File

@ -203,7 +203,7 @@ namespace opencv_test { namespace {
}
else
{
assert( cn == 3 );
CV_Assert( cn == 3 );
for( j = 0; j < size.width*3; j += 3 )
{
float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;

View File

@ -249,7 +249,7 @@ test_Canny( const Mat& src, Mat& dst,
}
else
{
assert( fabs(tg) > tan_3pi_8 );
CV_Assert( fabs(tg) > tan_3pi_8 );
x1 = x2 = x; y1 = y + 1; y2 = y - 1;
}

View File

@ -217,7 +217,7 @@ void CV_ColorCvtBaseTest::convert_forward( const Mat& src, Mat& dst )
float* dst_buf = &_dst_buf[0];
int i, j;
assert( (cn == 3 || cn == 4) && (dst_cn == 3 || dst_cn == 1) );
CV_Assert( (cn == 3 || cn == 4) && (dst_cn == 3 || dst_cn == 1) );
for( i = 0; i < src.rows; i++ )
{
@ -281,7 +281,7 @@ void CV_ColorCvtBaseTest::convert_forward( const Mat& src, Mat& dst )
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -312,7 +312,7 @@ void CV_ColorCvtBaseTest::convert_backward( const Mat& src, const Mat& dst, Mat&
float* dst_buf = &_dst_buf[0];
int i, j;
assert( cn == 3 || cn == 4 );
CV_Assert( cn == 3 || cn == 4 );
for( i = 0; i < src.rows; i++ )
{
@ -385,7 +385,7 @@ void CV_ColorCvtBaseTest::convert_backward( const Mat& src, const Mat& dst, Mat&
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -1501,7 +1501,7 @@ void CV_ColorRGBTest::convert_forward( const Mat& src, Mat& dst )
int g_rshift = dst_bits == 16 ? 2 : 3;
int r_lshift = dst_bits == 16 ? 11 : 10;
//assert( (cn == 3 || cn == 4) && (dst_cn == 3 || (dst_cn == 2 && depth == CV_8U)) );
//CV_Assert( (cn == 3 || cn == 4) && (dst_cn == 3 || (dst_cn == 2 && depth == CV_8U)) );
for( i = 0; i < src.rows; i++ )
{
@ -1571,7 +1571,7 @@ void CV_ColorRGBTest::convert_forward( const Mat& src, Mat& dst )
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -1587,7 +1587,7 @@ void CV_ColorRGBTest::convert_backward( const Mat& /*src*/, const Mat& src, Mat&
int g_lshift = dst_bits == 16 ? 2 : 3;
int r_rshift = dst_bits == 16 ? 11 : 10;
//assert( (cn == 3 || cn == 4) && (src_cn == 3 || (src_cn == 2 && depth == CV_8U)) );
//CV_Assert( (cn == 3 || cn == 4) && (src_cn == 3 || (src_cn == 2 && depth == CV_8U)) );
for( i = 0; i < src.rows; i++ )
{
@ -1677,7 +1677,7 @@ void CV_ColorRGBTest::convert_backward( const Mat& /*src*/, const Mat& src, Mat&
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}
@ -1840,6 +1840,21 @@ TEST(Imgproc_ColorLuv, accuracy) { CV_ColorLuvTest test; test.safe_run(); }
TEST(Imgproc_ColorRGB, accuracy) { CV_ColorRGBTest test; test.safe_run(); }
TEST(Imgproc_ColorBayer, accuracy) { CV_ColorBayerTest test; test.safe_run(); }
TEST(Imgproc_ColorLuv, Overflow_21112)
{
const Size sz(107, 16); // unaligned size to run both SIMD and generic code
Mat luv_init(sz, CV_8UC3, Scalar(49, 205, 23));
Mat rgb;
cvtColor(luv_init, rgb, COLOR_Luv2RGB);
// Convert to normal Luv coordinates for floats.
Mat luv_initf(sz, CV_32FC3, Scalar(49.0f/255.f*100, 205.0f*354/255.f - 134, 23.0f*262/255.f - 140));
Mat rgbf;
cvtColor(luv_initf, rgbf, COLOR_Luv2RGB);
Mat rgb_converted;
rgb.convertTo(rgb_converted, CV_32F);
EXPECT_LE(cvtest::norm(255.f*rgbf, rgb_converted, NORM_INF), 1e-5);
}
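Note: the float reference triplet above follows the usual 8-bit Luv packing (L scaled by 100/255, u by 354/255 with an offset of -134, v by 262/255 with an offset of -140). A minimal stand-alone sketch of that de-quantization, reusing the test's sample values; everything else here is illustrative and not part of the patch:
#include <cstdio>
int main()
{
    // Same 8-bit Luv sample as the regression test above.
    unsigned char L8 = 49, u8 = 205, v8 = 23;
    float L = L8 * 100.f / 255.f;          // L in [0, 100]
    float u = u8 * 354.f / 255.f - 134.f;  // u in [-134, 220]
    float v = v8 * 262.f / 255.f - 140.f;  // v in [-140, 122]
    printf("L=%.2f u=%.2f v=%.2f\n", L, u, v);
    return 0;
}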
TEST(Imgproc_ColorBayer, regression)
{
cvtest::TS* ts = cvtest::TS::ptr();
@ -2569,7 +2584,7 @@ int row8uLuv2RGB(const uchar* src_row, uchar *dst_row, int n, int cn, int blue_i
long long int xv = ((int)up)*(long long)vp;
int x = (int)(xv/BASE);
x = y*x/BASE;
x = ((long long int)y)*x/BASE;
long long int vpl = LvToVpl_b[LL*256+vv];
long long int zp = vpl - xv*(255/3);
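The one-line change above widens the multiplication before it can wrap: in this fixed-point path y and x can grow large enough for their 32-bit product to overflow, which is the failure the new Overflow_21112 test exercises. A minimal sketch of the difference; BASE and the operand magnitudes below are assumptions chosen only to trigger the overflow, not values from the real conversion tables:
#include <cstdio>
int main()
{
    const int BASE = 1 << 14;          // assumed fixed-point base, for illustration only
    int y = 120000, x = 90000;         // made-up magnitudes large enough to overflow 32 bits
    // int bad = y * x / BASE;         // old form: the int*int product wraps (undefined behaviour)
    long long good = (long long)y * x / BASE;   // widen first, as the patched line does
    printf("%lld\n", good);
    return 0;
}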
@ -2725,11 +2740,11 @@ TEST(Imgproc_ColorLuv_Full, bitExactness)
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0xec311a14, 0x995efefc, 0xf71b590b, 0xc1edfce7, 0x67b2b2e2, 0xe6d7f90d, 0xbcbaff5c, 0xd86ae19c,
0x3e8e4647, 0x53f1a5e3, 0x60dfb6ca, 0xcda851fe, 0xd91084b3, 0xe361bf6f, 0x90fe66ed, 0xb19c5b89,
0x4bff0e00, 0x76bbff01, 0x80735725, 0xb5e0f137, 0x96abb417, 0xfb2cf5cf, 0x314cf55e, 0x77bde10e,
0x2ab24209, 0x81caa6F0, 0x3019b8eb, 0x427c505f, 0x5bba7d77, 0xf29cb4d6, 0x760f65ca, 0xf6b4536c,
0x190508ec, 0xc7764e22, 0x19b042a8, 0x2db4c5d8, 0x6e1cfd1d, 0x39bddd51, 0x942714ed, 0x19444d39,
0xed16e206, 0xc4102784, 0x590075fe, 0xaaef2ec6, 0xbeb84149, 0x8da31e4f, 0x7cbe7d77, 0x1c90b30a,
0xb5cd0704, 0x82144fd4, 0x4e6f4843, 0x106bc505, 0xf587fc97, 0x3665d9a3, 0x3ea014a8, 0xec664953,
0x6ec9e59e, 0xf9201e08, 0xf3676fb8, 0xe4e42c10, 0x92d33f64, 0x13b923f7, 0x308f7f50, 0xca98b420,
};
RNG rng(0);

View File

@ -208,7 +208,7 @@ cvTsMarkContours( IplImage* img, int val )
int i, j;
int step = img->widthStep;
assert( img->depth == IPL_DEPTH_8U && img->nChannels == 1 && (val&1) != 0);
CV_Assert( img->depth == IPL_DEPTH_8U && img->nChannels == 1 && (val&1) != 0);
for( i = 1; i < img->height - 1; i++ )
for( j = 1; j < img->width - 1; j++ )

View File

@ -301,7 +301,7 @@ void CV_BaseShapeDescrTest::generate_point_set( void* pointsSet )
else
{
CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
CV_Assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_TYPE(ptm->type);
data = ptm->data.ptr;
@ -310,7 +310,7 @@ void CV_BaseShapeDescrTest::generate_point_set( void* pointsSet )
n = CV_MAT_CN(point_type);
point_type = CV_MAT_DEPTH(point_type);
assert( (point_type == CV_32S || point_type == CV_32F) && n <= 4 );
CV_Assert( (point_type == CV_32S || point_type == CV_32F) && n <= 4 );
for( i = 0; i < total; i++ )
{
@ -1335,7 +1335,7 @@ void CV_FitEllipseTest::generate_point_set( void* pointsSet )
else
{
CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
CV_Assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_TYPE(ptm->type);
data = ptm->data.ptr;
@ -1621,7 +1621,7 @@ void CV_FitLineTest::generate_point_set( void* pointsSet )
else
{
CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
CV_Assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_DEPTH(CV_MAT_TYPE(ptm->type));
data = ptm->data.ptr;
@ -1788,13 +1788,13 @@ cvTsGenerateTousledBlob( CvPoint2D32f center, CvSize2D32f axes,
else
{
CvMat* ptm = (CvMat*)points;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
CV_Assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_TYPE(ptm->type);
data = ptm->data.ptr;
}
assert( point_type == CV_32SC2 || point_type == CV_32FC2 );
CV_Assert( point_type == CV_32SC2 || point_type == CV_32FC2 );
for( i = 0; i < total; i++ )
{
@ -1874,8 +1874,8 @@ void CV_ContourMomentsTest::generate_point_set( void* pointsSet )
center.x = (float)(img_size.width*0.5 + (cvtest::randReal(rng)-0.5)*(img_size.width - max_sz*2)*0.8);
center.y = (float)(img_size.height*0.5 + (cvtest::randReal(rng)-0.5)*(img_size.height - max_sz*2)*0.8);
assert( 0 < center.x - max_sz && center.x + max_sz < img_size.width &&
0 < center.y - max_sz && center.y + max_sz < img_size.height );
CV_Assert( 0 < center.x - max_sz && center.x + max_sz < img_size.width &&
0 < center.y - max_sz && center.y + max_sz < img_size.height );
max_r_scale = cvtest::randReal(rng)*max_max_r_scale*0.01;
angle = cvtest::randReal(rng)*360;

View File

@ -161,7 +161,7 @@ cvTsDistTransform( const CvMat* _src, CvMat* _dst, int dist_type,
float delta[16];
int tstep, count;
assert( mask_size == 3 || mask_size == 5 );
CV_Assert( mask_size == 3 || mask_size == 5 );
if( dist_type == CV_DIST_USER )
memcpy( mask, _mask, sizeof(mask) );

View File

@ -992,8 +992,8 @@ static void test_medianFilter( const Mat& src, Mat& dst, int m )
median_pair *buf0 = &_buf0[0], *buf1 = &_buf1[0];
int step = (int)(src.step/src.elemSize());
assert( src.rows == dst.rows + m - 1 && src.cols == dst.cols + m - 1 &&
src.type() == dst.type() && src.type() == CV_8UC1 );
CV_Assert( src.rows == dst.rows + m - 1 && src.cols == dst.cols + m - 1 &&
src.type() == dst.type() && src.type() == CV_8UC1 );
for( i = 0; i < dst.rows; i++ )
{
@ -1050,7 +1050,7 @@ static void test_medianFilter( const Mat& src, Mat& dst, int m )
*buf1++ = buf0[k++];
else
{
assert( col_buf[l] < INT_MAX );
CV_Assert( col_buf[l] < INT_MAX );
*buf1++ = median_pair(ins_col,col_buf[l++]);
}
}
@ -1061,7 +1061,7 @@ static void test_medianFilter( const Mat& src, Mat& dst, int m )
if( del_col < 0 )
n += m;
buf1 -= n;
assert( n == m2 );
CV_Assert( n == m2 );
dst1[j] = (uchar)buf1[n/2].val;
median_pair* tbuf;
CV_SWAP( buf0, buf1, tbuf );

View File

@ -169,7 +169,7 @@ int CV_ImgWarpBaseTest::prepare_test_case( int test_case_idx )
}
break;
default:
assert(0);
CV_Assert(0);
}
/*switch( depth )
@ -187,7 +187,7 @@ int CV_ImgWarpBaseTest::prepare_test_case( int test_case_idx )
((float*)ptr)[j] = (float)buffer[j];
break;
default:
assert(0);
CV_Assert(0);
}*/
cv::Mat src(1, cols*cn, CV_32F, &buffer[0]);
cv::Mat dst(1, cols*cn, depth, ptr);
@ -482,7 +482,7 @@ static void test_remap( const Mat& src, Mat& dst, const Mat& mapx, const Mat& ma
}
break;
default:
assert(0);
CV_Assert(0);
}
}
}

View File

@ -935,7 +935,7 @@ void CV_Remap_Test::remap_generic(const Mat& _src, Mat& _dst)
else if (interpolation == INTER_LANCZOS4)
ksize = 8;
else if (interpolation != INTER_LINEAR)
assert(0);
CV_Assert(0);
int ofs = (ksize / 2) - 1;
CV_Assert(_src.depth() == CV_32F && _dst.type() == _src.type());

View File

@ -185,7 +185,7 @@ static void cvTsMatchTemplate( const CvMat* img, const CvMat* templ, CvMat* resu
b_denom = 1.;
}
assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
for( i = 0; i < result->rows; i++ )
{

View File

@ -474,7 +474,7 @@ static void test_threshold( const Mat& _src, Mat& _dst,
}
break;
default:
assert(0);
CV_Assert(0);
}
}

View File

@ -48,7 +48,6 @@
#include "opencv2/core/private.hpp"
#include <assert.h>
#include <float.h>
#include <limits.h>
#include <math.h>

View File

@ -869,7 +869,7 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
}
else
{
assert( m == 2 );
CV_Assert( m == 2 );
dbl_ptr = (double**)(c_weights + _mi);
for( j = 0; j < mi; j++ )
dbl_ptr[j] = cjk + j*2 + 1;

View File

@ -969,10 +969,10 @@ int CascadeClassifierImpl::runAt( Ptr<FeatureEvaluator>& evaluator, Point pt, in
{
CV_INSTRUMENT_REGION();
assert( !oldCascade &&
(data.featureType == FeatureEvaluator::HAAR ||
data.featureType == FeatureEvaluator::LBP ||
data.featureType == FeatureEvaluator::HOG) );
CV_Assert( !oldCascade &&
(data.featureType == FeatureEvaluator::HAAR ||
data.featureType == FeatureEvaluator::LBP ||
data.featureType == FeatureEvaluator::HOG) );
if( !evaluator->setWindow(pt, scaleIdx) )
return -1;

View File

@ -854,7 +854,7 @@ void HOGCache::init(const HOGDescriptor* _descriptor,
data->gradWeight = weights(i,j);
}
assert( count1 + count2 + count4 == rawBlockSize );
CV_Assert( count1 + count2 + count4 == rawBlockSize );
// defragment pixData
for( j = 0; j < count2; j++ )
pixData[j + count1] = pixData[j + rawBlockSize];
@ -876,7 +876,7 @@ void HOGCache::init(const HOGDescriptor* _descriptor,
const float* HOGCache::getBlock(Point pt, float* buf)
{
float* blockHist = buf;
assert(descriptor != 0);
CV_Assert(descriptor != 0);
// Size blockSize = descriptor->blockSize;
pt += imgoffset;

View File

@ -308,18 +308,7 @@ int QRCodeEncoderImpl::versionAuto(const std::string& input_str)
for(size_t i = 0; i < possible_version.size(); i++)
{
int version_range_index = possible_version[i];
if (version_range_index == 1)
{
tmp_version = 1;
}
else if (version_range_index == 2)
{
tmp_version = 10;
}
else
{
tmp_version = 27;
}
encodeAuto(input_str, payload_tmp);
tmp_version = findVersionCapacity((int)payload_tmp.size(), ecc_level,
version_range[version_range_index], version_range[version_range_index + 1]);
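For context, the rewritten body above encodes the segment once and then searches the candidate range for the smallest version whose capacity holds the payload, instead of hard-coding one representative version per range. A rough stand-alone sketch of that kind of search; capacityFor() and its numbers are invented placeholders, not the encoder's real capacity tables or API:
#include <cstdio>
// Invented, monotonically growing capacity model -- stands in for the real lookup tables.
static int capacityFor(int version) { return 17 + 9 * version; }
static int findSmallestVersion(int payload_bytes, int from, int to)
{
    for (int v = from; v <= to; ++v)
        if (capacityFor(v) >= payload_bytes)
            return v;
    return -1;   // nothing in the range fits, mirroring the -1 failure value asserted by the caller
}
int main()
{
    printf("smallest fitting version: %d\n", findSmallestVersion(120, 1, 9));
    return 0;
}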
@ -351,10 +340,11 @@ void QRCodeEncoderImpl::generateQR(const std::string &input)
int segment_begin = i * segment_len;
int segemnt_end = min((i + 1) * segment_len, (int) input.length()) - 1;
std::string input_info = input.substr(segment_begin, segemnt_end - segment_begin + 1);
int v = versionAuto(input_info);
int detected_version = versionAuto(input_info);
CV_Assert(detected_version != -1);
if (version_level == 0)
version_level = v;
else if (version_level < v)
version_level = detected_version;
else if (version_level < detected_version)
CV_Error(Error::StsBadArg, "The given version is not suitable for the given input string length ");
payload.clear();
@ -752,12 +742,14 @@ void QRCodeEncoderImpl::eccGenerate(vector<vector<uint8_t> > &data_blocks, vecto
void QRCodeEncoderImpl::rearrangeBlocks(const vector<vector<uint8_t> > &data_blocks, const vector<vector<uint8_t> > &ecc_blocks)
{
rearranged_data.clear();
rearranged_data.reserve(MAX_PAYLOAD_LEN);
int blocks = cur_ecc_params->num_blocks_in_G2 + cur_ecc_params->num_blocks_in_G1;
int col_border = max(cur_ecc_params->data_codewords_in_G2, cur_ecc_params->data_codewords_in_G1);
int total_codeword_num = version_info->total_codewords;
int is_not_equal = cur_ecc_params->data_codewords_in_G2 - cur_ecc_params->data_codewords_in_G1;
for (int i = 0; i < total_codeword_num; i++)
int add_steps = cur_ecc_params->data_codewords_in_G2 > cur_ecc_params->data_codewords_in_G1 ?
(cur_ecc_params->data_codewords_in_G2 - cur_ecc_params->data_codewords_in_G1) * cur_ecc_params->num_blocks_in_G1 : 0;
rearranged_data.reserve(total_codeword_num + add_steps);
for (int i = 0; i < total_codeword_num + add_steps; i++)
{
int cur_col = i / blocks;
int cur_row = i % blocks;
@ -783,16 +775,6 @@ void QRCodeEncoderImpl::rearrangeBlocks(const vector<vector<uint8_t> > &data_blo
}
rearranged_data.push_back(tmp);
}
const int remainder_len []= {0,
0, 7, 7, 7, 7, 7, 0, 0, 0, 0,
0, 0, 0, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 3, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0};
int cur_remainder_len = remainder_len[version_level];
if (cur_remainder_len != 0)
{
rearranged_data.push_back(0);
}
}
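The new add_steps/reserve logic above appears to extend the column-wise interleaving walk so the extra tail codewords of the longer group-2 blocks are still visited, while the remainder byte previously appended at the end of this function is dropped. A toy sketch of codeword-index-major interleaving over blocks of unequal length, with made-up contents:
#include <algorithm>
#include <cstdio>
#include <vector>
int main()
{
    // Hypothetical data blocks: two shorter group-1 blocks, two longer group-2 blocks.
    std::vector<std::vector<int>> blocks = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9, 10}, {11, 12, 13, 14} };
    size_t longest = 0;
    for (const auto& b : blocks) longest = std::max(longest, b.size());
    std::vector<int> interleaved;
    for (size_t col = 0; col < longest; ++col)   // codeword index first...
        for (const auto& b : blocks)             // ...then block index
            if (col < b.size())                  // shorter blocks simply skip the missing tail
                interleaved.push_back(b[col]);
    for (int v : interleaved) printf("%d ", v);
    printf("\n");
    return 0;
}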
void QRCodeEncoderImpl::findAutoMaskType()
@ -1078,6 +1060,8 @@ void QRCodeEncoderImpl::writeData()
int dir = -1;
int count = 0;
int codeword_value = rearranged_data[0];
const int limit_bits = (int)rearranged_data.size() * 8;
bool limit_reached = false;
while (x > 0)
{
if (x == 6)
@ -1093,11 +1077,20 @@ void QRCodeEncoderImpl::writeData()
continue;
}
count++;
if (count == limit_bits)
{
limit_reached = true;
break;
}
if (count % 8 == 0)
{
codeword_value = rearranged_data[count / 8];
}
}
if (limit_reached)
{
break;
}
y += dir;
if (y < 0 || y >= version_size)
{
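The limit_bits guard added above stops the zig-zag module writer once every bit of rearranged_data has been emitted instead of indexing past the end of the vector. A tiny sketch of such a bounded bit-draining loop, with placeholder codewords and an illustrative MSB-first bit order (the real traversal over the symbol is omitted):
#include <cstdio>
#include <vector>
int main()
{
    std::vector<unsigned char> rearranged_data = { 0xA5, 0x3C };   // placeholder codewords
    const int limit_bits = (int)rearranged_data.size() * 8;        // same bound as in the patch
    for (int count = 0; count < limit_bits; ++count)
    {
        int bit = (rearranged_data[count / 8] >> (7 - count % 8)) & 1;   // bit order is illustrative
        printf("%d", bit);
    }
    printf("\n");
    return 0;
}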

View File

@ -192,7 +192,7 @@ void CV_DetectorTest::run( int )
// write detectors
validationFS << DETECTORS << "{";
assert( detectorNames.size() == detectorFilenames.size() );
CV_Assert( detectorNames.size() == detectorFilenames.size() );
nit = detectorNames.begin();
for( int di = 0; nit != detectorNames.end(); ++nit, di++ )
{
@ -292,7 +292,7 @@ static bool isZero( uchar i ) {return i == 0;}
int CV_DetectorTest::validate( int detectorIdx, vector<vector<Rect> >& objects )
{
assert( imageFilenames.size() == objects.size() );
CV_Assert( imageFilenames.size() == objects.size() );
int imageIdx = 0;
int totalNoPair = 0, totalValRectCount = 0;
@ -489,7 +489,7 @@ int CV_HOGDetectorTest::detectMultiScale( int di, const Mat& img,
if( detectorFilenames[di].empty() )
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
else
assert(0);
CV_Assert(0);
hog.detectMultiScale(img, objects);
return cvtest::TS::OK;
}
@ -772,7 +772,7 @@ void HOGCacheTester::init(const HOGDescriptorTester* _descriptor,
data->gradWeight = weights(i,j);
}
assert( count1 + count2 + count4 == rawBlockSize );
CV_Assert( count1 + count2 + count4 == rawBlockSize );
// defragment pixData
for( j = 0; j < count2; j++ )
pixData[j + count1] = pixData[j + rawBlockSize];
@ -794,7 +794,7 @@ void HOGCacheTester::init(const HOGDescriptorTester* _descriptor,
const float* HOGCacheTester::getBlock(Point pt, float* buf)
{
float* blockHist = buf;
assert(descriptor != 0);
CV_Assert(descriptor != 0);
Size blockSize = descriptor->blockSize;
pt += imgoffset;
@ -1275,7 +1275,7 @@ void HOGDescriptorTester::computeGradient(InputArray _img, InputOutputArray _gra
hidx += _nbins;
else if( hidx >= _nbins )
hidx -= _nbins;
assert( (unsigned)hidx < (unsigned)_nbins );
CV_Assert( (unsigned)hidx < (unsigned)_nbins );
qanglePtr[x*2] = (uchar)hidx;
hidx++;

View File

@ -50,7 +50,7 @@ namespace perf {
void checkDeviceMaxMemoryAllocSize(const Size& size, int type, int factor)
{
assert(factor > 0);
CV_Assert(factor > 0);
if (!cv::ocl::useOpenCL())
return;

View File

@ -379,7 +379,7 @@ void BaseTest::run( int start_from )
void BaseTest::run_func(void)
{
assert(0);
CV_Assert(0);
}

View File

@ -268,14 +268,14 @@ void ArrayTest::fill_array( int /*test_case_idx*/, int i, int j, Mat& arr )
double ArrayTest::get_success_error_level( int /*test_case_idx*/, int i, int j )
{
int elem_depth = CV_MAT_DEPTH(cvGetElemType(test_array[i][j]));
assert( i == OUTPUT || i == INPUT_OUTPUT );
CV_Assert( i == OUTPUT || i == INPUT_OUTPUT );
return elem_depth < CV_32F ? 0 : elem_depth == CV_32F ? FLT_EPSILON*100: DBL_EPSILON*5000;
}
void ArrayTest::prepare_to_validation( int /*test_case_idx*/ )
{
assert(0);
CV_Assert(0);
}
@ -293,7 +293,7 @@ int ArrayTest::validate_test_results( int test_case_idx )
int i1 = i == 0 ? REF_OUTPUT : REF_INPUT_OUTPUT;
size_t sizei = test_array[i0].size();
assert( sizei == test_array[i1].size() );
CV_Assert( sizei == test_array[i1].size() );
for( j = 0; j < sizei; j++ )
{
double err_level;

View File

@ -2122,7 +2122,7 @@ int cmpEps( const Mat& arr_, const Mat& refarr_, double* _realmaxdiff,
}
break;
default:
assert(0);
CV_Assert(0);
return CMP_EPS_BIG_DIFF;
}
if(_realmaxdiff)
@ -2733,7 +2733,7 @@ static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<i
if( _aperture_size < 0 )
{
static const int scharr[8] = { 3, 10, 3, -1, 0, 1, 0, 0 }; // extra elements to eliminate "-Warray-bounds" bogus warning
assert( size == 3 );
CV_Assert( size == 3 && order < 2 );
for( i = 0; i < size; i++ )
kernel[i] = scharr[order*3 + i];
return;
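The tightened assertion above matches what the packed table can actually provide: its first three entries are the Scharr smoothing kernel used for order 0 and the next three the derivative kernel for order 1, so order >= 2 has no valid row. A quick stand-alone illustration of that indexing:
#include <cstdio>
int main()
{
    static const int scharr[8] = { 3, 10, 3, -1, 0, 1, 0, 0 };
    for (int order = 0; order < 2; ++order)   // order >= 2 would run past the meaningful entries
    {
        printf("order %d kernel:", order);
        for (int i = 0; i < 3; ++i)
            printf(" %d", scharr[order*3 + i]);
        printf("\n");
    }
    return 0;
}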
@ -3057,7 +3057,7 @@ void threshold( const Mat& _src, Mat& _dst,
imaxval = cvRound(maxval);
}
assert( depth == CV_8U || depth == CV_16S || depth == CV_32F );
CV_Assert( depth == CV_8U || depth == CV_16S || depth == CV_32F );
switch( thresh_type )
{
@ -3219,7 +3219,7 @@ void threshold( const Mat& _src, Mat& _dst,
}
break;
default:
assert(0);
CV_Assert(0);
}
}

View File

@ -1366,7 +1366,7 @@ bool TestBase::next()
bool has_next = false;
do {
assert(currentIter == times.size());
CV_Assert(currentIter == times.size());
if (currentIter == 0)
{
has_next = true;
@ -1379,7 +1379,7 @@ bool TestBase::next()
}
else
{
assert(getCurrentPerformanceStrategy() == PERF_STRATEGY_SIMPLE);
CV_Assert(getCurrentPerformanceStrategy() == PERF_STRATEGY_SIMPLE);
if (totalTime - lastActivityPrintTime >= cv::getTickFrequency() * 10)
{
std::cout << '.' << std::endl;
@ -1638,7 +1638,7 @@ performance_metrics& TestBase::calcMetrics()
}
else
{
assert(false);
CV_Assert(false);
}
int offset = static_cast<int>(start - times.begin());
@ -1714,7 +1714,7 @@ void TestBase::validateMetrics()
}
else
{
assert(false);
CV_Assert(false);
}
}

View File

@ -47,7 +47,6 @@ using namespace cv;
#if !(defined(_WIN32) || defined(WINCE))
# include <pthread.h>
#endif
#include <assert.h>
#include <algorithm>
#include <limits>
@ -400,7 +399,7 @@ static
inline int _opencv_ffmpeg_interrupt_callback(void *ptr)
{
AVInterruptCallbackMetadata* metadata = (AVInterruptCallbackMetadata*)ptr;
assert(metadata);
CV_Assert(metadata);
if (metadata->timeout_after_ms == 0)
{
@ -2165,7 +2164,7 @@ bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int
}
}
else {
assert(false);
CV_Assert(false);
}
if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
@ -2218,7 +2217,7 @@ bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int
sw_pix_fmt = ((AVHWFramesContext*)c->hw_frames_ctx->data)->sw_format;
#endif
if ( sw_pix_fmt != input_pix_fmt ) {
assert( input_picture );
CV_Assert( input_picture );
// let input_picture point to the raw data buffer of 'image'
_opencv_ffmpeg_av_image_fill_arrays(input_picture, (uint8_t *) data,
(AVPixelFormat)input_pix_fmt, width, height);
@ -2592,7 +2591,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
// alloc memory for context
oc = avformat_alloc_context();
assert (oc);
CV_Assert(oc);
/* set file name */
oc->oformat = fmt;

View File

@ -220,7 +220,6 @@ make & enjoy!
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <limits>
@ -938,8 +937,8 @@ bool CvCaptureCAM_V4L::read_frame_v4l2()
return false;
}
assert(buf.index < req.count);
assert(buffers[buf.index].length == buf.length);
CV_Assert(buf.index < req.count);
CV_Assert(buffers[buf.index].length == buf.length);
// We shouldn't return this buffer to the queue until the frame has been retrieved from it.
buffers[buf.index].buffer = buf;

View File

@ -82,7 +82,6 @@
#include <string.h>
#include <limits.h>
#include <ctype.h>
#include <assert.h> // FIXIT remove this
#if defined _WIN32 || defined WINCE
#if !defined _WIN32_WINNT