map/unmap, preventing getMat/getUMat from temp object, fix thread-unsafe code in UMat::getMat()

commit cea2dafa0f  (parent 536634b1fe)
Author:    Andrey Pavlenko  2015-09-03 17:18:59 +03:00
Committer: Alexander Alekhin

4 changed files with 82 additions and 39 deletions
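In short: UMatData gains a mapcount field so the OpenCL allocator can tell whether a buffer is still mapped into host memory; Mat::getUMat() and UMat::getMat() now assert when invoked on a temporary object; and UMat::getMat() becomes thread-safe by taking a per-UMatData lock and mapping the buffer only on the first reference. A minimal sketch of the usage pattern this outlaws and its safe replacement (my illustration, mirroring the updated tests below, not code from the commit):

    #include <opencv2/core.hpp>
    using namespace cv;

    int main()
    {
        Mat a(10, 10, CV_8UC1, Scalar(1));

        // The pattern the new asserts call out: the UMat returned by
        // getUMat() is a temporary, so the Mat view would outlive its
        // mapping. With an OpenCL-backed UMat this now stops with
        // "Don't get Mat from temp UMat! Use copyTo().":
        //Mat b = a.getUMat(ACCESS_RW).getMat(ACCESS_RW);

        // Safe replacement (how the test below was rewritten): keep the
        // UMat alive for the whole lifetime of the Mat view.
        UMat u = a.getUMat(ACCESS_RW);
        {
            Mat b = u.getMat(ACCESS_RW);
            CV_Assert(b.at<uchar>(0, 0) == 1);
        }
        return 0;
    }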


@@ -496,6 +496,7 @@ struct CV_EXPORTS UMatData
     void* handle;
     void* userdata;
     int allocatorFlags_;
+    int mapcount;
 };


@@ -4514,6 +4514,7 @@ public:
         CV_Assert(u->refcount >= 0);
         CV_Assert(u->handle != 0 && u->urefcount == 0);
+        CV_Assert(u->mapcount == 0);
         if(u->tempUMat())
         {
             CV_Assert(u->origdata);
@@ -4572,12 +4573,16 @@ public:
             else
             {
                 cl_int retval = 0;
-                void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
-                                                (CL_MAP_READ | CL_MAP_WRITE),
-                                                0, u->size, 0, 0, 0, &retval);
-                CV_OclDbgAssert(retval == CL_SUCCESS);
-                CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS);
-                CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
+                if (u->tempUMat())
+                {
+                    CV_Assert(u->mapcount == 0);
+                    void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
+                                                    (CL_MAP_READ | CL_MAP_WRITE),
+                                                    0, u->size, 0, 0, 0, &retval);
+                    CV_OclDbgAssert(retval == CL_SUCCESS);
+                    CV_OclDbgAssert(clEnqueueUnmapMemObject(q, (cl_mem)u->handle, data, 0, 0, 0) == CL_SUCCESS);
+                    CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
+                }
             }
         }
         u->markHostCopyObsolete(false);
@@ -4715,11 +4720,16 @@ public:
             }
 #endif
-            cl_int retval = 0;
-            u->data = (uchar*)clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
-                                                 (CL_MAP_READ | CL_MAP_WRITE),
-                                                 0, u->size, 0, 0, 0, &retval);
-            if(u->data && retval == CL_SUCCESS)
+            cl_int retval = CL_SUCCESS;
+            if (!u->deviceMemMapped())
+            {
+                CV_Assert(u->refcount == 1);
+                CV_Assert(u->mapcount++ == 0);
+                u->data = (uchar*)clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
+                                                     (CL_MAP_READ | CL_MAP_WRITE),
+                                                     0, u->size, 0, 0, 0, &retval);
+            }
+            if (u->data && retval == CL_SUCCESS)
             {
                 u->markHostCopyObsolete(false);
                 u->markDeviceMemMapped(true);
@@ -4765,7 +4775,6 @@ public:
         if( !u->copyOnMap() && u->deviceMemMapped() )
         {
             CV_Assert(u->data != NULL);
-            u->markDeviceMemMapped(false);
 #ifdef HAVE_OPENCL_SVM
             if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0)
             {
@@ -4792,16 +4801,21 @@ public:
                 return;
             }
 #endif
-            CV_Assert( (retval = clEnqueueUnmapMemObject(q,
-                                 (cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS );
-            if (Device::getDefault().isAMD())
-            {
-                // required for multithreaded applications (see stitching test)
-                CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
-            }
             if (u->refcount == 0)
+            {
+                CV_Assert(u->mapcount-- == 1);
+                CV_Assert((retval = clEnqueueUnmapMemObject(q,
+                                    (cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS);
+                if (Device::getDefault().isAMD())
+                {
+                    // required for multithreaded applications (see stitching test)
+                    CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
+                }
+                u->markDeviceMemMapped(false);
                 u->data = 0;
+                u->markDeviceCopyObsolete(false);
+                u->markHostCopyObsolete(true);
+            }
         }
         else if( u->copyOnMap() && u->deviceCopyObsolete() )
         {
@@ -4811,9 +4825,9 @@ public:
 #endif
             CV_Assert( (retval = clEnqueueWriteBuffer(q, (cl_mem)u->handle, CL_TRUE, 0,
                                   u->size, alignedPtr.getAlignedPtr(), 0, 0, 0)) == CL_SUCCESS );
+            u->markDeviceCopyObsolete(false);
+            u->markHostCopyObsolete(true);
         }
-        u->markDeviceCopyObsolete(false);
-        u->markHostCopyObsolete(true);
     }

     bool checkContinuous(int dims, const size_t sz[],
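
Taken together, the allocator changes above make map() and unmap() a strictly paired, reference-counted operation: map() runs only for the first host reference (mapcount 0 -> 1), unmap() tears the mapping down only when the last reference disappears (mapcount 1 -> 0), and deallocate() asserts mapcount == 0. A minimal sketch of the resulting behavior for nested host views (my illustration under those assumptions, not code from the commit):

    #include <opencv2/core.hpp>
    using namespace cv;

    int main()
    {
        UMat u(10, 10, CV_8UC1, Scalar(7));

        Mat m1 = u.getMat(ACCESS_RW);  // refcount 0 -> 1: allocator map(), mapcount 0 -> 1
        Mat m2 = u.getMat(ACCESS_RW);  // refcount 1 -> 2: reuses the existing mapping
        CV_Assert(m1.data == m2.data); // both views share the mapped host pointer

        m2.release();                  // refcount 2 -> 1: buffer stays mapped
        m1.release();                  // refcount 1 -> 0: unmap, mapcount 1 -> 0
        return 0;
    }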


@@ -60,7 +60,7 @@ static Mutex umatLocks[UMAT_NLOCKS];
 UMatData::UMatData(const MatAllocator* allocator)
 {
     prevAllocator = currAllocator = allocator;
-    urefcount = refcount = 0;
+    urefcount = refcount = mapcount = 0;
     data = origdata = 0;
     size = 0;
     flags = 0;
@@ -73,6 +73,7 @@ UMatData::~UMatData()
 {
     prevAllocator = currAllocator = 0;
     urefcount = refcount = 0;
+    CV_Assert(mapcount == 0);
     data = origdata = 0;
     size = 0;
     flags = 0;
@@ -221,6 +222,7 @@ UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
     UMat hdr;
     if(!data)
         return hdr;
+    CV_Assert((!u || u->mapcount==0) && "Don't get UMat from temp-Mat!");
     accessFlags |= ACCESS_RW;
     UMatData* temp_u = u;
     if(!temp_u)
@@ -637,18 +639,28 @@ Mat UMat::getMat(int accessFlags) const
 {
     if(!u)
         return Mat();
+    CV_Assert(!u->tempUMat() && "Don't get Mat from temp UMat! Use copyTo().");
     // TODO Support ACCESS_READ (ACCESS_WRITE) without unnecessary data transfers
     accessFlags |= ACCESS_RW;
-    u->currAllocator->map(u, accessFlags);
-    CV_Assert(u->data != 0);
-    Mat hdr(dims, size.p, type(), u->data + offset, step.p);
-    hdr.flags = flags;
-    hdr.u = u;
-    hdr.datastart = u->data;
-    hdr.data = u->data + offset;
-    hdr.datalimit = hdr.dataend = u->data + u->size;
-    CV_XADD(&hdr.u->refcount, 1);
-    return hdr;
+    UMatDataAutoLock autolock(u);
+    if(CV_XADD(&u->refcount, 1) == 0)
+        u->currAllocator->map(u, accessFlags);
+    if (u->data != 0)
+    {
+        Mat hdr(dims, size.p, type(), u->data + offset, step.p);
+        hdr.flags = flags;
+        hdr.u = u;
+        hdr.datastart = u->data;
+        hdr.data = u->data + offset;
+        hdr.datalimit = hdr.dataend = u->data + u->size;
+        return hdr;
+    }
+    else
+    {
+        CV_XADD(&u->refcount, -1);
+        CV_Assert(u->data != 0 && "Error mapping of UMat to host memory.");
+        return Mat();
+    }
 }

 void* UMat::handle(int accessFlags) const
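
For context on the two primitives the rewritten getMat() leans on: UMatDataAutoLock is a small RAII guard over the per-UMatData mutexes (the umatLocks table visible in the first hunk of this file), and CV_XADD is an atomic fetch-and-add. A simplified sketch, not OpenCV's actual definitions:

    // Roughly what UMatDataAutoLock amounts to (illustrative):
    struct UMatDataAutoLockSketch
    {
        explicit UMatDataAutoLockSketch(UMatData* u_) : u(u_) { u->lock(); }
        ~UMatDataAutoLockSketch() { u->unlock(); }
        UMatData* u;
    };

    // CV_XADD(&x, 1) atomically performs { old = x; x += 1; return old; },
    // so "if(CV_XADD(&u->refcount, 1) == 0)" claims a reference and detects
    // "I am the first caller" in one indivisible step; under the lock, at
    // most one thread ever reaches currAllocator->map().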


@@ -243,9 +243,11 @@ TEST_P(UMatBasicTests, GetUMat)
         EXPECT_MAT_NEAR(ub, ua, 0);
     }
     {
-        Mat b;
-        b = a.getUMat(ACCESS_RW).getMat(ACCESS_RW);
-        EXPECT_MAT_NEAR(b, a, 0);
+        UMat u = a.getUMat(ACCESS_RW);
+        {
+            Mat b = u.getMat(ACCESS_RW);
+            EXPECT_MAT_NEAR(b, a, 0);
+        }
     }
     {
         Mat b;
@@ -253,9 +255,11 @@ TEST_P(UMatBasicTests, GetUMat)
         EXPECT_MAT_NEAR(b, a, 0);
     }
     {
-        UMat ub;
-        ub = ua.getMat(ACCESS_RW).getUMat(ACCESS_RW);
-        EXPECT_MAT_NEAR(ub, ua, 0);
+        Mat m = ua.getMat(ACCESS_RW);
+        {
+            UMat ub = m.getUMat(ACCESS_RW);
+            EXPECT_MAT_NEAR(ub, ua, 0);
+        }
     }
 }
@@ -1268,5 +1272,17 @@ TEST(UMat, DISABLED_Test_same_behaviour_write_and_write)
     ASSERT_TRUE(exceptionDetected); // data race
 }

+TEST(UMat, mat_umat_sync)
+{
+    UMat u(10, 10, CV_8UC1, Scalar(1));
+    {
+        Mat m = u.getMat(ACCESS_RW).reshape(1);
+        m.setTo(Scalar(255));
+    }
+
+    UMat uDiff;
+    compare(u, 255, uDiff, CMP_NE);
+    ASSERT_EQ(0, countNonZero(uDiff));
+}
+
 } } // namespace cvtest::ocl
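
The new mat_umat_sync test pins down the write-back semantics of the reworked unmap path. An annotated restatement (the comments are mine, not part of the commit):

    UMat u(10, 10, CV_8UC1, Scalar(1));
    {
        Mat m = u.getMat(ACCESS_RW).reshape(1); // the temporary Mat from getMat()
                                                // dies here, but m still holds a
                                                // reference to the mapping
        m.setTo(Scalar(255));                   // writes through the mapped pointer
    }                                           // last reference gone: unmap + write-back
    UMat uDiff;
    compare(u, 255, uDiff, CMP_NE);             // every pixel must now read 255
    ASSERT_EQ(0, countNonZero(uDiff));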