Merge pull request #9241 from alalek:tlsSlotsSize

commit fffd0f5b68
@@ -327,6 +327,17 @@ Cv64suf;
 # endif
 #endif
 
+/****************************************************************************************\
+*                                    Thread sanitizer                                    *
+\****************************************************************************************/
+#ifndef CV_THREAD_SANITIZER
+# if defined(__has_feature)
+#   if __has_feature(thread_sanitizer)
+#     define CV_THREAD_SANITIZER
+#   endif
+# endif
+#endif
+
 /****************************************************************************************\
 *          exchange-add operation for atomic operations on reference counters            *
 \****************************************************************************************/
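The block added above teaches the headers to detect ThreadSanitizer builds: Clang defines __has_feature, and __has_feature(thread_sanitizer) evaluates to 1 when the translation unit is compiled with -fsanitize=thread. The later hunks use the resulting CV_THREAD_SANITIZER macro to compile out deliberately unsynchronized sanity checks. A minimal standalone sketch of the same idiom (the DEMO_* names are illustrative, not OpenCV identifiers):

// demo_tsan_detect.cpp - illustrative only; mirrors the detection pattern above.
#include <cstdio>

#ifndef DEMO_THREAD_SANITIZER
#  if defined(__has_feature)              // provided by Clang-compatible compilers
#    if __has_feature(thread_sanitizer)   // true only under -fsanitize=thread
#      define DEMO_THREAD_SANITIZER
#    endif
#  endif
#endif

int main()
{
#ifdef DEMO_THREAD_SANITIZER
    std::puts("ThreadSanitizer build: unsynchronized sanity checks are compiled out");
#else
    std::puts("regular build: extra sanity checks stay enabled");
#endif
    return 0;
}

Compilers that do not define __has_feature (e.g. MSVC) skip the inner test entirely, so the macro simply stays undefined there.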
@@ -1306,7 +1306,8 @@ struct ThreadData
 class TlsStorage
 {
 public:
-    TlsStorage()
+    TlsStorage() :
+        tlsSlotsSize(0)
     {
         tlsSlots.reserve(32);
         threads.reserve(32);
@@ -1351,9 +1352,10 @@ public:
     size_t reserveSlot()
     {
         AutoLock guard(mtxGlobalAccess);
+        CV_Assert(tlsSlotsSize == tlsSlots.size());
 
         // Find unused slots
-        for(size_t slot = 0; slot < tlsSlots.size(); slot++)
+        for(size_t slot = 0; slot < tlsSlotsSize; slot++)
         {
             if(!tlsSlots[slot])
             {
@@ -1363,15 +1365,16 @@ public:
         }
 
         // Create new slot
-        tlsSlots.push_back(1);
-        return (tlsSlots.size()-1);
+        tlsSlots.push_back(1); tlsSlotsSize++;
+        return tlsSlotsSize - 1;
     }
 
     // Release TLS storage index and pass associated data to caller
     void releaseSlot(size_t slotIdx, std::vector<void*> &dataVec, bool keepSlot = false)
     {
         AutoLock guard(mtxGlobalAccess);
-        CV_Assert(tlsSlots.size() > slotIdx);
+        CV_Assert(tlsSlotsSize == tlsSlots.size());
+        CV_Assert(tlsSlotsSize > slotIdx);
 
         for(size_t i = 0; i < threads.size(); i++)
         {
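Taken together, the TlsStorage changes keep a plain size_t counter, tlsSlotsSize, equal to tlsSlots.size() inside every mutex-protected section, and make the unlocked paths (getData/setData below) bounds-check against that counter instead of calling size() on a vector another thread may be growing. Because slots are only ever marked free, never erased, the counter never decreases, so a stale read can only under-report. A condensed, self-contained sketch of the pattern, using standard library names in place of OpenCV's AutoLock/CV_Assert (SlotTable and its members are illustrative, not the real cv::TlsStorage):

#include <cassert>
#include <cstddef>
#include <mutex>
#include <vector>

class SlotTable
{
public:
    SlotTable() : slotsSize(0) {}

    // All mutation happens under the mutex; the mirror counter is updated there too.
    size_t reserveSlot()
    {
        std::lock_guard<std::mutex> guard(mtx);
        assert(slotsSize == slots.size());

        for (size_t slot = 0; slot < slotsSize; slot++)  // reuse a released slot if any
        {
            if (!slots[slot])
            {
                slots[slot] = 1;
                return slot;
            }
        }

        slots.push_back(1); slotsSize++;                 // grow table and counter together
        return slotsSize - 1;
    }

    void releaseSlot(size_t slotIdx)
    {
        std::lock_guard<std::mutex> guard(mtx);
        assert(slotsSize == slots.size());
        assert(slotIdx < slotsSize);
        slots[slotIdx] = 0;                              // mark free; the table never shrinks
    }

    // Unlocked fast path: consult only the monotonic counter, never slots.size().
    bool isPlausibleSlot(size_t slotIdx) const
    {
        return slotIdx < slotsSize;
    }

private:
    std::mutex mtx;
    size_t slotsSize;        // == slots.size() whenever mtx is held; never decreases
    std::vector<int> slots;  // 1 = slot in use, 0 = free
};

int main()
{
    SlotTable table;
    size_t a = table.reserveSlot();   // -> 0
    size_t b = table.reserveSlot();   // -> 1
    table.releaseSlot(a);
    size_t c = table.reserveSlot();   // freed slot is reused -> 0
    assert(c == a && table.isPlausibleSlot(b));
    (void)b; (void)c;
    return 0;
}

The unlocked read in isPlausibleSlot is still formally a data race, which appears to be why the corresponding checks in getData/setData are wrapped in #ifndef CV_THREAD_SANITIZER in the hunks below: they are best-effort sanity checks, not synchronization, and ThreadSanitizer would otherwise report them.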
@@ -1393,7 +1396,9 @@ public:
     // Get data by TLS storage index
     void* getData(size_t slotIdx) const
     {
-        CV_Assert(tlsSlots.size() > slotIdx);
+#ifndef CV_THREAD_SANITIZER
+        CV_Assert(tlsSlotsSize > slotIdx);
+#endif
 
         ThreadData* threadData = (ThreadData*)tls.GetData();
         if(threadData && threadData->slots.size() > slotIdx)
@@ -1406,7 +1411,8 @@ public:
     void gather(size_t slotIdx, std::vector<void*> &dataVec)
     {
         AutoLock guard(mtxGlobalAccess);
-        CV_Assert(tlsSlots.size() > slotIdx);
+        CV_Assert(tlsSlotsSize == tlsSlots.size());
+        CV_Assert(tlsSlotsSize > slotIdx);
 
         for(size_t i = 0; i < threads.size(); i++)
         {
@@ -1422,7 +1428,9 @@ public:
     // Set data to storage index
     void setData(size_t slotIdx, void* pData)
    {
-        CV_Assert(tlsSlots.size() > slotIdx && pData != NULL);
+#ifndef CV_THREAD_SANITIZER
+        CV_Assert(tlsSlotsSize > slotIdx);
+#endif
 
         ThreadData* threadData = (ThreadData*)tls.GetData();
         if(!threadData)
@@ -1438,9 +1446,8 @@ public:
 
         if(slotIdx >= threadData->slots.size())
         {
-            AutoLock guard(mtxGlobalAccess);
-            while(slotIdx >= threadData->slots.size())
-                threadData->slots.push_back(NULL);
+            AutoLock guard(mtxGlobalAccess); // keep synchronization with gather() calls
+            threadData->slots.resize(slotIdx + 1, NULL);
         }
         threadData->slots[slotIdx] = pData;
     }
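The setData hunk above also replaces the element-by-element growth loop with a single resize call; both forms leave threadData->slots with slotIdx + 1 entries and NULL in every newly created position, but resize does it in one step. A tiny standalone check of that equivalence (illustrative only, not OpenCV code):

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const size_t slotIdx = 7;
    std::vector<void*> oldForm(3, nullptr), newForm(3, nullptr);

    while (slotIdx >= oldForm.size())        // removed form: grow one element at a time
        oldForm.push_back(nullptr);

    if (slotIdx >= newForm.size())           // new form: grow in a single call
        newForm.resize(slotIdx + 1, nullptr);

    assert(oldForm == newForm && oldForm.size() == slotIdx + 1);
    return 0;
}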
@@ -1449,6 +1456,8 @@ private:
     TlsAbstraction tls; // TLS abstraction layer instance
 
     Mutex  mtxGlobalAccess;           // Shared objects operation guard
+    size_t tlsSlotsSize;              // equal to tlsSlots.size() in synchronized sections
+                                      // without synchronization this counter doesn't decrease - it is used for slotIdx sanity checks
     std::vector<int> tlsSlots;        // TLS keys state
     std::vector<ThreadData*> threads; // Array for all allocated data. Thread data pointers are placed here to allow data cleanup
 };