//$$CDS-header$$
-#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
-#define __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#ifndef CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#define CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
/*
    Michael allocator implementation
    Based on: M.Michael "Scalable Lock-Free Dynamic Memory Allocation", PLDI 2004
*/
/// Allocates memory block of \p nSize bytes (\p malloc wrapper)
static void * alloc( size_t nSize )
{
- return ::malloc( nSize );
+ void * p = ::malloc( nSize );
+ return p;
}
/// Returns memory block to the system (\p free wrapper)
static void free( void * p )
{
    ::free( p );
}
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
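// e.g. with nBlockSize == 64 and oldAnchor.avail == 3 the popped block starts
// at pSB + 192; the (unsigned long long) cast keeps the multiply in 64 bits
// so avail * nBlockSize cannot overflow for large superblocks.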
+
+ // TSan reports a false data race here if the block previously held atomic variables
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
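// Sketch only, not the library's verbatim definitions: the annotation macros
// are normally thin wrappers over the TSan dynamic-annotation entry points
// and compile away when the sanitizer is disabled:
//
//   extern "C" void AnnotateIgnoreWritesBegin( const char * file, int line );
//   extern "C" void AnnotateIgnoreWritesEnd( const char * file, int line );
//
//   #define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN  AnnotateIgnoreWritesBegin( __FILE__, __LINE__ )
//   #define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END    AnnotateIgnoreWritesEnd( __FILE__, __LINE__ )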
newAnchor.tag += 1;
if ( oldActive.credits() == 0 ) {
static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" );
+ // TSan false positive: the new descriptor is linked in later under a release fence
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+
pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
else
pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx;
}
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return pDesc;
}
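// Illustrative sketch, member names hypothetical: the plain writes above are
// safe because the fresh descriptor is published afterwards with release
// semantics, e.g.
//
//   pDesc->pNextFree = pOldHead;
//   m_pDescHead.compare_exchange_weak( pOldHead, pDesc,
//       atomics::memory_order_release, atomics::memory_order_relaxed );
//
// A thread that acquires the head then sees every field initialized; TSan
// cannot match the plain writes with that publication, hence the
// ignore-writes window around them.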
);
}
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( pBlock + 1, nSize );
return pBlock + 1;
}
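// Sketch of the usual mapping for CDS_TSAN_ANNOTATE_NEW_MEMORY (the exact
// definition lives in the compiler feature header): it marks the user area of
// the block as fresh memory so the sanitizer drops any race history left over
// from a previous allocation at the same address:
//
//   extern "C" void AnnotateNewMemory( const char * file, int line,
//                                      const volatile void * mem, size_t size );
//   #define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) \
//       AnnotateNewMemory( __FILE__, __LINE__, (addr), (sz) )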
}}} // namespace cds::memory::michael
-#endif // __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#endif // CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H