#ifndef ___MEMPOOL_H__
#define ___MEMPOOL_H__

//////////////////////////////////////////////////////////
//
//  A memory pool implements POOLCREATE, POOLALLOC and
//  POOLFREE to improve memory allocation by reusing records.
//
//  This implementation uses a lock-free singly-linked list
//  to store reusable records.  The list is initialized with
//  one valid record, and the list is considered empty when
//  it has only one record; this allows the enqueue operation's
//  CAS to assume the tail can always be dereferenced.
//
//  poolfree adds newly freed records to the BACK of the list.
//
//  poolalloc either takes records from the FRONT of the list
//  or mallocs a fresh one.
//
//////////////////////////////////////////////////////////
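// Illustrative usage (a sketch, not part of this header): the Task type and
// initTask below are hypothetical, and the example assumes the enclosing
// runtime environment that provides RUNMALLOC/RUNFREE.
//
//   typedef struct Task_t { int taskID; double payload[8]; } Task;
//
//   void initTask( void* t ) { ((Task*)t)->taskID = 0; }
//
//   MemPool* pool = poolcreate( sizeof( Task ), initTask );
//
//   Task* t = poolalloc( pool );   // reuses a record, or mallocs a fresh one
//   // ... use t ...
//   poolfreeinto( pool, t );       // record becomes available for reuse
//
//   pooldestroy( pool );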

#include <stdlib.h>

// RUNMALLOC, RUNFREE, CAS and INTPTR are assumed to be supplied by the
// enclosing runtime's headers (they are used but not defined in this file).

#ifdef MEMPOOL_DETECT_MISUSE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
static INTPTR pageSize;
#endif


#define CACHELINESIZE 64


typedef struct MemPoolItem_t {
  void* next;
} MemPoolItem;

typedef struct MemPool_t {
  int itemSize;

  // only invoke this on items that are
  // actually new; saves time for reused
  // records
  void (*initFreshlyAllocated)(void*);

#ifdef MEMPOOL_DETECT_MISUSE
  int allocSize;
#else
  MemPoolItem* head;

  // avoid cache line contention between producer/consumer...
  // (see the note below this struct)
  char buffer[CACHELINESIZE];

  MemPoolItem* tail;
#endif
} MemPool;
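
// Note on the padding above: records are freed into the pool (enqueued at
// the tail) by other threads via CAS, while the owning thread dequeues from
// the head without atomics, so keeping head and tail a cache line apart
// avoids false sharing between the producer and consumer sides.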


// the memory pool must always have at least one
// record in it
static MemPool* poolcreate( int itemSize,
                            void (*initializer)(void*)
                            ) {
  MemPool* p  = RUNMALLOC( sizeof( MemPool ) );
  p->itemSize = itemSize;

  p->initFreshlyAllocated = initializer;

#ifdef MEMPOOL_DETECT_MISUSE
  // when detecting misuse, round the item size
  // up to a page and add a page, so whatever
  // allocated memory you get, you can use a
  // page-aligned subset as the record
  pageSize = sysconf( _SC_PAGESIZE );

  if( itemSize % pageSize == 0 ) {
    // if the item size is already an exact multiple
    // of the page size, just increase by one page
    p->allocSize = itemSize + pageSize;
  } else {
    // otherwise, round down to a page boundary, then add two pages
    p->allocSize = (itemSize & ~(pageSize-1)) + 2*pageSize;
  }
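
  // For example, assuming the common 4096-byte page size: an itemSize of
  // 4096 gives allocSize 8192, while an itemSize of 5000 rounds down to 4096
  // and then adds two pages for an allocSize of 12288.  Either way there is
  // room to carve out a page-aligned region that covers the whole record.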
#else
  // normal version: seed the list with its one permanent record
  p->head = RUNMALLOC( p->itemSize );

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated( p->head );
  }

  p->head->next = NULL;
  p->tail       = p->head;
#endif

  return p;
}


#ifdef MEMPOOL_DETECT_MISUSE

static inline void poolfreeinto( MemPool* p, void* ptr ) {
  // don't actually return memory to the pool, just lock
  // it up tight so the first code to touch it badly gets caught;
  // also, mprotect automatically protects full pages
  if( mprotect( ptr, p->itemSize, PROT_NONE ) != 0 ) {
    printf( "mprotect failed, %s.\n", strerror( errno ) );
    exit( -1 );
  }
}
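
// With the record's pages set to PROT_NONE, any later load or store through
// a stale pointer faults immediately (SIGSEGV), so the offending access is
// caught at the exact instruction rather than as corruption found later.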

#else

static inline void poolfreeinto( MemPool* p, void* ptr ) {
  MemPoolItem* tailCurrent;
  MemPoolItem* tailActual;

  // set up the now-unneeded record as the new tail of the
  // free list by treating its first bytes as a next pointer
  MemPoolItem* tailNew = (MemPoolItem*) ptr;
  tailNew->next = NULL;
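
  // Enqueue protocol: swing p->tail to the new record with a CAS, then link
  // the previous tail to it.  A concurrent poolalloc may briefly see a tail
  // whose next is still NULL, which is exactly why the list always keeps one
  // record and poolalloc treats a single-record list as empty.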

  while( 1 ) {
    // make sure the null write happens before the insertion,
    // and make sure that we reload tailCurrent on every retry
    __sync_synchronize();   // full memory fence (GCC builtin)

    tailCurrent = p->tail;
    tailActual  = (MemPoolItem*)
      CAS( &(p->tail),           // ptr to set
           (INTPTR) tailCurrent, // expected current tail
           (INTPTR) tailNew      // try to install our new tail
           );

    if( tailActual == tailCurrent ) {
      // success, link the old tail to the new one
      tailCurrent->next = tailNew;
      return;
    }

    // the CAS failed, another thread won the race; retry the entire operation
  }
}

#endif


#ifdef MEMPOOL_DETECT_MISUSE

static inline void* poolalloc( MemPool* p ) {
  // put the memory we intend to expose to the client
  // on a page-aligned boundary; always return
  // new memory
  INTPTR nonAligned = (INTPTR) RUNMALLOC( p->allocSize );

  void* newRec = (void*)( (nonAligned + pageSize-1) & ~(pageSize-1) );
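
  // e.g. with 4096-byte pages, a nonAligned value of 0x804b028 rounds up to
  // 0x804c000; since allocSize always includes at least one spare page, the
  // aligned region still covers the whole record.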

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated( newRec );
  }

  return newRec;
}

#else

static inline void* poolalloc( MemPool* p ) {
  // to protect the CAS in poolfree from dereferencing
  // null, treat the queue as empty when there is
  // only one item.  The dequeue operation is only
  // executed by the thread that owns the pool, so
  // it doesn't require an atomic op
  MemPoolItem* headCurrent = p->head;
  MemPoolItem* next        = headCurrent->next;

  if( next == NULL ) {
    // only one item, so don't take from pool
    void* newRec = RUNMALLOC( p->itemSize );

    if( p->initFreshlyAllocated != NULL ) {
      p->initFreshlyAllocated( newRec );
    }

    return newRec;
  }
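
  // Otherwise take the record at the front: advance head to the next record
  // and hand the old head to the caller.  The prefetches below warm the first
  // cache lines of the new head, which is the record that will be handed out
  // by the next allocation from this pool.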
  p->head = next;

  asm volatile ( "prefetcht0 (%0)" :: "r" (next) );
  next = (MemPoolItem*)( ((char*)next) + CACHELINESIZE );
  asm volatile ( "prefetcht0 (%0)" :: "r" (next) );

  return (void*)headCurrent;
}

#endif


static void pooldestroy( MemPool* p ) {
#ifndef MEMPOOL_DETECT_MISUSE
  // normal version: walk the free list and release every pooled record
  // (RUNFREE is assumed to be the runtime's counterpart to RUNMALLOC)
  MemPoolItem* i = p->head;
  while( i != NULL ) {
    MemPoolItem* n = i->next;
    RUNFREE( i );
    i = n;
  }
#endif
  RUNFREE( p );
}

#endif // ___MEMPOOL_H__