4 //////////////////////////////////////////////////////////
6 // A memory pool implements POOLCREATE, POOLALLOC and
7 // POOLFREE to improve memory allocation by reusing records.
9 // This implementation uses a lock-free singly-linked list
10 // to store reusable records. The list is initialized with
11 // one valid record, and the list is considered empty when
12 // it has only one record; this allows the enqueue operation's
13 // CAS to assume tail can always be dereferenced.
15 // poolfree adds newly freed records to the list BACK
17 // poolalloc either takes records from FRONT or mallocs
19 //////////////////////////////////////////////////////////
#ifdef MEMPOOL_DETECT_MISUSE
// Page size of the host, queried via sysconf() in poolcreate; used to
// round record sizes to page multiples so freed records can be
// mprotect'ed whole.  NOTE(review): written once at pool creation and
// read by every pool — confirm poolcreate is not called concurrently.
static INTPTR pageSize;
// Assumed cache line size, used to pad the MemPool struct and to
// prefetch two consecutive lines of a dequeued record in poolalloc.
#define CACHELINESIZE 64
// Free-list node: poolfreeinto reinterprets the first bytes of a freed
// record as one of these, so itemSize must be >= sizeof(MemPoolItem).
// NOTE(review): the struct body is not visible in this chunk —
// presumably just a 'next' pointer (see tailNew->next in poolfreeinto).
typedef struct MemPoolItem_t {

// Pool descriptor.  NOTE(review): head/tail/itemSize/allocSize/
// protectSize members referenced elsewhere in this file are declared
// in lines not visible in this chunk.
typedef struct MemPool_t {
  // only invoke this on items that are
  // actually new, saves time for reused
  // records, which were initialized on their first allocation
  void(*initFreshlyAllocated)(void*);

#ifdef MEMPOOL_DETECT_MISUSE
  // avoid cache line contention between producer/consumer...
  char buffer[CACHELINESIZE];
// the memory pool must always have at least one
// record: p->head acts as a sentinel so the lock-free enqueue in
// poolfreeinto can always dereference the tail safely.
//
// itemSize:    size in bytes of each record handed out by poolalloc
// initializer: optional (may be NULL) one-time init for brand-new
//              records; never re-run on recycled records
// returns:     a freshly allocated, seeded pool
static MemPool* poolcreate( int itemSize,
                            void(*initializer)(void*)

  MemPool* p = RUNMALLOC( sizeof( MemPool ) );
  p->itemSize = itemSize;

  // remembered so poolalloc can initialize only brand-new records
  p->initFreshlyAllocated = initializer;

#ifdef MEMPOOL_DETECT_MISUSE
  // when detecting misuse, round the item size
  // up to a page and add a page, so whatever
  // allocated memory you get, you can use a
  // page-aligned subset as the record
  pageSize = sysconf( _SC_PAGESIZE );

  if( itemSize % pageSize == 0 ) {
    // if the item size is already an exact multiple
    // of the page size, just increase alloc by one page
    p->allocSize = itemSize + pageSize;

    // and size for mprotect should be exact page multiple
    p->protectSize = itemSize;
    // otherwise, round down to a page size, then add two
    // NOTE(review): the 'else' header is not visible in this chunk;
    // the following lines presumably belong to the else arm
    p->allocSize = (itemSize & ~(pageSize-1)) + 2*pageSize;

    // and size for mprotect should be exact page multiple
    // so round down, add one
    p->protectSize = (itemSize & ~(pageSize-1)) + pageSize;

  // seed the free list with the single sentinel record
  p->head = RUNMALLOC( p->itemSize );

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated( p->head );

  // sentinel has no successor yet: the list is "empty"
  p->head->next = NULL;
#ifdef MEMPOOL_DETECT_MISUSE

// Misuse-detecting free: records are never actually recycled.
// p:   pool the record came from (supplies protectSize)
// ptr: page-aligned record obtained from the misuse-mode poolalloc
static inline void poolfreeinto( MemPool* p, void* ptr ) {
  // don't actually return memory to the pool, just lock
  // it up tight so first code to touch it badly gets caught
  // also, mprotect automatically protects full pages
  if( mprotect( ptr, p->protectSize, PROT_NONE ) != 0 ) {
    // NOTE(review): the errno-dispatch conditionals around these two
    // diagnostics are not visible in this chunk
    printf( "mprotect failed, ENOMEM.\n" );
    printf( "mprotect failed, errno=%d.\n", errno );
// Lock-free enqueue of a freed record at the BACK of the free list.
// Multiple producers may race here, hence the CAS; the retry loop
// header is not visible in this chunk.
// p:   pool to return the record to
// ptr: record previously handed out by poolalloc on this pool
static inline void poolfreeinto( MemPool* p, void* ptr ) {

  MemPoolItem* tailCurrent;
  MemPoolItem* tailActual;

  // set up the now unneeded record as the new tail of the
  // free list by treating its first bytes as next pointer,
  MemPoolItem* tailNew = (MemPoolItem*) ptr;
  tailNew->next = NULL;

  // make sure the null happens before the insertion,
  // also makes sure that we reload tailCurrent, etc..
  // NOTE(review): the barrier/fence itself is on a line not visible
  // in this chunk

  tailCurrent = p->tail;
  tailActual = (MemPoolItem*)
    CAS( &(p->tail),          // ptr to set
         (INTPTR) tailCurrent, // current tail's next should be NULL
         (INTPTR) tailNew      // try set to our new tail

  if( tailActual == tailCurrent ) {
    // success, update tail
    // link the old tail to the new node; the consumer (poolalloc)
    // treats next==NULL as end-of-list, so this publish completes
    // the enqueue
    tailCurrent->next = tailNew;

  // if CAS failed, retry entire operation
#ifdef MEMPOOL_DETECT_MISUSE

// Misuse-detecting alloc: never recycles, always returns fresh,
// page-aligned memory so the matching poolfreeinto can mprotect it.
// p:       pool supplying allocSize/protectSize and the initializer
// returns: page-aligned record of at least p->itemSize bytes
static inline void* poolalloc( MemPool* p ) {
  // put the memory we intend to expose to client
  // on a page-aligned boundary, always return
  // memory from a fresh allocation, never from the free list
  INTPTR nonAligned = (INTPTR) RUNMALLOC( p->allocSize );

  // round up to the next page boundary within the over-allocation
  void* newRec = (void*)((nonAligned + pageSize-1) & ~(pageSize-1));

  //printf( "PageSize is %d or 0x%x.\n", (INTPTR)pageSize, (INTPTR)pageSize );
  //printf( "itemSize is 0x%x and allocSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize );
  //printf( "Allocation returned 0x%x to 0x%x,\n", (INTPTR)nonAligned, (INTPTR)nonAligned + (INTPTR)(p->allocSize) );
  //printf( "Intend to use 0x%x to 0x%x,\n\n", (INTPTR)newRec, (INTPTR)newRec + (INTPTR)(p->itemSize) );

  // intentionally touch the top of the new, aligned record in terms of the
  // pages that will be locked when it eventually is free'd
  INTPTR topOfRec = (INTPTR)newRec;
  topOfRec += p->protectSize - 1;
  ((char*)topOfRec)[0] = 0x1;

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated( newRec );
// Dequeue a record from the FRONT of the free list, or malloc a new
// one when the list is "empty" (holds only the sentinel).
// p:       pool to allocate from
// returns: a record of p->itemSize bytes; recycled records are NOT
//          re-run through initFreshlyAllocated
static inline void* poolalloc( MemPool* p ) {

  // to protect CAS in poolfree from dereferencing
  // null, treat the queue as empty when there is
  // only one item. The dequeue operation is only
  // executed by the thread that owns the pool, so
  // it doesn't require an atomic op
  MemPoolItem* headCurrent = p->head;
  MemPoolItem* next=headCurrent->next;

  // NOTE(review): the empty-check conditional (presumably next==NULL)
  // is on a line not visible in this chunk
  // only one item, so don't take from pool
  void* newRec = RUNMALLOC( p->itemSize );

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated( newRec );

  // warm the first two cache lines of the successor record before
  // handing out the old head (prefetcht0 = fetch into all cache levels)
  asm volatile( "prefetcht0 (%0)" :: "r" (next));
  next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
  asm volatile( "prefetcht0 (%0)" :: "r" (next));

  return (void*)headCurrent;
// Tear down the pool, releasing every record still on the free list.
// NOTE(review): the walk-and-free loop body is not visible in this
// chunk; in misuse mode freed records were mprotect'ed, so the list
// walk is compiled out.
static void pooldestroy( MemPool* p ) {

#ifndef MEMPOOL_DETECT_MISUSE
  MemPoolItem* i = p->head;
260 #endif // ___MEMPOOL_H__