4 //////////////////////////////////////////////////////////
6 // A memory pool implements POOLCREATE, POOLALLOC and
7 // POOLFREE to improve memory allocation by reusing records.
9 // This implementation uses a lock-free singly-linked list
10 // to store reusable records. The list is initialized with
11 // one valid record, and the list is considered empty when
12 // it has only one record; this allows the enqueue operation's
13 // CAS to assume tail can always be dereferenced.
15 // poolfree adds newly freed records to the list BACK
17 // poolalloc either takes records from FRONT or mallocs
19 //////////////////////////////////////////////////////////
26 // The cache line size is set for the AMD Opteron 6168 (dc-10)
27 // that has L1 and L2 cache line sizes of 64 bytes. Source:
28 // http://www.cs.virginia.edu/~skadron/cs451/opteron/opteron.ppt
29 #define CACHELINESIZE 64
// Free-list node: a freed record's first sizeof(void*) bytes are
// reinterpreted as the 'next' link (see poolfreeinto), so records
// need no separate node allocation.
// NOTE(review): the struct fields and closing braces of both
// typedefs are not visible in this chunk — confirm MemPoolItem
// carries a 'next' pointer and MemPool carries head/tail/itemSize.
32 typedef struct MemPoolItem_t {
37 typedef struct MemPool_t {
41 // avoid cache line contention between producer/consumer...
// padding sized to one cache line minus one pointer — presumably
// separates the field(s) above from the field(s) below so the
// allocating thread and freeing threads touch different cache
// lines; TODO confirm against the full struct layout
42 char buffer[CACHELINESIZE - sizeof(void*)];
48 // the memory pool must always have at least one
// record in its free list (the list is "empty" at one record, per
// the header comment), so poolcreate seeds it with one zeroed
// record sized itemSize.
//
// Returns a zero-initialized MemPool handing out records of
// itemSize bytes.
// NOTE(review): the tail initialization and the `return p;` are
// not visible in this chunk — confirm p->tail is pointed at the
// seed record before the pool is used.
// NOTE(review): both calloc results are used unchecked; OOM here
// dereferences NULL.
50 static MemPool* poolcreate( int itemSize ) {
51 MemPool* p = calloc( 1, sizeof( MemPool ) );
52 p->itemSize = itemSize;
53 p->head = calloc( 1, itemSize );
61 // in: a ptr, expected old, desired new
64 // Pass in a ptr, what you expect the old value is and
65 // what you want the new value to be.
66 // The CAS returns the value that was actually stored: if it
67 // matches your proposed old value, the update succeeded;
68 // otherwise another thread's CAS beat yours, so try again (the
69 // returned value is the old value you pass on the next attempt.)
// poolfreeinto: return a no-longer-needed record to the pool's
// free list — a lock-free enqueue at the list BACK. Multiple
// freeing threads may race here; the CAS below arbitrates which
// one becomes the new tail.
71 static inline void poolfreeinto( MemPool* p, void* ptr ) {
73 MemPoolItem* tailCurrent;
74 MemPoolItem* tailActual;
76 // set up the now unneeded record as the tail of the
77 // free list by treating its first bytes as the next pointer,
78 MemPoolItem* tailNew = (MemPoolItem*) ptr;
// NOTE(review): the `tailNew->next = NULL` store and the memory
// barrier the next comment refers to are not visible in this
// chunk — confirm both precede the CAS, or racing producers can
// publish a tail with a garbage next pointer.
82 // make sure the null happens before the insertion,
83 // also makes sure that we reload tailCurrent, etc..
86 tailCurrent = p->tail;
87 tailActual = (MemPoolItem*)
// NOTE(review): casting pointers through (long) is non-portable
// (wrong on LLP64 targets such as 64-bit Windows); intptr_t or a
// pointer-typed CAS builtin would be safer — confirm CAS's macro
// definition elsewhere in the project.
88 CAS( &(p->tail), // ptr to set
89 (long) tailCurrent, // current tail's next should be NULL
90 (long) tailNew // try set to our new tail
92 if( tailActual == tailCurrent ) {
93 // success, update tail
// we won the CAS, so we uniquely own tailCurrent->next and may
// link the new record behind the old tail without further atomics
94 tailCurrent->next = tailNew;
98 // if CAS failed, retry entire operation
// NOTE(review): the retry loop structure and the function's
// closing braces are not visible in this chunk.
// poolalloc: take a reusable record from the FRONT of the free
// list, or fall back to RUNMALLOC when the list is "empty"
// (i.e. holds only its single sentinel record — see header
// comment). Single-consumer: only the pool-owning thread calls
// this, so the dequeue needs no atomic operation.
104 static inline void* poolalloc( MemPool* p ) {
106 // to protect CAS in poolfree from dereferencing
107 // null, treat the queue as empty when there is
108 // only one item. The dequeue operation is only
109 // executed by the thread that owns the pool, so
110 // it doesn't require an atomic op
111 MemPoolItem* headCurrent = p->head;
113 if( headCurrent->next == NULL ) {
114 // only one item, so don't take from pool
115 return RUNMALLOC( p->itemSize );
// NOTE(review): the closing brace of this if is not visible in
// this chunk.
118 p->head = headCurrent->next;
121 //////////////////////////////////////////////////////////
123 // a prefetch statement from the Linux kernel,
124 // which the little "m" depends on architecture:
126 // static inline void prefetch(void *x)
128 // asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
132 // but this built-in gcc one seems the most portable:
133 //////////////////////////////////////////////////////////
// warm the cache for the next allocation's head->next check
134 __builtin_prefetch( &(p->head->next) );
// NOTE(review): the `return headCurrent;` (handing the dequeued
// record to the caller) and the closing brace are not visible in
// this chunk — confirm the dequeued record, not p->head, is
// returned.
// pooldestroy: release the pool. Starts a walk from the list
// head; presumably frees every remaining record and then the
// MemPool itself.
// NOTE(review): the traversal/free loop and the free of p are not
// visible in this chunk — confirm all records (including the
// sentinel) are released.
141 static void pooldestroy( MemPool* p ) {
142 MemPoolItem* i = p->head;
155 #endif // ___MEMPOOL_H__