//////////////////////////////////////////////////////////
//
-// A memory pool implements POOLCREATE, POOLALLOC and
+// A memory pool implements POOLCREATE, POOLALLOC and
// POOLFREE to improve memory allocation by reusing records.
//
// This implementation uses a lock-free singly-linked
// list to store reusable records.
#include <string.h>
static INTPTR pageSize;
#endif
-
#include "runtime.h"
#include "mem.h"
#include "mlp_lock.h"
typedef struct MemPoolItem_t {
- void* next;
+ struct MemPoolItem_t* next;
} MemPoolItem;
// only invoke this on items that are
// actually new, saves time for reused
// items
- void(*initFreshlyAllocated)(void*);
+ void (*initFreshlyAllocated)(void*);
#ifdef MEMPOOL_DETECT_MISUSE
int allocSize;
+ int protectSize;
#else
 // normal version
MemPoolItem* head;
// the memory pool must always have at least one
// item in it
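// (this keeps p->head and p->tail non-NULL, so poolalloc and
// poolfreeinto never have to deal with an empty list)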
-static MemPool* poolcreate( int itemSize,
- void(*initializer)(void*)
- ) {
+static MemPool* poolcreate(int itemSize,
+ void (*initializer)(void*)
+ ) {
- MemPool* p = RUNMALLOC( sizeof( MemPool ) );
+  MemPool* p = RUNMALLOC(sizeof(MemPool));
p->itemSize = itemSize;
-
+
p->initFreshlyAllocated = initializer;
#ifdef MEMPOOL_DETECT_MISUSE
// when detecting misuse, round the item size
// up to a page and add a page, so whatever
// allocated memory you get, you can use a
- // page-aligned subset as the record
- pageSize = sysconf( _SC_PAGESIZE );
+ // page-aligned subset as the record
+ pageSize = sysconf(_SC_PAGESIZE);
if( itemSize % pageSize == 0 ) {
// if the item size is already an exact multiple
- // of the page size, just increase by one page
+ // of the page size, just increase alloc by one page
p->allocSize = itemSize + pageSize;
+
+ // and size for mprotect should be exact page multiple
+ p->protectSize = itemSize;
} else {
 // otherwise, round down to a page size, then add two pages
p->allocSize = (itemSize & ~(pageSize-1)) + 2*pageSize;
+
+ // and size for mprotect should be exact page multiple
+ // so round down, add one
+ p->protectSize = (itemSize & ~(pageSize-1)) + pageSize;
}
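+
+  // worked example (assuming a 4096-byte page): itemSize = 100 gives
+  // allocSize = 0 + 2*4096 = 8192 and protectSize = 0 + 4096 = 4096;
+  // itemSize = 4096 gives allocSize = 8192 and protectSize = 4096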
#else
// normal version
- p->head = RUNMALLOC( p->itemSize );
+ p->head = RUNMALLOC(p->itemSize);
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( p->head );
+ p->initFreshlyAllocated(p->head);
}
p->head->next = NULL;
#ifdef MEMPOOL_DETECT_MISUSE
-static inline void poolfreeinto( MemPool* p, void* ptr ) {
+static inline void poolfreeinto(MemPool* p, void* ptr) {
// don't actually return memory to the pool, just lock
// it up tight so first code to touch it badly gets caught
// also, mprotect automatically protects full pages
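 // (records handed out by poolalloc below are page-aligned, and the
 // protected length is computed as an exact multiple of the page size)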
- if( mprotect( ptr, p->itemSize, PROT_NONE ) != 0 ) {
- printf( "mprotect failed, %s.\n", strerror( errno ) );
- exit( -1 );
+ if( mprotect(ptr, p->protectSize, PROT_NONE) != 0 ) {
+
+ switch( errno ) {
+
+ case ENOMEM: {
+ printf("mprotect failed, ENOMEM.\n");
+ } break;
+
+ default:
+ printf("mprotect failed, errno=%d.\n", errno);
+ }
+
+ printf("itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize);
+ printf("Intended to protect 0x%x to 0x%x,\n\n", (INTPTR)ptr, (INTPTR)ptr + (INTPTR)(p->protectSize) );
+
+ exit(-1);
}
}
#else
-// normal version
-static inline void poolfreeinto( MemPool* p, void* ptr ) {
- MemPoolItem* tailCurrent;
- MemPoolItem* tailActual;
-
- // set up the now unneeded record to as the tail of the
- // free list by treating its first bytes as next pointer,
+// normal version
+static inline void poolfreeinto(MemPool* p, void* ptr) {
MemPoolItem* tailNew = (MemPoolItem*) ptr;
tailNew->next = NULL;
-
- while( 1 ) {
- // make sure the null happens before the insertion,
- // also makes sure that we reload tailCurrent, etc..
- BARRIER();
-
- tailCurrent = p->tail;
- tailActual = (MemPoolItem*)
- CAS( &(p->tail), // ptr to set
- (INTPTR) tailCurrent, // current tail's next should be NULL
- (INTPTR) tailNew // try set to our new tail
- );
- if( tailActual == tailCurrent ) {
- // success, update tail
- tailCurrent->next = tailNew;
- return;
- }
-
- // if CAS failed, retry entire operation
- }
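+  // the fence keeps the tailNew->next = NULL store above from being
+  // reordered past the publish; LOCKXCHG atomically swaps our record in
+  // as the new tail and returns the old tail, which only this thread now
+  // owns, so one plain store links it in with no CAS retry loop needed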
+  CFENCE;
+  MemPoolItem* tailCurrent = (MemPoolItem*) LOCKXCHG((INTPTR*) &p->tail, (INTPTR) tailNew);
+  tailCurrent->next = tailNew;
}
#endif
#ifdef MEMPOOL_DETECT_MISUSE
-static inline void* poolalloc( MemPool* p ) {
+static inline void* poolalloc(MemPool* p) {
// put the memory we intend to expose to client
// on a page-aligned boundary, always return
// new memory
- INTPTR nonAligned = (INTPTR) RUNMALLOC( p->allocSize );
+
+ INTPTR nonAligned = (INTPTR) RUNMALLOC(p->allocSize);
void* newRec = (void*)((nonAligned + pageSize-1) & ~(pageSize-1));
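 // e.g. with a 0x1000-byte page, nonAligned = 0x804a010 is rounded up
 // to newRec = 0x804b000, the next page boundary within the allocation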
+ //printf( "PageSize is %d or 0x%x.\n", (INTPTR)pageSize, (INTPTR)pageSize );
+ //printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
+ //printf( "Allocation returned 0x%x to 0x%x,\n", (INTPTR)nonAligned, (INTPTR)nonAligned + (INTPTR)(p->allocSize) );
+ //printf( "Intend to use 0x%x to 0x%x,\n\n", (INTPTR)newRec, (INTPTR)newRec + (INTPTR)(p->itemSize) );
+
+  // intentionally touch the last byte of the page span that will be
+  // locked by mprotect when this record is eventually freed
+ INTPTR topOfRec = (INTPTR)newRec;
+ topOfRec += p->protectSize - 1;
+ ((char*)topOfRec)[0] = 0x1;
+
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( newRec );
+ p->initFreshlyAllocated(newRec);
}
return newRec;
#else
// normal version
-static inline void* poolalloc( MemPool* p ) {
+static inline void* poolalloc(MemPool* p) {
// to protect CAS in poolfree from dereferencing
 // null, treat the queue as empty when there is only one item left
MemPoolItem* headCurrent = p->head;
MemPoolItem* next=headCurrent->next;
int i;
+
+
if(next == NULL) {
// only one item, so don't take from pool
- void* newRec = RUNMALLOC( p->itemSize );
-
+    void* newRec = RUNMALLOC(p->itemSize);
if( p->initFreshlyAllocated != NULL ) {
- p->initFreshlyAllocated( newRec );
+ p->initFreshlyAllocated(newRec);
}
-
return newRec;
}
-
+
p->head = next;
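 // warm the first two cache lines of the new head record, since it is
 // what the next call to poolalloc will most likely hand out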
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (next));
next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
+ asm volatile ( "prefetcht0 (%0)" :: "r" (next));
return (void*)headCurrent;
}
-static void pooldestroy( MemPool* p ) {
+static void pooldestroy(MemPool* p) {
#ifndef MEMPOOL_DETECT_MISUSE
 MemPoolItem* i = p->head;
 MemPoolItem* n;
while( i != NULL ) {
n = i->next;
- free( i );
+ free(i);
i = n;
}
#endif
- free( p );
+ free(p);
}
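
// Usage sketch (illustrative only; Foo and initFoo are hypothetical):
// an initializer passed to poolcreate runs only on records the pool
// has never handed out before, never on recycled ones.
//
//   static void initFoo(void* rec) { memset(rec, 0, sizeof(Foo)); }
//
//   MemPool* fooPool = poolcreate(sizeof(Foo), initFoo);
//   Foo* f = (Foo*) poolalloc(fooPool);
//   /* ... use f ... */
//   poolfreeinto(fooPool, f);  // back to the pool for reuse
//   pooldestroy(fooPool);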
#endif // ___MEMPOOL_H__