#define __MLP_RUNTIME__
+#include <stdlib.h>
+#include <stdio.h>
+
+
#include <pthread.h>
+#include "runtime.h"
+#include "mem.h"
#include "Queue.h"
#include "psemaphore.h"
#include "mlp_lock.h"
#include "memPool.h"
+
+#include "classdefs.h"
+
+
+
#ifndef FALSE
#define FALSE 0
#endif
// these are useful for interpreting an INTPTR to an
// Object at runtime to retrieve the object's type
-// or object id (OID), 64-bit safe
-#define OBJPTRPTR_2_OBJTYPE( opp ) ((int*)(opp))[0]
-#define OBJPTRPTR_2_OBJOID( opp ) ((int*)(opp))[1]
+// or object id (OID)
+#define OBJPTRPTR_2_OBJTYPE(opp) ((struct ___Object___*)*(opp))->type
+#define OBJPTRPTR_2_OBJOID(opp) ((struct ___Object___*)*(opp))->oid
+
// forwarding list elements is a linked
// structure of arrays, should help task
// a lot of items are on the list
#define FLIST_ITEMS_PER_ELEMENT 30
typedef struct ForwardingListElement_t {
-  int numItems;
+  int numItems;                            // count of valid entries in items[]
  struct ForwardingListElement_t* nextElement;
-  INTPTR items[FLIST_ITEMS_PER_ELEMENT];
+  INTPTR items[FLIST_ITEMS_PER_ELEMENT];   // inline storage chunk; overflow chains via nextElement
} ForwardingListElement;
struct MemPool_t;
// casting a generated SESE record to this type can be used, because
// the common structure is always the first item in a
// customized SESE record
-typedef struct SESEcommon_t {
+typedef struct SESEcommon_t {
// the identifier for the class of sese's that
// are instances of one particular static code block
// IMPORTANT: the class ID must be the first field of
// the task record so task dispatch works correctly!
int classID;
- volatile int unresolvedDependencies;
+ volatile int unresolvedDependencies;
// a parent waits on this semaphore when stalling on
// this child, the child gives it at its SESE exit
psemaphore* parentsStallSem;
-
+
// NOTE: first element is embedded in the task
// record, so don't free it!
//ForwardingListElement forwardList;
struct Queue forwardList;
- volatile int doneExecuting;
- volatile int numRunningChildren;
+ volatile int doneExecuting;
+ volatile int numRunningChildren;
struct SESEcommon_t* parent;
-
+
int numMemoryQueue;
int rentryIdx;
int unresolvedRentryIdx;
struct REntry_t* rentryArray[NUMRENTRY];
struct REntry_t* unresolvedRentryArray[NUMRENTRY];
-
#ifdef RCR
+ struct Hashtable_rcr ** allHashStructures;
int offsetToParamRecords;
volatile int rcrstatus;
volatile int retired;
// the lock guards the following data SESE's
// use to coordinate with one another
pthread_mutex_t lock;
- pthread_cond_t runningChildrenCond;
+ pthread_cond_t runningChildrenCond;
} SESEcommon;
// a thread-local var refers to the currently
+// A dependence-record entry ("rentry") queued on a MemoryQueue; fields
+// differ between the RCR build and the default build (see #ifdef RCR).
-typedef struct REntry_t{
-  // fine read:0, fine write:1, parent read:2,
+typedef struct REntry_t {
+  // fine read:0, fine write:1, parent read:2,
  // parent write:3 coarse: 4, parent coarse:5, scc: 6
  int type;
-  struct Hashtable_t* hashtable;
+#ifdef RCR
+  int count;
+#else
+  int isBufMode;
+#endif
+  struct MemoryQueueItem_t *qitem;
  struct BinItem_t* binitem;
-  struct Vector_t* vector;
-  struct SCC_t* scc;
  struct MemoryQueue_t* queue;
-  psemaphore * parentStallSem;
-  int tag;
+  // owning task record — presumably the task that issued this entry; TODO confirm
  SESEcommon* seseRec;
  INTPTR* pointer;
-  int isBufMode;
+#ifdef RCR
+  INTPTR mask;
+  int index;
+#else
+  psemaphore * parentStallSem;
+  int tag;
+#endif
} REntry;
#ifdef RCR
struct rcrRecord *next;
};
-typedef struct SESEstall_t {
+typedef struct SESEstall_t {
SESEcommon common;
int size;
void * next;
int total; //total non-retired
int status; //NOTREADY, READY
struct MemoryQueueItem_t *next;
-
+
} MemoryQueueItem;
typedef struct MemoryQueue_t {
MemoryQueueItem * head;
- MemoryQueueItem * tail;
+ MemoryQueueItem * tail;
REntry * binbuf[NUMBINS];
REntry * buf[NUMRENTRY];
int bufcount;
+// NOTE(review): currently a no-op — the refCount increment in the body is
+// commented out; confirm whether forwarding should still take a reference.
-static inline void ADD_FORWARD_ITEM( ForwardingListElement* e,
-                                     SESEcommon* s ) {
+static inline void ADD_FORWARD_ITEM(ForwardingListElement* e,
+                                    SESEcommon* s) {
  //atomic_inc( &(s->refCount) );
}
-
-
-
-// simple mechanical allocation and
+// simple mechanical allocation and
// deallocation of SESE records
-void* mlpAllocSESErecord( int size );
-void mlpFreeSESErecord( SESEcommon* seseRecord );
+void* mlpAllocSESErecord(int size);
+void mlpFreeSESErecord(SESEcommon* seseRecord);
MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue);
REntry* mlpCreateFineREntry(MemoryQueue *q, int type, SESEcommon* seseToIssue, void* dynID);
+#ifdef RCR
+REntry* mlpCreateREntry(MemoryQueue *q, int type, SESEcommon* seseToIssue, INTPTR mask);
+#else
REntry* mlpCreateREntry(MemoryQueue *q, int type, SESEcommon* seseToIssue);
+#endif
MemoryQueue* createMemoryQueue();
void rehashMemoryQueue(SESEcommon* seseParent);
+// memory-queue retirement / dependence-resolution entry points
+// (implemented elsewhere in the runtime)
+void TAILWRITECASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc);
+void RETIRESCC(MemoryQueue *Q, REntry *r);
+void RETIREHASHTABLE(MemoryQueue *q, REntry *r);
+void RETIREBIN(Hashtable *T, REntry *r, BinItem *b);
+void RETIREVECTOR(MemoryQueue *Q, REntry *r);
+void RESOLVECHAIN(MemoryQueue *Q);
+void RESOLVEHASHTABLE(MemoryQueue *Q, Hashtable *T);
+void RESOLVEVECTOR(MemoryQueue *q, Vector *V);
+void RESOLVESCC(MemoryQueue *q, SCC *S);
+void resolveDependencies(REntry* rentry);
+
+#ifndef RCR
+int RESOLVEBUF(MemoryQueue * q, SESEcommon *seseCommon);
+void resolvePointer(REntry* rentry);
+#endif
-
+// Atomically take one reference on a task record.
-static inline void ADD_REFERENCE_TO( SESEcommon* seseRec ) {
-  atomic_inc( &(seseRec->refCount) );
+static inline void ADD_REFERENCE_TO(SESEcommon* seseRec) {
+  atomic_inc(&(seseRec->refCount) );
}
+// Atomically drop one reference; when the count reaches zero the record is
+// returned to the parent's task-record memory pool.
+// Returns 1 if the record was freed here, 0 otherwise.
-static inline void RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
-  if( atomic_sub_and_test( 1, &(seseRec->refCount) ) ) {
-    poolfreeinto( seseRec->parent->taskRecordMemPool, seseRec );
+static inline int RELEASE_REFERENCE_TO(SESEcommon* seseRec) {
+  if( atomic_sub_and_test(1, &(seseRec->refCount) ) ) {
+    poolfreeinto(seseRec->parent->taskRecordMemPool, seseRec);
+    return 1;
  }
+  return 0;
}
-static MemPool* taskpoolcreate( int itemSize ) {
-  MemPool* p  = calloc( 1, sizeof( MemPool ) );
-  SESEcommon *c = (SESEcommon *) p;
-  pthread_cond_init( &(c->runningChildrenCond), NULL );
-  pthread_mutex_init( &(c->lock), NULL );
-
-  p->itemSize = itemSize;
-  p->head = calloc( 1, itemSize );
-  p->head->next = NULL;
-  p->tail = p->head;
-  return p;
+// Atomically drop refCount references at once; frees the record back to the
+// parent's task-record pool when the count reaches zero.
+// Returns 1 if the record was freed here, 0 otherwise.
+static inline int RELEASE_REFERENCES_TO(SESEcommon* seseRec, int refCount) {
+  if( atomic_sub_and_test(refCount, &(seseRec->refCount) ) ) {
+    poolfreeinto(seseRec->parent->taskRecordMemPool, seseRec);
+    return 1;
+  }
+  return 0;
}
+#define CHECK_RECORD(x) ;
-static inline void* taskpoolalloc( MemPool* p ) {
-
- // to protect CAS in poolfree from dereferencing
- // null, treat the queue as empty when there is
- // only one item. The dequeue operation is only
- // executed by the thread that owns the pool, so
- // it doesn't require an atomic op
- MemPoolItem* headCurrent = p->head;
- MemPoolItem* next=headCurrent->next;
- int i;
- if(next == NULL) {
- // only one item, so don't take from pool
- SESEcommon *c = (SESEcommon*) RUNMALLOC( p->itemSize );
- pthread_cond_init( &(c->runningChildrenCond), NULL );
- pthread_mutex_init( &(c->lock), NULL );
- return c;
- }
-
- p->head = next;
-
- //////////////////////////////////////////////////////////
- //
- //
- // static inline void prefetch(void *x)
- // {
- // asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
- // }
- //
- //
- // but this built-in gcc one seems the most portable:
- //////////////////////////////////////////////////////////
- //__builtin_prefetch( &(p->head->next) );
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
- next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
- next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
- next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
- asm volatile( "prefetcht0 (%0)" :: "r" (next));
-
- return (void*)headCurrent;
+
+////////////////////////////////////////////////
+//
+// Some available debug versions of the above
+// pool allocation-related helpers. The lower
+// 'x' appended to names means they are not hooked
+// up, but check em in so we can switch names and
+// use them for debugging
+//
+////////////////////////////////////////////////
+#define ADD_REFERENCE_TOx(x) atomic_inc(&((x)->refCount) ); printf("0x%x ADD 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);
+
+#define RELEASE_REFERENCE_TOx(x) if (atomic_sub_and_test(1, &((x)->refCount))) {poolfreeinto(x->parent->taskRecordMemPool, x); printf("0x%x REL 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__); }
+
+// Debug helper: warn when a record acquired from the pool carries a negative
+// reference count (indicates a refcount/lifetime bug).
+// NOTE(review): the original condition tested "refCount < 0" twice; the
+// duplicate is removed here. Possibly a second field (e.g.
+// unresolvedDependencies) was intended for the second test — TODO confirm.
+#define CHECK_RECORDx(x) { \
+    if( ((SESEcommon*)(x))->refCount < 0 ) { \
+      printf("Acquired 0x%x from poolalloc, with refCount=%d\n", (INTPTR)(x), ((SESEcommon*)(x))->refCount); } \
  }
+
+
+// this is for using a memPool to allocate task records,
+// pass this into the poolcreate so it will run your
+// custom init code ONLY for fresh records, reused records
+// can be returned as is
+void freshTaskRecordInitializer(void* seseRecord);
+
+
#endif /* __MLP_RUNTIME__ */