1 #include "hashStructure.h"
2 //#include "WaitingQueue.h"
4 #include "rcr_runtime.h"
9 //NOTE: this is only temporary (for testing) and will be removed in favor of thread local variables
10 //It's basically an array of hashStructures so we can simulate what would happen in a many-threaded version
// Global array of per-thread hash structures (testing scaffolding; see note above).
11 HashStructure ** allHashStructures;
// Predicates on a BinItem_rcr 'type' field: write bins have BINMASK set.
// NOTE(review): macro argument is unparenthesized -- ISWRITEBIN(a|b) would
// expand to (a|b&BINMASK) and misparse; prefer ((x)&BINMASK). Left as-is here.
12 #define ISWRITEBIN(x) (x&BINMASK)
13 #define ISREADBIN(x) (!(x&BINMASK))
14 //#define POPCOUNT(x) __builtin_popcountll(x)
15 //__builtin_popcountll
// Log the (tmpkey, item) pair into the traverser's rcrRecord chain so the
// enqueued bin item can be located again at retire time.  Fast path: the
// current record has room; second chance: the already-linked next record;
// otherwise a fresh record is allocated.  A NULL rcrrec disables logging.
// NOTE(review): no return type is written (implicit int, invalid in C99+);
// no value is ever returned -- this should be declared void.
18 inline enqueuerecord(struct rcrRecord *rcrrec, int tmpkey, BinItem_rcr *item) {
19 if (likely(rcrrec!=NULL)) {
20 struct rcrRecord * tmprec;
// Case 1: room left in the current record.
21 if(likely(rcrrec->index<RCRSIZE)) {
22 int index=rcrrec->index++;
23 rcrrec->ptrarray[index]=(void *) item;
24 rcrrec->array[index]=tmpkey;
// Case 2: spill into the next record in the chain, if present and not full.
25 } else if(likely((tmprec=rcrrec->next)!=NULL)&&likely(tmprec->index<RCRSIZE)) {
26 int index=tmprec->index++;
27 tmprec->ptrarray[index]=(void *) item;
28 tmprec->array[index]=tmpkey;
// Case 3: allocate a fresh record and store the entry at slot 0.
// NOTE(review): initialization of trec->index/next and linking of trec into
// the chain is not visible in this view -- confirm it happens in the
// elided lines that follow.
30 struct rcrRecord *trec=RUNMALLOC(sizeof(struct rcrRecord));
31 trec->ptrarray[0]=(void *) item;
32 trec->array[0]=tmpkey;
40 //NOTE: only temporary
// Allocate the global array holding one HashStructure* per simulated thread
// (testing scaffolding).  Slots are not initialized here; each table is
// presumably created later via rcr_createHashtable -- verify against callers.
// NOTE(review): malloc result is not checked before use elsewhere.
41 void rcr_createMasterHashTableArray(int maxSize){
42 allHashStructures = (HashStructure **) malloc(sizeof(HashStructure *) * maxSize);
// Allocate a hash table and initialize all RNUMBINS bins to empty
// (head == tail == NULL).  The loop index 'i' is declared in an elided line.
// NOTE(review): sizeofWaitingQueue is not used in the visible lines --
// confirm whether the elided remainder consumes it.
45 HashStructure* rcr_createHashtable(int sizeofWaitingQueue){
47 HashStructure* newTable=(HashStructure*)RUNMALLOC(sizeof(HashStructure));
48 for(i=0;i<RNUMBINS;i++){
49 newTable->array[i].head=NULL;
50 newTable->array[i].tail=NULL;
// Allocate a bin item tagged WRITEBIN; the caller fills in the remaining
// fields (task, bit indices, status) after allocation.
56 WriteBinItem_rcr* rcr_createWriteBinItem(){
57 WriteBinItem_rcr* binitem=(WriteBinItem_rcr*)RUNMALLOC(sizeof(WriteBinItem_rcr));
58 binitem->item.type=WRITEBIN;
// Allocate a bin item tagged READBIN.  Read bins aggregate multiple readers
// in their TraverserData array (see uses of ->array[->index] below).
// Initialization of the reader count/index happens in an elided line.
62 ReadBinItem_rcr* rcr_createReadBinItem(){
63 ReadBinItem_rcr* binitem=(ReadBinItem_rcr*)RUNMALLOC(sizeof(ReadBinItem_rcr));
65 binitem->item.type=READBIN;
// Hash an object pointer to a bin index: the object's oid masked down to the
// table's bin range by RH_MASK.
69 inline int rcr_generateKey(void * ptr){
70 return (((struct ___Object___ *) ptr)->oid)&RH_MASK;
// Enqueue a write request by 'task' on T->array[key].  Protocol: spin-lock
// the bin by XCHG'ing 0x1 into head (old head saved in 'val'); inspect the
// tail for a duplicate request by the same task (merge the bit into a write
// tail, or remove a matching reader so it can be upgraded to a write);
// otherwise append a fresh WriteBinItem and store the real head back to
// release the lock.  Returns the item's status (READY/NOTREADY).
// NOTE(review): many interior lines are elided in this view; comments below
// annotate only what is shown.  The exact role of 'mode' (appears to select
// blocking/spin behavior) is in elided branches -- confirm before relying.
73 inline int rcr_BWRITEBINCASE(HashStructure *T, int key, SESEcommon *task, struct rcrRecord *rcrrec, int index, int mode) {
74 //chain of bins exists => tail is valid
75 //if there is something in front of us, then we are not ready
77 BinElement_rcr* be= &(T->array[key]); //do not grab head from here since it's locked (i.e. = 0x1)
79 //LOCK is still needed as different threads will remove items...
// Spin until we swap something other than the 0x1 lock sentinel out of head;
// 'val' then holds the true head and we own the bin.
81 val=(BinItem_rcr *)0x1;
82 val=(BinItem_rcr *)LOCKXCHG((unsigned INTPTR*)&(be->head), (unsigned INTPTR)val);
83 } while(val==(BinItem_rcr*)0x1);
// (Empty-bin path) build a new write item for this task.
86 BinItem_rcr * b=(BinItem_rcr*)rcr_createWriteBinItem();
87 WriteBinItem_rcr * td = (WriteBinItem_rcr*)b;
91 //common to both types
// Single-bit mask for this parameter slot; a write conflicts with both
// readers and writers, so rd and wr masks are set together.
93 td->bitindexrd=td->bitindexwr=1<<index;
95 BARRIER();//do tail before head
98 enqueuerecord(rcrrec, key, b);
// (Non-empty-bin path) examine the current tail.
101 BARRIER();//read head before tail
102 BinItem_rcr *bintail=be->tail;
103 bitvt rdmask=0,wrmask=0;
106 if (ISWRITEBIN(bintail->type)) {
107 WriteBinItem_rcr * td = (WriteBinItem_rcr *)bintail;
108 //last one is to check for SESE blocks in a while loop.
// Same task already has a write at the tail: merge this slot's bit into it
// instead of enqueueing a second item.
109 if(unlikely(td->task == task)) {
112 if (!(bit & td->bitindexwr)) {
// mode path: spin until the tail becomes READY before returning.
117 while(bintail->status!=READY) {
122 return bintail->status;
// Read-bin tail: check the most recently added reader for a same-task match.
130 TraverserData * td = &((ReadBinItem_rcr *)bintail)->array[((ReadBinItem_rcr *)bintail)->index - 1];
131 if(unlikely(td->task == task)) {
132 //if it matches, then we remove it and the code below will upgrade it to a write.
133 ((ReadBinItem_rcr *)bintail)->index--;
134 atomic_dec(&bintail->total);
136 if (bintail->status!=READY)
// Append a new write item behind the tail; its masks fold in any bits
// carried over from the removed reader (rdmask/wrmask).
142 WriteBinItem_rcr *b=rcr_createWriteBinItem();
148 //count already includes this
151 b->bitindexwr=bit|wrmask;
152 b->bitindexrd=bit|rdmask;
153 b->item.status=status;
154 bintail->next=(BinItem_rcr*)b;
155 be->tail=(BinItem_rcr*)b;
// If the old tail already drained (READY with total==0), the new write may
// itself be immediately READY; walk forward from the old head to check.
157 if (bintail->status==READY&&bintail->total==0) {
158 //we may have to set write as ready
159 while(val->total==0) {
160 if (val==((BinItem_rcr *)b)) {
161 b->item.status=READY;
166 enqueuerecord(rcrrec, key, (BinItem_rcr *) b);
// mode path: spin until our own item is marked READY.
177 while(b->item.status==NOTREADY) {
183 enqueuerecord(rcrrec, key, (BinItem_rcr *) b);
// Enqueue a read request by 'task' on T->array[key].  Mirrors
// rcr_BWRITEBINCASE: spin-lock the bin via XCHG on head, detect duplicate
// requests by the same task at the tail, otherwise join/append a read group
// (rcr_TAILREADCASE when the tail is a read bin, rcr_TAILWRITECASE when it
// is a write bin).  Returns a status code.
// NOTE(review): many interior lines are elided; 'val' and 'bit' are declared
// in lines not shown.  Comments annotate only visible code.
188 inline int rcr_BREADBINCASE(HashStructure *T, int key, SESEcommon *task, struct rcrRecord *rcrrec, int index, int mode) {
190 BinElement_rcr * be = &(T->array[key]);
192 //LOCK is still needed as different threads will remove items...
// Spin-lock: swap 0x1 into head until we read back a non-sentinel value.
194 val=(BinItem_rcr *)0x1;
195 val=(BinItem_rcr *)LOCKXCHG((unsigned INTPTR*)&(be->head), (unsigned INTPTR)val);
196 } while(val==(BinItem_rcr*)0x1);
// (Empty-bin path) create a one-reader read group for this task.
199 BinItem_rcr * b=(BinItem_rcr*)rcr_createReadBinItem();
200 ReadBinItem_rcr* readbin=(ReadBinItem_rcr*)b;
201 TraverserData * td = &(readbin->array[readbin->index++]);
205 //common to both types
// Single-bit mask identifying this parameter slot.
207 td->bitindex=1<<index;
212 enqueuerecord(rcrrec, key, b);
// (Non-empty-bin path) examine the current tail.
217 BinItem_rcr * bintail=be->tail;
219 //check if already added item or not.
220 if (ISWRITEBIN(bintail->type)) {
221 WriteBinItem_rcr * td = (WriteBinItem_rcr *)bintail;
// Same task already holds the tail write: merge this read's bit into its
// read mask instead of enqueueing a new item.
222 if(unlikely(td->task==task)) {
225 int status=bintail->status;
226 if (!(td->bitindexrd & bit)) {
// mode path: spin for READY before returning.
233 while(bintail->status!=READY) {
// Read-bin tail: check the most recently added reader for a same-task match.
242 TraverserData * td = &((ReadBinItem_rcr *)bintail)->array[((ReadBinItem_rcr *)bintail)->index - 1];
243 if (unlikely(td->task==task)) {
246 int status=bintail->status;
247 if (!(td->bitindex & bit)) {
253 while(bintail->status!=READY) {
// No duplicate: dispatch on tail type.  Both helpers release the bin lock
// (they store 'val' back into head).
261 if (ISREADBIN(bintail->type)) {
262 int stat=rcr_TAILREADCASE(T, val, bintail, key, task, rcrrec, index);
// mode path: spin until the (possibly new) tail is READY.
264 struct BinItem_rcr * bt=be->tail;
265 while(bt->status!=READY) {
273 rcr_TAILWRITECASE(T, val, bintail, key, task, rcrrec, index);
275 struct BinItem_rcr * bt=be->tail;
276 while(bt->status!=READY) {
// Non-blocking (mode 0) write enqueue; see rcr_BWRITEBINCASE.
287 int rcr_WRITEBINCASE(HashStructure *T, int key, SESEcommon *task, struct rcrRecord *rcrrec, int index) {
288 return rcr_BWRITEBINCASE(T, key, task, rcrrec, index, 0);
// Non-blocking (mode 0) read enqueue; see rcr_BREADBINCASE.
290 int rcr_READBINCASE(HashStructure *T, int key, SESEcommon * task, struct rcrRecord *rcrrec, int index) {
291 return rcr_BREADBINCASE(T, key, task, rcrrec, index, 0);
// Waiting (mode 1) write enqueue; see rcr_BWRITEBINCASE.
294 int rcr_WTWRITEBINCASE(HashStructure *T, int key, SESEcommon *task, struct rcrRecord *rcrrec, int index) {
295 return rcr_BWRITEBINCASE(T, key, task, rcrrec, index, 1);
// Waiting (mode 1) read enqueue; see rcr_BREADBINCASE.
298 int rcr_WTREADBINCASE(HashStructure *T, int key, SESEcommon * task, struct rcrRecord *rcrrec, int index) {
299 return rcr_BREADBINCASE(T, key, task, rcrrec, index, 1);
// Handle a read arriving while the bin's tail is a read bin.  Caller holds
// the bin lock (head == 0x1, true head in 'val').  If the tail read group is
// full (RNUMREAD entries) a new ReadBinItem is linked behind it; otherwise
// this reader joins the existing group.  Writing 'val' back into head
// releases the lock.  Returns the status observed for this reader.
302 int rcr_TAILREADCASE(HashStructure *T, BinItem_rcr *val, BinItem_rcr *bintail, int key, SESEcommon * task, struct rcrRecord * rcrrec, int index) {
303 ReadBinItem_rcr * readbintail=(ReadBinItem_rcr*)T->array[key].tail;
// Status inherited from the tail group -- elided lines compute 'status'.
306 if (readbintail->item.status==READY) {
314 if (readbintail->index==RNUMREAD) { // create new read group
315 ReadBinItem_rcr* rb=rcr_createReadBinItem();
316 td = &rb->array[rb->index++];
319 rb->item.status=status;
320 T->array[key].tail->next=(BinItem_rcr*)rb;
321 T->array[key].tail=(BinItem_rcr*)rb;
322 enqueuerecord(rcrrec, key, (BinItem_rcr *) rb);
323 } else { // group into old tail
324 td = &readbintail->array[readbintail->index++];
325 atomic_inc(&readbintail->item.total);
326 enqueuerecord(rcrrec, key, (BinItem_rcr *) readbintail);
// Record which parameter slot this reader waits on.
330 td->bitindex=1<<index;
332 T->array[key].head=val;//released lock
// Handle a read arriving while the bin's tail is a write bin.  Caller holds
// the bin lock (true head in 'val').  Starts a fresh NOTREADY read group
// containing just this reader, links it behind the write tail, and releases
// the lock by restoring head.
336 void rcr_TAILWRITECASE(HashStructure *T, BinItem_rcr *val, BinItem_rcr *bintail, int key, SESEcommon * task, struct rcrRecord *rcrrec, int index) {
337 ReadBinItem_rcr* rb=rcr_createReadBinItem();
338 TraverserData * td = &(rb->array[rb->index++]);
// Must wait behind the preceding write, so the group starts NOTREADY.
340 rb->item.status=NOTREADY;
343 td->bitindex=1<<index;
344 enqueuerecord(rcrrec, key, (BinItem_rcr *) rb);
346 T->array[key].tail->next=(BinItem_rcr*)rb;
347 T->array[key].tail=(BinItem_rcr*)rb;
348 T->array[key].head=val;//released lock
351 void rcr_RETIREHASHTABLE(HashStructure *T, SESEcommon *task, int key, BinItem_rcr *b) {
352 atomic_dec(&b->total);
353 if(ISREADBIN(b->type)) {
354 if (b->next==NULL || b->total>0) {
359 //We either have a write bin or we are at the end of a read bin
360 BinElement_rcr * be = &(T->array[key]);
362 // CHECK FIRST IF next is nonnull to guarantee that b.total cannot change
363 BinItem_rcr * val=(BinItem_rcr *)0x1;
365 val=(BinItem_rcr*)LOCKXCHG((unsigned INTPTR*)&(be->head), (unsigned INTPTR)val);
366 } while(val==(BinItem_rcr*)0x1);
368 // at this point have locked bin
369 BinItem_rcr *ptr=val;
373 if (ISREADBIN(ptr->type)) {
374 if (ptr->status==NOTREADY) {
375 ReadBinItem_rcr* rptr=(ReadBinItem_rcr*)ptr;
376 for (i=0;i<rptr->index;i++) {
377 TraverserData * td=&rptr->array[i];
378 if (task==td->task) {
379 RESOLVE(td->task, td->bitindex);
380 if (((INTPTR)rptr->array[i].task)&PARENTBIN) {
381 //parents go immediately
382 atomic_dec(&rptr->item.total);
389 if (ptr->next==NULL) {
394 } else if (ptr==val) {
397 } else if (ptr->total==0) {
398 //skip past retired item
405 if(ptr->status==NOTREADY) {
406 WriteBinItem_rcr* wptr=(WriteBinItem_rcr*)ptr;
407 RESOLVE(wptr->task, wptr->bitindexwr);
409 if(((INTPTR)wptr->task)&PARENTBIN) {
417 be->head=val; // release lock
// Clear resolved-dependency bits in 'mask' for the waiting task 'record'.
// For each set bit (position found via __builtin_ctzll in lines partly
// elided) the matching rcrRecord's count is decremented; when a count hits
// zero, either a stalled parent is released via its semaphore (classID<0)
// or the record's flag is cleared and, once unresolvedDependencies reaches
// zero, the task is submitted to the work scheduler.
// NOTE(review): this function continues past the last visible line; the
// surrounding loop/index computation is not fully shown.
421 void RESOLVE(SESEcommon *record, bitvt mask) {
// Per-parameter rcrRecord array lives at a fixed offset inside the record.
423 struct rcrRecord * array=(struct rcrRecord *)(((char *)record)+record->offsetToParamRecords);
// Position of the lowest set bit (+1 so the shift below consumes it).
425 int shift=__builtin_ctzll(mask)+1;
427 if (atomic_sub_and_test(1,&array[index].count)) {
428 if(unlikely(record->classID<0)) {
429 //parent stall...clear it
430 psem_give_tag(record->parentsStallSem, ((SESEstall *)record)->tag);
431 //mark the record unused
// Atomically claim the flag so only one resolver submits the task.
435 int flag=LOCKXCHG32(&array[index].flag,0);
437 if(atomic_sub_and_test(1, &(record->unresolvedDependencies)))
438 workScheduleSubmit((void *)record);