8 #include "mlp_runtime.h"
9 #include "workschedule.h"
10 #include "methodheaders.h"
// Per-worker-thread scheduler state (one copy per pthread via __thread):
// the task record currently executing on this thread, a counter of child
// tasks it has issued, and a semaphore used when this task must stall.
// NOTE(review): exact semantics of childSESE/stall sem not visible here — confirm.
13 __thread SESEcommon* runningSESE;
14 __thread int childSESE=0;
16 __thread psemaphore runningSESEstallSem;
19 // this is for using a memPool to allocate task records,
20 // pass this into the poolcreate so it will run your
21 // custom init code ONLY for fresh records, reused records
22 // can be returned as is
23 void freshTaskRecordInitializer( void* seseRecord ) {
// One-time init of the synchronization primitives embedded in a task
// record: the condition variable that signals running-children changes
// and the mutex guarding the record.  Reused pool records keep these.
24 SESEcommon* c = (SESEcommon*) seseRecord;
25 pthread_cond_init( &(c->runningChildrenCond), NULL );
26 pthread_mutex_init( &(c->lock), NULL );
// NOTE(review): closing brace elided in this listing (source jumps 26 -> 34).
34 void* mlpAllocSESErecord( int size ) {
// Allocate a raw task record of the requested size via the runtime
// allocator; the printf is presumably inside an elided NULL check
// (source lines 36 and 38+ are missing from this listing — confirm).
35 void* newrec = RUNMALLOC( size );
37 printf( "mlpAllocSESErecord did not obtain memory!\n" );
// Release a task record back to the runtime allocator.
43 void mlpFreeSESErecord( SESEcommon* seseRecord ) {
44 RUNFREE( seseRecord );
47 MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue){
// Build an array of numMemoryQueue freshly created memory queues.
// NOTE(review): declaration of loop index i is elided (line 48 missing).
49 MemoryQueue** newMemoryQueue=(MemoryQueue**)RUNMALLOC( sizeof( MemoryQueue* ) * numMemoryQueue );
50 for(i=0; i<numMemoryQueue; i++){
51 newMemoryQueue[i]=createMemoryQueue();
53 return newMemoryQueue;
// Construct a fine-grained rentry (per-object dependence record) for a
// task issue.  dynID is stored as the tracked pointer; allocation comes
// from the heap or the queue's rentry pool depending on build config.
56 REntry* mlpCreateFineREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, void* dynID){
57 #ifdef OOO_DISABLE_TASKMEMPOOL
58 REntry* newREntry=(REntry*)RUNMALLOC(sizeof(REntry));
60 REntry* newREntry=poolalloc(q->rentrypool);
63 newREntry->seseRec=seseToIssue;
64 newREntry->pointer=dynID;
// Construct a coarse rentry.  Two signatures appear below — presumably
// selected by an elided #ifdef (RCR build takes an extra mask) — confirm.
69 REntry* mlpCreateREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, INTPTR mask) {
71 REntry* mlpCreateREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue) {
73 #ifdef OOO_DISABLE_TASKMEMPOOL
74 REntry* newREntry=(REntry*)RUNMALLOC(sizeof(REntry));
76 REntry* newREntry=poolalloc(q->rentrypool);
79 newREntry->seseRec=seseToIssue;
// ---- Type-predicate helpers -------------------------------------------
// Each classifies an REntry / MemoryQueueItem / BinItem by its type tag.
// Bodies are elided after each condition in this listing; presumably each
// returns nonzero when the condition holds, zero otherwise — confirm.
// Rentry issued by a parent task (any parent flavor)?
86 int isParent(REntry *r) {
87 if (r->type==PARENTREAD || r->type==PARENTWRITE || r->type==PARENTCOARSE) {
94 int isParentCoarse(REntry *r){
95 if (r->type==PARENTCOARSE){
// Fine-grained read (child or parent)?
102 int isFineRead(REntry *r) {
103 if (r->type==READ || r->type==PARENTREAD) {
// Fine-grained write (child or parent)?
110 int isFineWrite(REntry *r) {
111 if (r->type==WRITE || r->type==PARENTWRITE) {
// Coarse-grained conflict entry (child or parent)?
118 int isCoarse(REntry *r){
119 if(r->type==COARSE || r->type==PARENTCOARSE){
120 int isSCC(REntry *r){ // NOTE(review): kept the original tokens below; this line is commentary only
126 int isSCC(REntry *r){
127 if(r->type==SCCITEM){
// Queue-item kind checks (SINGLEITEM doubles as the SCC item type).
134 int isSingleItem(MemoryQueueItem *qItem){
135 if(qItem->type==SINGLEITEM){
142 int isHashtable(MemoryQueueItem *qItem){
143 if(qItem->type==HASHTABLE){
150 int isVector(MemoryQueueItem *qItem){
151 if(qItem->type==VECTOR){
// Bin-item kind checks for hashtable chains.
158 int isReadBinItem(BinItem* b){
159 if(b->type==READBIN){
166 int isWriteBinItem(BinItem* b){
167 if(b->type==WRITEBIN){
// Hash an object id into a bin index by masking with H_MASK.
174 int generateKey(unsigned int data){
175 return (data&H_MASK);
// Allocate a hashtable queue item with NUMBINS empty bins and no
// unresolved-pointer queue.  (Loop index declaration elided at line 179.)
178 Hashtable* createHashtable(){
180 Hashtable* newTable=(Hashtable*)RUNMALLOC(sizeof(Hashtable));
181 newTable->item.type=HASHTABLE;
182 for(i=0;i<NUMBINS;i++){
183 newTable->array[i]=(BinElement*)RUNMALLOC(sizeof(BinElement));
184 newTable->array[i]->head=NULL;
185 newTable->array[i]->tail=NULL;
187 newTable->unresolvedQueue=NULL;
// Allocate a write bin item (holds exactly one write rentry).
191 WriteBinItem* createWriteBinItem(){
192 WriteBinItem* binitem=(WriteBinItem*)RUNMALLOC(sizeof(WriteBinItem));
193 binitem->item.type=WRITEBIN;
// Allocate a read bin item (groups up to NUMREAD read rentries).
197 ReadBinItem* createReadBinItem(){
198 ReadBinItem* binitem=(ReadBinItem*)RUNMALLOC(sizeof(ReadBinItem));
200 binitem->item.type=READBIN;
// Allocate a vector queue item for coarse entries.
204 Vector* createVector(){
205 Vector* vector=(Vector*)RUNMALLOC(sizeof(Vector));
207 vector->item.type=VECTOR;
// NOTE(review): the enclosing createSCC()-style header is elided here
// (source jumps 207 -> 212); these lines build an SCC item tagged SINGLEITEM.
212 SCC* scc=(SCC*)RUNMALLOC(sizeof(SCC));
213 scc->item.type=SINGLEITEM;
// Create a memory queue seeded with a dummy head item (type tag 3) so the
// head/tail pointers are never NULL; optionally attach an rentry pool.
217 MemoryQueue* createMemoryQueue(){
218 MemoryQueue* queue = (MemoryQueue*)RUNMALLOC(sizeof(MemoryQueue));
219 MemoryQueueItem* dummy=(MemoryQueueItem*)RUNMALLOC(sizeof(MemoryQueueItem));
220 dummy->type=3; // dummy type
225 #ifndef OOO_DISABLE_TASKMEMPOOL
226 queue->rentrypool = poolcreate( sizeof(REntry), NULL );
// Dispatch an rentry into the queue structure matching its kind:
// fine reads/writes go to the hashtable, coarse to the vector, and
// SCC items presumably to ADDSCC (that branch body is elided).
// Returns READY/NOTREADY per the chosen handler.
231 int ADDRENTRY(MemoryQueue * q, REntry * r) {
232 if (isFineRead(r) || isFineWrite(r)) {
233 return ADDTABLE(q, r);
234 } else if (isCoarse(r)) {
235 return ADDVECTOR(q, r);
236 } else if (isSCC(r)) {
// Insert a fine-grained rentry into the queue's tail hashtable, creating
// the hashtable first if the current tail is some other item kind.  Also
// handles the "unresolved pointer" path: if the tracked pointer is still
// 0, the rentry is parked on the table's unresolvedQueue (0x1 written via
// LOCKXCHG serves as the queue's spinlock sentinel).
// NOTE(review): many interior lines (returns, else branches, braces) are
// elided in this listing — control flow below is partial.
241 int ADDTABLE(MemoryQueue *q, REntry *r) {
242 if(!isHashtable(q->tail)) {
244 MemoryQueueItem* tail=q->tail;
// Fast path check: a parent rentry against a drained, single-item queue.
245 if (isParent(r) && tail->total==0 && q->tail==q->head) {
250 Hashtable* h=createHashtable();
251 tail->next=(MemoryQueueItem*)h;
252 //************NEED memory barrier here to ensure compiler does not cache Q.tail.status********
253 if (BARRIER() && tail->status==READY && tail->total==0 && q->tail==q->head) {
254 //previous Q item is finished
255 h->item.status=READY;
257 q->tail=(MemoryQueueItem*)h;
258 // handle the the queue item case
259 if(q->head->type==3){
260 q->head=(MemoryQueueItem*)h;
264 //at this point, have table
265 Hashtable* table=(Hashtable*)q->tail;
266 r->qitem=(MemoryQueueItem *) table; // set rentry's hashtable
// Unresolved-pointer path: pointer not yet known, or a queue already exists.
267 if( *(r->pointer)==0 ||
268 ( *(r->pointer)!=0 &&
270 table->unresolvedQueue!=NULL
274 // grab lock on the queue
276 val=(struct Queue*)0x1;
277 val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
278 } while(val==(struct Queue*)0x1);
280 //queue is null, first case
281 if(*(r->pointer)!=0){
282 // check whether pointer is already resolved, or not.
283 table->unresolvedQueue=NULL; //released lock;
284 return ADDTABLEITEM(table,r,TRUE);
286 struct Queue* queue=createQueue();
287 addNewItemBack(queue,r);
288 atomic_inc(&table->item.total);
289 table->unresolvedQueue=queue; // expose new queue
291 // add unresolved rentry at the end of the queue.
292 addNewItemBack(val,r);
293 atomic_inc(&table->item.total);
294 table->unresolvedQueue=val; // released lock
300 // leave this--its a helpful test when things are going bonkers
301 //if( OBJPTRPTR_2_OBJOID( r->pointer ) == 0 ) {
302 // // we started numbering object ID's at 1, if we try to
303 // // hash a zero oid, something BAD is about to happen!
304 // printf( "Tried to insert invalid object type=%d into mem Q hashtable!\n",
305 // OBJPTRPTR_2_OBJTYPE( r->pointer ) );
// Resolved path: hash the object id to a bin, spin-lock the bin head
// (0x1 sentinel again), then dispatch on empty/write/read tail cases.
308 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
311 BinElement* bin=table->array[key];
312 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);//note...talk to me about optimizations here.
313 } while(val==(BinItem*)0x1);
314 //at this point have locked bin
316 return EMPTYBINCASE(table, table->array[key], r, TRUE);
318 if (isFineWrite(r)) {
319 return WRITEBINCASE(table, r, val, key, TRUE);
320 } else if (isFineRead(r)) {
321 return READBINCASE(table, r, val, key, TRUE);
// Insert a resolved rentry directly into the given hashtable: lock the
// target bin (0x1 sentinel via LOCKXCHG) and dispatch to the empty/write/
// read tail case.  `inc` tells the callee whether to bump the table's
// total (FALSE when the caller pre-counted, e.g. buffered resolution).
// NOTE(review): interior lines elided; val declaration and the empty-bin
// test are not visible in this listing.
326 int ADDTABLEITEM(Hashtable* table, REntry* r, int inc){
329 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
332 BinElement* bin=table->array[key];
333 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
334 } while(val==(BinItem*)0x1);
335 //at this point have locked bin
337 return EMPTYBINCASE(table, table->array[key], r, inc);
339 if (isFineWrite(r)) {
340 return WRITEBINCASE(table, r, val, key, inc);
341 } else if (isFineRead(r)) {
342 return READBINCASE(table, r, val, key, inc);
// Bin is empty: wrap r in a fresh write or read bin item and install it
// as the bin's sole entry.  If the whole table is READY the new entry is
// immediately runnable.  Writing be->head releases the bin spinlock.
// NOTE(review): several interior lines elided (status assignment, tail
// hookup, return values) — confirm against full source.
347 int EMPTYBINCASE(Hashtable *T, BinElement* be, REntry *r, int inc) {
350 if (isFineWrite(r)) {
351 b=(BinItem*)createWriteBinItem();
352 ((WriteBinItem*)b)->val=r;//<-only different statement
353 } else if (isFineRead(r)) {
354 b=(BinItem*)createReadBinItem();
355 ReadBinItem* readbin=(ReadBinItem*)b;
356 readbin->array[readbin->index++]=r;
360 if (T->item.status==READY) {
361 //current entry is ready
365 be->head=NULL; // released lock
374 atomic_inc(&T->item.total);
376 r->qitem=(MemoryQueueItem *)T;
379 be->head=b;//released lock
// Append a write rentry to a non-empty bin.  A write is READY only when
// nothing precedes it in the bin chain and the table itself is READY;
// the loop walks from the locked head (val) to check for predecessors.
// Writing be->head releases the bin spinlock.
// NOTE(review): retval initialization and several branch bodies are
// elided in this listing.
383 int WRITEBINCASE(Hashtable *T, REntry *r, BinItem *val, int key, int inc) {
384 //chain of bins exists => tail is valid
385 //if there is something in front of us, then we are not ready
388 BinElement* be=T->array[key];
390 BinItem *bintail=be->tail;
392 WriteBinItem *b=createWriteBinItem();
396 // note: If current table clears all dependencies, then write bin is ready
400 atomic_inc(&T->item.total);
403 r->qitem=(MemoryQueueItem *)T;
404 r->binitem=(BinItem*)b;
406 be->tail->next=(BinItem*)b;
407 //need to check if we can go...
409 if (T->item.status==READY) {
410 for(;val!=NULL;val=val->next) {
411 if (val==((BinItem *)b)) {
415 b->item.status=retval;//unsure if really needed at this point..
416 be->head=NULL; // released lock
420 } else if (val->total!=0) {
426 b->item.status=retval;
427 be->tail=(BinItem*)b;
// Append a read rentry: group it into the tail read bin if one exists,
// otherwise start a new read group after the tail write bin.
// NOTE(review): TAILWRITECASE is void yet this path's return is elided
// (source jumps 437 -> 442) — presumably a NOTREADY return follows; confirm.
432 int READBINCASE(Hashtable *T, REntry *r, BinItem *val, int key, int inc) {
433 BinItem * bintail=T->array[key]->tail;
434 if (isReadBinItem(bintail)) {
435 return TAILREADCASE(T, r, val, bintail, key, inc);
436 } else if (!isReadBinItem(bintail)) {
437 TAILWRITECASE(T, r, val, bintail, key, inc);
// Tail of the bin is a read group: either join it, or — when it is full
// (index==NUMREAD) — start a new read group chained after it.  The new
// entry inherits the tail group's readiness.  Writing head releases the
// bin spinlock.
// NOTE(review): the `status` computation and early-return paths are
// partially elided in this listing.
442 int TAILREADCASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc) {
443 ReadBinItem * readbintail=(ReadBinItem*)T->array[key]->tail;
445 if (readbintail->item.status==READY) {
449 T->array[key]->head=val;//released lock
457 if (readbintail->index==NUMREAD) { // create new read group
458 ReadBinItem* rb=createReadBinItem();
459 rb->array[rb->index++]=r;
460 rb->item.total=1;//safe only because item could not have started
461 rb->item.status=status;
462 T->array[key]->tail->next=(BinItem*)rb;
463 T->array[key]->tail=(BinItem*)rb;
464 r->binitem=(BinItem*)rb;
465 } else { // group into old tail
466 readbintail->array[readbintail->index++]=r;
467 atomic_inc(&readbintail->item.total);
468 r->binitem=(BinItem*)readbintail;
471 atomic_inc(&T->item.total);
473 r->qitem=(MemoryQueueItem *)T;
474 T->array[key]->head=val;//released lock
// Tail of the bin is a write: start a fresh NOTREADY read group holding r
// and chain it after the write.  Writing head releases the bin spinlock.
// The commented-out WriteBinItem lines appear to be dead experiments kept
// by the original author.
478 void TAILWRITECASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc) {
479 // WriteBinItem* wb=createWriteBinItem();
481 //wb->item.total=1;//safe because item could not have started
482 //wb->item.status=NOTREADY;
483 ReadBinItem* rb=createReadBinItem();
484 rb->array[rb->index++]=r;
485 rb->item.total=1;//safe because item could not have started
486 rb->item.status=NOTREADY;
488 atomic_inc(&T->item.total);
490 r->qitem=(MemoryQueueItem *)T;
491 r->binitem=(BinItem*)rb;
492 T->array[key]->tail->next=(BinItem*)rb;
493 T->array[key]->tail=(BinItem*)rb;
494 T->array[key]->head=val;//released lock
// Insert a coarse rentry into the queue's tail vector, creating a new
// vector when the tail is a different item kind or the current vector is
// full (index==NUMITEMS).  The LOCKXCHG on the array slot races against a
// dispatcher that may already be draining the vector; losing the race
// returns NOTREADY so accounting stays correct.
// NOTE(review): index capture, early returns and closing braces are
// elided throughout this listing.
497 int ADDVECTOR(MemoryQueue *Q, REntry *r) {
498 if(!isVector(Q->tail)) {
// Fast path: parent coarse entry against a drained single-item queue.
500 if (isParentCoarse(r) && Q->tail->total==0 && Q->tail==Q->head) {
505 Vector* V=createVector();
506 Q->tail->next=(MemoryQueueItem*)V;
507 //************NEED memory barrier here to ensure compiler does not cache Q.tail.status******
508 if (BARRIER() && Q->tail->status==READY&&Q->tail->total==0) {
509 //previous Q item is finished
510 V->item.status=READY;
512 Q->tail=(MemoryQueueItem*)V;
513 // handle the the queue item case
514 if(Q->head->type==3){
515 Q->head=(MemoryQueueItem*)V;
518 //at this point, have vector
519 Vector* V=(Vector*)Q->tail;
520 if (V->index==NUMITEMS) {
// Current vector full: chain a new one and inherit readiness.
524 V->item.status=NOTREADY;
525 Q->tail->next=(MemoryQueueItem*)V;
526 //***NEED memory barrier here to ensure compiler does not cache Q.tail.status******
527 if (BARRIER() && Q->tail->status==READY) {
528 V->item.status=READY;
530 Q->tail=(MemoryQueueItem*)V;
533 atomic_inc(&V->item.total);
537 //*****NEED memory barrier here to ensure compiler does not reorder writes to V.array and V.index
540 //*****NEED memory barrier here to ensure compiler does not cache V.status*********
541 r->qitem=(MemoryQueueItem *)V;
542 if (BARRIER() && V->item.status==READY) {
544 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(V->array[index]), (unsigned INTPTR)flag);
546 if (isParentCoarse(r)) { //parent's retire immediately
547 atomic_dec(&V->item.total);
552 return NOTREADY;//<- means that some other dispatcher got this one...so need to do accounting correctly
560 //SCC's don't come in parent variety
// Insert an SCC rentry: append a new SCC item at the tail; if the
// previous item is fully drained the SCC is immediately READY and the
// LOCKXCHG on S->val decides whether this thread or a concurrent
// dispatcher claims it (losing returns NOTREADY for correct accounting).
// NOTE(review): S's creation and several returns are elided here.
561 int ADDSCC(MemoryQueue *Q, REntry *r) {
566 r->qitem=(MemoryQueueItem *)S;
567 Q->tail->next=(MemoryQueueItem*)S;
568 //*** NEED BARRIER HERE
569 if (BARRIER() && Q->tail->status==READY && Q->tail->total==0 && Q->tail==Q->head) {
570 //previous Q item is finished
571 S->item.status=READY;
572 Q->tail=(MemoryQueueItem*)S;
573 // handle the the queue item case
574 if(Q->head->type==3){
575 Q->head=(MemoryQueueItem*)S;
578 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
582 return NOTREADY;//<- means that some other dispatcher got this one...so need to do accounting correctly
585 Q->tail=(MemoryQueueItem*)S;
// Retire a completed rentry: dispatch to the structure-specific retire
// routine by kind (coarse/SCC branch bodies are elided here), then return
// the record to the queue's rentry pool when pooling is enabled.
591 void RETIRERENTRY(MemoryQueue* Q, REntry * r) {
592 if (isFineWrite(r)||isFineRead(r)) {
593 RETIREHASHTABLE(Q, r);
594 } else if (isCoarse(r)) {
596 } else if (isSCC(r)) {
599 #ifndef OOO_DISABLE_TASKMEMPOOL
600 poolfreeinto(Q->rentrypool, r);
// Retire an SCC item: zero its total (single owner, so no atomic needed
// per the original comment) — chain resolution presumably follows in the
// elided lines.
604 void RETIRESCC(MemoryQueue *Q, REntry *r) {
605 SCC* s=(SCC *)r->qitem;
606 s->item.total=0;//don't need atomicdec
// Retire a fine-grained rentry: retire its bin, drop the table's total,
// and when the table is drained and has a successor, resolve the chain
// (elided call — confirm).  Ordering of next!=NULL before total==0 is
// noted as crucial elsewhere in this file.
611 void RETIREHASHTABLE(MemoryQueue *q, REntry *r) {
612 Hashtable *T=(Hashtable *)r->qitem;
613 BinItem *b=r->binitem;
615 atomic_dec(&T->item.total);
616 if (T->item.next!=NULL && T->item.total==0) {
// Retire one rentry's bin item.  If this finishes a write, or the last
// reader of a read group with a successor, lock the bin (0x1 sentinel)
// and advance: resolve dependencies of every rentry in the next bin item,
// retiring parent entries immediately.  Writing head releases the lock.
// NOTE(review): ptr derivation, several else-branches and returns are
// elided in this listing — the duplicated write-bin blocks below are
// presumably the NOTREADY vs already-resolved arms; confirm.
621 void RETIREBIN(Hashtable *T, REntry *r, BinItem *b) {
622 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
624 atomic_dec(&b->total);
626 if (isFineWrite(r) || (isFineRead(r) && b->next!=NULL && b->total==0)) {
627 // CHECK FIRST IF next is nonnull to guarantee that b.total cannot change
631 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(T->array[key]->head), (unsigned INTPTR)val);
632 } while(val==(BinItem*)0x1);
633 // at this point have locked bin
638 if (isReadBinItem(ptr)) {
639 ReadBinItem* rptr=(ReadBinItem*)ptr;
640 if (rptr->item.status==NOTREADY) {
641 for (i=0;i<rptr->index;i++) {
642 resolveDependencies(rptr->array[i]);
643 if (isParent(rptr->array[i])) {
644 //parents go immediately
645 atomic_dec(&rptr->item.total);
646 atomic_dec(&T->item.total);
650 rptr->item.status=READY;
651 if (rptr->item.next==NULL) {
654 if (rptr->item.total!=0) {
656 } else if ((BinItem*)rptr==val) {
659 } else if(isWriteBinItem(ptr)) {
662 if(ptr->status==NOTREADY){
663 resolveDependencies(((WriteBinItem*)ptr)->val);
665 if(isParent(((WriteBinItem*)ptr)->val)){
666 atomic_dec(&T->item.total);
670 }else{ // write bin is already resolved
674 if(ptr->status==NOTREADY) {
675 resolveDependencies(((WriteBinItem*)ptr)->val);
678 if (isParent(((WriteBinItem*)ptr)->val)) {
679 atomic_dec(&T->item.total);
689 T->array[key]->head=val; // release lock
// Retire a coarse rentry from its vector; when the vector is drained and
// has a successor, chain resolution presumably follows (elided).  The
// original stresses that testing next before total is order-critical.
694 void RETIREVECTOR(MemoryQueue *Q, REntry *r) {
695 Vector* V=(Vector *)r->qitem;
696 atomic_dec(&V->item.total);
697 if (V->item.next!=NULL && V->item.total==0) { //NOTE: ORDERING CRUCIAL HERE
// Walk the memory queue from the head, marking items READY and resolving
// their contained rentries by kind; a finished head is popped via CAS
// (retry-free: a failed CAS just means another thread advanced it).
// NOTE(review): loop structure and break conditions partially elided.
702 void RESOLVECHAIN(MemoryQueue *Q) {
704 MemoryQueueItem* head=Q->head;
705 if (head->next==NULL||head->total!=0) {
706 //item is not finished
707 if (head->status!=READY) {
708 //need to update status
710 if (isHashtable(head)) {
711 RESOLVEHASHTABLE(Q, (Hashtable *) head);
712 } else if (isVector(head)) {
713 RESOLVEVECTOR(Q, (Vector *) head);
714 } else if (isSingleItem(head)) {
715 RESOLVESCC((SCC *)head);
717 if (head->next==NULL)
724 MemoryQueueItem* nextitem=head->next;
725 CAS((unsigned INTPTR*)&(Q->head), (unsigned INTPTR)head, (unsigned INTPTR)nextitem);
726 //oldvalue not needed... if we fail we just repeat
// A hashtable item just became READY: for every bin, lock it (0x1
// sentinel), resolve the head bin item's rentries (write: its single
// rentry; read group: all members), retiring parent entries immediately,
// then mark read groups READY and release the bin lock.
// NOTE(review): ptr derivation and some braces are elided here.
731 void RESOLVEHASHTABLE(MemoryQueue *Q, Hashtable *T) {
733 for (binidx=0;binidx<NUMBINS;binidx++) {
734 BinElement* bin=T->array[binidx];
738 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
739 } while (val==(BinItem*)1);
740 //at this point have locked bin
743 if(ptr!=NULL&&ptr->status==NOTREADY) {
745 if (isWriteBinItem(ptr)) {
748 resolveDependencies(((WriteBinItem*)ptr)->val);
750 if (isParent(((WriteBinItem*)ptr)->val)) {
751 atomic_dec(&T->item.total);
755 } else if (isReadBinItem(ptr)) {
757 ReadBinItem* rptr=(ReadBinItem*)ptr;
758 for(i=0;i<rptr->index;i++) {
759 resolveDependencies(rptr->array[i]);
760 if (isParent(rptr->array[i])) {
761 atomic_dec(&rptr->item.total);
762 atomic_dec(&T->item.total);
765 if (rptr->item.next==NULL||rptr->item.total!=0) {
767 } else if((BinItem*)rptr==val) {
770 rptr->item.status=READY;
775 bin->head=val; // released lock;
// A vector item became READY: claim each slot via LOCKXCHG (racing with
// ADDVECTOR inserters), resolve the claimed rentry's dependencies, and
// continue into chained successor vectors.
// NOTE(review): val initialization and loop wrappers are elided here.
779 void RESOLVEVECTOR(MemoryQueue *q, Vector *V) {
785 for (i=0;i<NUMITEMS;i++) {
787 val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(tmp->array[i]), (unsigned INTPTR)val);
789 resolveDependencies(val);
791 atomic_dec(&tmp->item.total);
795 if (tmp->item.next!=NULL&&isVector(tmp->item.next)) {
796 tmp=(Vector*)tmp->item.next;
// An SCC item became READY: atomically claim its single rentry via
// LOCKXCHG (racing with the inserting thread) and resolve it if won.
803 void RESOLVESCC(SCC *S) {
804 //precondition: SCC's state is READY
806 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
808 resolveDependencies(flag);
// A tracked conflict for this rentry has cleared.  Decrement the issuing
// task's unresolved-dependency count and submit it to the work scheduler
// when it hits zero; parent-variety entries instead release the parent's
// stall semaphore.  The first arm (COARSE/SCCITEM with an rcrRecord mask)
// appears to belong to an RCR build — the two halves below are presumably
// separated by an elided #ifdef; confirm against full source.
813 void resolveDependencies(REntry* rentry){
814 SESEcommon* seseCommon=(SESEcommon*)rentry->seseRec;
815 int type=rentry->type;
817 if (type==COARSE||type==SCCITEM) {
818 struct rcrRecord * array=(struct rcrRecord *)(((char *)seseCommon)+seseCommon->offsetToParamRecords);
819 INTPTR mask=rentry->mask;
// __builtin_ctzll finds the lowest set bit; +1 so the shift consumes it.
822 int shift=__builtin_ctzll(mask)+1;
825 if(atomic_sub_and_test(1, &array[index].flag)) {
826 if(atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)))
827 workScheduleSubmit((void *)seseCommon);
830 } else if (type==PARENTCOARSE) {
831 psem_give_tag(rentry->parentStallSem, rentry->tag);
833 printf("ERROR: REntry type %d should never be generated in RCR..\n", rentry->type);
836 if(type==READ || type==WRITE || type==COARSE || type==SCCITEM){
837 if( atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)) ){
838 workScheduleSubmit(seseCommon);
840 }else if(type==PARENTREAD || type==PARENTWRITE || type==PARENTCOARSE){
841 psem_give_tag(rentry->parentStallSem, rentry->tag);
// Reset the queue's per-bin staging buffer before buffered insertion
// (bin-clearing loop body elided in this listing).
846 void INITIALIZEBUF(MemoryQueue * q){
848 for(i=0; i<NUMBINS; i++){
// Stage an rentry into the queue's flat buffer; bufcount increment is
// presumably in an elided following line — confirm.
854 void ADDRENTRYTOBUF(MemoryQueue * q, REntry * r){
855 q->buf[q->bufcount]=r;
// Flush the staging buffer into a specific hashtable in dependency-safe
// order: (1) record the first write per bin, (2) enqueue reads whose bin
// has no pending write, (3) enqueue one write per bin.  ADDTABLEITEM is
// called with inc=FALSE because totals were pre-counted when the entries
// were parked on the unresolved queue.  Entries that come back READY have
// their dependencies resolved immediately.
// NOTE(review): r's extraction from buf and several closing braces are
// elided in this listing.
859 int RESOLVEBUFFORHASHTABLE(MemoryQueue * q, Hashtable* table, SESEcommon *seseCommon){
861 // first phase: only consider write rentry
862 for(i=0; i<q->bufcount;i++){
865 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
866 if(q->binbuf[key]==NULL){
867 // for multiple writes, add only the first write that hashes to the same bin
874 // second phase: enqueue read items if it is eligible
875 for(i=0; i<q->bufcount;i++){
877 if(r!=NULL && r->type==READ){
878 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
879 if(q->binbuf[key]==NULL){
880 // read item that hashes to the bin which doen't contain any write
881 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
882 if(ADDTABLEITEM(table, r, FALSE)==READY){
883 resolveDependencies(r);
890 // then, add only one of write items that hashes to the same bin
891 for(i=0; i<q->bufcount;i++){
894 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
895 if(ADDTABLEITEM(table, r, FALSE)==READY){
896 resolveDependencies(r);
// Flush the staging buffer into the queue.  If any staged rentry's
// tracked pointer is still unresolved (*pointer==0), fall back to plain
// ADDRENTRY for everything (deferral handled inside ADDTABLE); otherwise
// use the same three-phase bin-ordered insertion as
// RESOLVEBUFFORHASHTABLE, resolving entries that come back READY.
// NOTE(review): r extraction, the unresolved-set bookkeeping and return
// value accumulation are elided in this listing.
902 int RESOLVEBUF(MemoryQueue * q, SESEcommon *seseCommon){
905 // check if every waiting entry is resolved
906 // if not, defer every items for hashtable until it is resolved.
907 int unresolved=FALSE;
908 for(i=0; i<q->bufcount;i++){
910 if(*(r->pointer)==0){
914 if(unresolved==TRUE){
915 for(i=0; i<q->bufcount;i++){
919 if(ADDRENTRY(q,r)==NOTREADY){
926 // first phase: only consider write rentry
927 for(i=0; i<q->bufcount;i++){
930 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
931 if(q->binbuf[key]==NULL){
932 // for multiple writes, add only the first write that hashes to the same bin
939 // second phase: enqueue read items if it is eligible
940 for(i=0; i<q->bufcount;i++){
942 if(r!=NULL && r->type==READ){
943 int key=generateKey( OBJPTRPTR_2_OBJOID( r->pointer ) );
944 if(q->binbuf[key]==NULL){
945 // read item that hashes to the bin which doen't contain any write
946 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
947 if(ADDRENTRY(q,r)==NOTREADY){
955 // then, add only one of write items that hashes to the same bin
956 for(i=0; i<q->bufcount;i++){
959 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
960 if(ADDRENTRY(q,r)==NOTREADY){
// Called when a dynamic pointer an rentry was waiting on becomes known.
// Lock the table's unresolvedQueue (0x1 sentinel); if this rentry is at
// the queue head, drain consecutive now-resolved entries: buffered-mode
// entries are batched per-SESE through INITIALIZEBUF/ADDRENTRYTOBUF and
// flushed with RESOLVEBUFFORHASHTABLE when the SESE changes; others go
// straight into the table via ADDTABLEITEM (inc=FALSE — totals were
// counted at park time).  Setting unresolvedQueue to NULL returns the
// table to normal mode; otherwise the queue pointer is restored to
// release the lock.
// NOTE(review): the drain loop header, isEmpty checks and several braces
// are elided in this listing; the inner `rentry` shadows the parameter.
969 void resolvePointer(REntry* rentry){
970 Hashtable* table=(Hashtable *)rentry->qitem;
972 if(table==NULL || table->unresolvedQueue==NULL){
973 //resolved already before related rentry is enqueued to the waiting queue
978 val=(struct Queue*)0x1;
979 val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
981 } while(val==(struct Queue*)0x1);
983 getHead(val)!=NULL &&
984 getHead(val)->objectptr==rentry){
985 // handling pointer is the first item of the queue
986 // start to resolve until it reaches unresolved pointer or end of queue
987 INTPTR currentSESE=0;
989 struct QueueItem* head=getHead(val);
991 REntry* rentry=(REntry*)head->objectptr;
992 if(*(rentry->pointer)==0){
993 // encounters following unresolved pointer
994 table->unresolvedQueue=val;//released lock
997 removeItem(val,head);
999 //now, address is resolved
1001 //check if rentry is buffer mode
1002 if(rentry->isBufMode==TRUE){
1004 queue=rentry->queue;
1005 INITIALIZEBUF(queue);
1006 currentSESE=(INTPTR)rentry;
1007 ADDRENTRYTOBUF(queue,rentry);
1008 } else if(currentSESE==(INTPTR)rentry){
1009 ADDRENTRYTOBUF(queue,rentry);
1010 } else if(currentSESE!=(INTPTR)rentry){
1011 RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
1012 currentSESE=(INTPTR)rentry;
1013 INITIALIZEBUF(queue);
1014 ADDRENTRYTOBUF(rentry->queue,rentry);
1018 //previous SESE has buf mode, need to invoke resolve buffer
1019 RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
1023 if(ADDTABLEITEM(table, rentry, FALSE)==READY){
1024 resolveDependencies(rentry);
1028 table->unresolvedQueue=NULL; // set hashtable as normal-mode.
1033 // resolved rentry is not head of queue
1034 table->unresolvedQueue=val;//released lock;
1038 void rehashMemoryQueue(SESEcommon* seseParent){