3 #include "multicoregarbage.h"
4 #include "multicoreruntime.h"
5 #include "runtime_arch.h"
6 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
8 #include "ObjectHash.h"
11 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
12 extern int numqueues[][NUMCLASSES];
14 extern struct genhashtable * activetasks;
15 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
16 extern struct taskparamdescriptor *currtpd;
struct pointerblock *next; // NOTE(review): tail of struct pointerblock; its opening lines are elided from this view
// Small-pointer mark queue, built from chained pointerblock nodes:
// head block where gc_enqueue appends
struct pointerblock *gchead=NULL;
// tail block consumed by the destructive gc_dequeue
struct pointerblock *gctail=NULL;
// tail block for the non-destructive cursor used by gc_dequeue2
struct pointerblock *gctail2=NULL;
// one cached free node, reused to avoid repeated RUNMALLOC/RUNFREE
struct pointerblock *gcspare=NULL;
#define NUMLOBJPTRS 20
// Queue node for large objects: parallel arrays recording each object's
// start address, byte length, and the core that enqueued it.
// (The closing brace of this struct is elided from this view.)
struct lobjpointerblock {
void * lobjs[NUMLOBJPTRS];
//void * dsts[NUMLOBJPTRS];
int lengths[NUMLOBJPTRS];
//void * origs[NUMLOBJPTRS];
int hosts[NUMLOBJPTRS];
struct lobjpointerblock *next;
// Large-object queue state, mirroring the small-pointer queue above:
// head/tail node pointers plus the in-node index for each cursor.
struct lobjpointerblock *gclobjhead=NULL;
int gclobjheadindex=0;
struct lobjpointerblock *gclobjtail=NULL;
int gclobjtailindex=0;
// second, non-destructive tail cursor (used by gc_lobjdequeue2/cacheLObjs)
struct lobjpointerblock *gclobjtail2=NULL;
int gclobjtailindex2=0;
// one cached free node for reuse
struct lobjpointerblock *gclobjspare=NULL;
// Debug helper: hex-dump the entire shared memory region via tprintf,
// 16 4-byte words per output line, starting at BAMBOO_BASE_VA.
// (The function's closing lines are elided from this view.)
inline void dumpSMem() {
tprintf("Dump shared mem: \n");
for (int i = BAMBOO_BASE_VA; i < BAMBOO_BASE_VA+BAMBOO_SHARED_MEM_SIZE; i += 4*16)
tprintf("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x \n",
*((int *)(i)), *((int *)(i + 4)), *((int *)(i + 4*2)), *((int *)(i + 4*3)),
*((int *)(i + 4*4)), *((int *)(i + 4*5)), *((int *)(i + 4*6)), *((int *)(i + 4*7)),
*((int *)(i + 4*8)), *((int *)(i + 4*9)), *((int *)(i + 4*10)), *((int *)(i + 4*11)),
*((int *)(i + 4*12)), *((int *)(i + 4*13)), *((int *)(i + 4*14)), *((int *)(i + 4*15)));
// Push a shared-object pointer onto the mark queue at the head block.
// When the head block is full (gcheadindex==NUMPTRS), a new node is
// linked in — reusing gcspare when available, otherwise allocating one.
// (The spare-reuse branch and the linking lines are elided from this view.)
inline void gc_enqueue(void *ptr) {
BAMBOO_DEBUGPRINT(0xe601);
BAMBOO_DEBUGPRINT_REG(ptr);
if (gcheadindex==NUMPTRS) {
struct pointerblock * tmp;
tmp=RUNMALLOC(sizeof(struct pointerblock));
} // if (gcspare!=NULL)
} // if (gcheadindex==NUMPTRS)
gchead->ptrs[gcheadindex++]=ptr;
} // void gc_enqueue(void *ptr)
// dequeue and destroy the queue
// Destructive pop from the tail cursor: when the current tail node is
// exhausted, advance to the next node and recycle the drained one
// (the gcspare/RUNFREE handling is elided from this view).
inline void * gc_dequeue() {
if (gctailindex==NUMPTRS) {
struct pointerblock *tmp=gctail;
} // if (gcspare!=NULL)
} // if (gctailindex==NUMPTRS)
return gctail->ptrs[gctailindex++];
} // void * gc_dequeue()
// dequeue and do not destroy the queue
// Non-destructive pop using the second cursor (gctail2/gctailindex2):
// advances past drained nodes without freeing them, so the queue can
// be re-walked. (The index reset line is elided from this view.)
inline void * gc_dequeue2() {
if (gctailindex2==NUMPTRS) {
struct pointerblock *tmp=gctail2;
gctail2=gctail2->next;
} // if (gctailindex2==NUMPTRS)
return gctail2->ptrs[gctailindex2++];
} // void * gc_dequeue2()
// True while the destructive cursor (gctail/gctailindex) has not caught
// up with the head. (The return statements are elided from this view.)
inline int gc_moreItems() {
if ((gchead==gctail)&&(gctailindex==gcheadindex))
} // int gc_moreItems()
// Same emptiness test for the non-destructive cursor (gctail2/gctailindex2).
// (The return statements are elided from this view.)
inline int gc_moreItems2() {
if ((gchead==gctail2)&&(gctailindex2==gcheadindex))
} // int gc_moreItems2()
// enqueue a large obj: start addr & length
// Appends (ptr, length, host) to the large-object queue at the head node;
// when the head node is full a new node is linked in, reusing gclobjspare
// if available. (Part of the parameter list and the index-reset lines are
// elided from this view.)
inline void gc_lobjenqueue(void *ptr,
BAMBOO_DEBUGPRINT(0xe901);
if (gclobjheadindex==NUMLOBJPTRS) {
struct lobjpointerblock * tmp;
if (gclobjspare!=NULL) {
tmp=RUNMALLOC(sizeof(struct lobjpointerblock));
} // if (gclobjspare!=NULL)
gclobjhead->next=tmp;
} // if (gclobjheadindex==NUMLOBJPTRS)
gclobjhead->lobjs[gclobjheadindex]=ptr;
gclobjhead->lengths[gclobjheadindex]=length;
gclobjhead->hosts[gclobjheadindex++]=host;
BAMBOO_DEBUGPRINT_REG(gclobjhead->lobjs[gclobjheadindex-1]);
BAMBOO_DEBUGPRINT_REG(gclobjhead->lengths[gclobjheadindex-1]);
BAMBOO_DEBUGPRINT_REG(gclobjhead->hosts[gclobjheadindex-1]);
} // void gc_lobjenqueue(void *ptr...)
// dequeue and destroy the queue
// Destructive pop of a large-object record: returns the object's start
// address and writes its length and host core through the out-params.
// Drained nodes are recycled into gclobjspare or freed (elided here).
// NOTE(review): the out-params appear to be written unconditionally —
// assumed non-NULL callers only; verify against the full source.
inline void * gc_lobjdequeue(int * length,
if (gclobjtailindex==NUMLOBJPTRS) {
struct lobjpointerblock *tmp=gclobjtail;
gclobjtail=gclobjtail->next;
if (gclobjspare!=NULL) {
} // if (gclobjspare!=NULL)
} // if (gclobjtailindex==NUMLOBJPTRS)
*length = gclobjtail->lengths[gclobjtailindex];
*host = (int)(gclobjtail->hosts[gclobjtailindex]);
return gclobjtail->lobjs[gclobjtailindex++];
} // void * gc_lobjdequeue()
// Emptiness test for the destructive large-object cursor.
// (The return statements are elided from this view.)
inline int gc_lobjmoreItems() {
if ((gclobjhead==gclobjtail)&&(gclobjtailindex==gclobjheadindex))
} // int gc_lobjmoreItems()
// dequeue and don't destroy the queue
// Advance the non-destructive large-object cursor one entry; nodes are
// kept intact so the queue can be re-walked. (The index bookkeeping
// inside the branch is elided from this view.)
inline void gc_lobjdequeue2() {
if (gclobjtailindex2==NUMLOBJPTRS) {
gclobjtail2=gclobjtail2->next;
}// if (gclobjtailindex2==NUMLOBJPTRS)
} // void * gc_lobjdequeue2()
// Emptiness test for the non-destructive large-object cursor.
// (The return statements are elided from this view.)
inline int gc_lobjmoreItems2() {
if ((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex))
} // int gc_lobjmoreItems2()
INTPTR gccurr_heapbound = 0;
// Determine the type id and byte size of the object at ptr.
// The type id is the first word of the object header; for array types
// (type >= NUMCLASSES) the size is the ArrayObject header plus
// length * element size. (The out-param writes and the closing lines
// are elided from this view.)
inline void gettype_size(void * ptr,
int type = ((int *)ptr)[0];
if(type < NUMCLASSES) {
size = classsize[type];
struct ArrayObject *ao=(struct ArrayObject *)ptr;
int elementsize=classsize[type];
int length=ao->___length___;
size=sizeof(struct ArrayObject)+length*elementsize;
} // if(type < NUMCLASSES)
// Decide whether the object at ptr is a "large object": one that either
// starts exactly on a block boundary or would cross the boundary of its
// current block. Also reports the object's type and size via ttype/tsize.
// Blocks below BAMBOO_LARGE_SMEM_BOUND use the larger BAMBOO_SMEM_SIZE_L
// block size. (Return statements are elided from this view.)
inline bool isLarge(void * ptr,
BAMBOO_DEBUGPRINT(0xe701);
BAMBOO_DEBUGPRINT_REG(ptr);
// check if a pointer is referring to a large object
gettype_size(ptr, ttype, tsize);
BAMBOO_DEBUGPRINT(*tsize);
int bound = (BAMBOO_SMEM_SIZE);
if((int)(ptr-(BAMBOO_BASE_VA)) < (BAMBOO_LARGE_SMEM_BOUND)) {
bound = (BAMBOO_SMEM_SIZE_L);
if((((int)(ptr-(BAMBOO_BASE_VA)))%(bound))==0) {
// ptr is a start of a block
BAMBOO_DEBUGPRINT(0xe702);
BAMBOO_DEBUGPRINT(1);
if((bound-(((int)(ptr-(BAMBOO_BASE_VA)))%bound)) < (*tsize)) {
// it acrosses the boundary of current block
BAMBOO_DEBUGPRINT(0xe703);
BAMBOO_DEBUGPRINT(1);
BAMBOO_DEBUGPRINT(0);
} // bool isLarge(void * ptr, int * ttype, int * tsize)
// Map a shared-heap pointer to the core number that hosts it.
// RESIDECORE yields tile coordinates (x, y); the x!=0 case subtracts 2,
// presumably skipping two reserved tiles in the grid — TODO confirm
// against the platform layout. (Variable declarations and the return
// are elided from this view.)
inline int hostcore(void * ptr) {
// check the host core of ptr
RESIDECORE(ptr, &x, &y);
host = (x==0)?(x*bamboo_height+y):(x*bamboo_height+y-2);
} // int hostcore(void * ptr)
262 inline bool isLocal(void * ptr) {
263 // check if a pointer is in shared heap on this core
264 return hostcore(ptr) == BAMBOO_NUM_OF_CORE;
265 } // bool isLocal(void * ptr)
// Scan gccorestatus[] for all cores; a nonzero entry means that core is
// still working. (The early-exit body and the return of allStall are
// elided from this view.)
inline bool gc_checkCoreStatus() {
bool allStall = true;
for(int i = 0; i < NUMCORES; ++i) {
if(gccorestatus[i] != 0) {
} // if(gccorestatus[i] != 0)
} // for(i = 0; i < NUMCORES; ++i)
// Coordinator-side check for the end of the mark phase (name keeps the
// original's "Statue" typo for ABI/caller compatibility).
// Under the status critical section: record this core's counters, check
// that every core has stalled, and verify that total sent == total
// received transfer-object messages. If so, either broadcast
// GCMARKCONFIRM requests (first pass) or advance gcphase to
// COMPACTPHASE. (Several condition/branch lines are elided from this
// view.)
inline void checkMarkStatue() {
(waitconfirm && (numconfirm == 0))) {
BAMBOO_START_CRITICAL_SECTION_STATUS();
gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
gcnumsendobjs[BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
gcnumreceiveobjs[BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
// check the status of all cores
bool allStall = gc_checkCoreStatus();
// check if the sum of send objs and receive obj are the same
// yes->check if the info is the latest; no->go on executing
for(i = 0; i < NUMCORES; ++i) {
sumsendobj += gcnumsendobjs[i];
} // for(i = 0; i < NUMCORES; ++i)
for(i = 0; i < NUMCORES; ++i) {
sumsendobj -= gcnumreceiveobjs[i];
} // for(i = 0; i < NUMCORES; ++i)
if(0 == sumsendobj) {
// the first time found all cores stall
// send out status confirm msg to all other cores
// reset the corestatus array too
gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
numconfirm = NUMCORES - 1;
for(i = 1; i < NUMCORES; ++i) {
// send mark phase finish confirm request msg to core i
send_msg_1(i, GCMARKCONFIRM);
} // for(i = 1; i < NUMCORES; ++i)
// all the core status info are the latest
gcphase = COMPACTPHASE;
// restore the gcstatus for all cores
for(i = 0; i < NUMCORES; ++i) {
} // for(i = 0; i < NUMCORES; ++i)
} // if(!gcwautconfirm) else()
} // if(0 == sumsendobj)
BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
} // if((!waitconfirm)...
} // void checkMarkStatue()
// Pre-GC barrier: make sure no transfer-object messages are in flight
// before the collection starts. Broadcasts STATUSCONFIRM, busy-waits for
// all confirmations, then checks that the cluster-wide send count equals
// the receive count; returns whether GC may begin. (Several lines,
// including the return statements, are elided from this view.)
inline bool preGC() {
// preparation for gc
// make sure to clear all incoming msgs espacially transfer obj msgs
(waitconfirm && (numconfirm == 0))) {
// send out status confirm msgs to all cores to check if there are
// transfer obj msgs on-the-fly
numconfirm = NUMCORES - 1;
for(i = 1; i < NUMCORES; ++i) {
// send status confirm msg to core i
send_msg_1(i, STATUSCONFIRM);
} // for(i = 1; i < NUMCORES; ++i)
while(numconfirm != 0) {} // wait for confirmations
numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
for(i = 0; i < NUMCORES; ++i) {
sumsendobj += numsendobjs[i];
} // for(i = 1; i < NUMCORES; ++i)
for(i = 0; i < NUMCORES; ++i) {
sumsendobj -= numreceiveobjs[i];
} // for(i = 1; i < NUMCORES; ++i)
if(0 == sumsendobj) {
// still have some transfer obj msgs on-the-fly, can not start gc
} // if(0 == sumsendobj)
// previously asked for status confirmation and do not have all the
// confirmations yet, can not start gc
} // if((!waitconfirm) ||
// Reset all per-core GC bookkeeping arrays, this core's counters, and
// (re)initialize the small-pointer and large-object work queues. Queue
// nodes are reused across collections when they already exist; the
// first collection allocates them with RUNMALLOC.
// NOTE(review): RUNMALLOC results are used unchecked — assumed to abort
// on OOM; verify against the allocator. (Several reset lines and the
// else-branches are elided from this view.)
inline void initGC() {
for(i = 0; i < NUMCORES; ++i) {
gcnumsendobjs[i] = 0;
gcnumreceiveobjs[i] = 0;
gcrequiredmems[i] = 0;
gcfilledblocks[i] = 0;
} // for(i = 0; i < NUMCORES; ++i)
gcself_numsendobjs = 0;
gcself_numreceiveobjs = 0;
gcmarkedptrbound = 0;
gcheadindex=gctailindex=gctailindex2 = 0;
gchead=gctail=gctail2=RUNMALLOC(sizeof(struct pointerblock));
gctailindex = gctailindex2 = gcheadindex;
gctail = gctail2 = gchead;
// initialize the large obj queues
if (gclobjhead==NULL) {
gclobjtailindex2 = 0;
gclobjhead=gclobjtail=gclobjtail2=
RUNMALLOC(sizeof(struct lobjpointerblock));
gclobjtailindex = gclobjtailindex2 = gclobjheadindex;
gclobjtail = gclobjtail2 = gclobjhead;
// compute load balance for all cores
// Sum the surviving-data loads of all cores, derive the post-compaction
// heap top, and from its block index compute blocks-per-core (numbpc),
// the zig-zag fill direction (gcheapdirection), and the topmost core
// (gctopcore). The (x,y)->core formula matches hostcore() above.
// (The return of numbpc and some declarations are elided from this view.)
inline int loadbalance() {
// compute load balance
// get the total loads
gcloads[BAMBOO_NUM_OF_CORE]+=
BAMBOO_SMEM_SIZE*gcreservedsb;//reserved sblocks for sbstartbl
int tloads = gcloads[STARTUPCORE];
for(i = 1; i < NUMCORES; i++) {
tloads += gcloads[i];
int heaptop = BAMBOO_BASE_VA + tloads;
BAMBOO_DEBUGPRINT(0xdddd);
BAMBOO_DEBUGPRINT_REG(tloads);
BAMBOO_DEBUGPRINT_REG(heaptop);
BLOCKINDEX(heaptop, &b);
int numbpc = b / NUMCORES; // num of blocks per core
BAMBOO_DEBUGPRINT_REG(b);
BAMBOO_DEBUGPRINT_REG(numbpc);
gcheapdirection = (numbpc%2 == 0);
RESIDECORE(heaptop, &x, &y);
gctopcore = (x == 0 ? y : x * bamboo_height + y - 2);
BAMBOO_DEBUGPRINT_REG(gctopcore);
} // void loadbalance()
// Temporarily evacuate all marked large objects to the very top of the
// shared heap so compaction can run below them. First pass (using the
// non-destructive cursor) sums their sizes; if the region [dst, end)
// would collide with gcheaptop the caching fails. Second pass memcpy's
// each object downward from the heap end and records the start of the
// cached area in gcheaptop. (Cursor-advance lines, dst updates and the
// return statements are elided from this view.)
inline bool cacheLObjs() {
// check the total mem size need for large objs
BAMBOO_DEBUGPRINT(0xe801);
gclobjtail2 = gclobjtail;
gclobjtailindex2 = gclobjtailindex;
while(gc_lobjmoreItems2()){
size = gclobjtail2->lengths[gclobjtailindex2 - 1];
BAMBOO_DEBUGPRINT_REG(size);
BAMBOO_DEBUGPRINT_REG(sumsize);
} // while(gc_lobjmoreItems2())
// check if there are enough space to cache these large objs
INTPTR dst = (BAMBOO_BASE_VA) + (BAMBOO_SHARED_MEM_SIZE) - sumsize;
if(gcheaptop > dst) {
// do not have enough room to cache large objs
BAMBOO_DEBUGPRINT(0xe802);
BAMBOO_DEBUGPRINT_REG(dst);
gcheaptop = dst; // Note: record the start of cached lobjs with gcheaptop
// cache the largeObjs to the top of the shared heap
gclobjtail2 = gclobjtail;
gclobjtailindex2 = gclobjtailindex;
while(gc_lobjmoreItems2()) {
size = gclobjtail2->lengths[gclobjtailindex2 - 1];
memcpy(dst, gclobjtail2->lobjs[gclobjtailindex2 - 1], size);
BAMBOO_DEBUGPRINT_REG(gclobjtail2->lobjs[gclobjtailindex2-1]);
BAMBOO_DEBUGPRINT(dst-size);
BAMBOO_DEBUGPRINT_REG(size);
} // void cacheLObjs()
// After compaction: move the cached large objects (see cacheLObjs) from
// the top of the shared heap down onto the new heap top. Steps:
//  1. Normalize gcloads[] so each entry is the real per-core heap top,
//     and take the maximum as tmpheaptop.
//  2. Clear gcsbstarttbl, then dequeue each large object, write block
//     headers, memcpy the object to tmpheaptop, pad with -2 bytes, and
//     record old->new address mappings (locally in gcpointertbl, or via
//     a GCLOBJMAPPING message to the object's host core).
//  3. Maintain gcsbstarttbl entries for blocks the object spans
//     (-1 = interior of a large object, address = object start).
// Many flow-control lines (the small-object path, cursor advances,
// declarations of ptr/size/host/cpysize) are elided from this view.
inline void moveLObjs() {
BAMBOO_DEBUGPRINT(0xea01);
// find current heap top
// flush all gcloads to indicate the real heap top on one core
// previous it represents the next available ptr on a core
if((gcloads[0] > ((BAMBOO_BASE_VA)+(BAMBOO_SMEM_SIZE_L)))
&& ((gcloads[0] % (BAMBOO_SMEM_SIZE)) == 0)) {
// edge of a block, check if this is exactly the heaptop
BASEPTR(0, gcfilledblocks[0]-1, &(gcloads[0]));
gcloads[0]+=(gcfilledblocks[0]>1?(BAMBOO_SMEM_SIZE):(BAMBOO_SMEM_SIZE_L));
int tmpheaptop = gcloads[0];
BAMBOO_DEBUGPRINT_REG(tmpheaptop);
for(int i = 1; i < NUMCORES; i++) {
if((gcloads[i] > ((BAMBOO_BASE_VA)+(BAMBOO_SMEM_SIZE_L)))
&& ((gcloads[i] % (BAMBOO_SMEM_SIZE)) == 0)) {
// edge of a block, check if this is exactly the heaptop
BASEPTR(0, gcfilledblocks[i]-1, &gcloads[i]);
gcloads[i]+=(gcfilledblocks[i]>1?(BAMBOO_SMEM_SIZE):(BAMBOO_SMEM_SIZE_L));
if(tmpheaptop < gcloads[i]) {
tmpheaptop = gcloads[i];
BAMBOO_DEBUGPRINT_REG(gcloads[i]);
BAMBOO_DEBUGPRINT_REG(tmpheaptop);
// move large objs from gcheaptop to tmpheaptop
// write the header first
int tomove = (BAMBOO_BASE_VA) + (BAMBOO_SHARED_MEM_SIZE) - gcheaptop;
BAMBOO_DEBUGPRINT(0xea02);
BAMBOO_DEBUGPRINT_REG(tomove);
gcheaptop = tmpheaptop;
// check how many blocks it acrosses
int remain = tmpheaptop-(int)(BAMBOO_BASE_VA);
int b = remain/(BAMBOO_SMEM_SIZE);
// check the remaining space in this block
int bound = (BAMBOO_SMEM_SIZE);
if(remain < (BAMBOO_LARGE_SMEM_BOUND)) {
bound = (BAMBOO_SMEM_SIZE_L);
remain = bound - remain%bound;
// flush the sbstartbl
memset(&(gcsbstarttbl[gcreservedsb]), '\0',
BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE*sizeof(INTPTR));
BAMBOO_DEBUGPRINT(0xea03);
int base = tmpheaptop;
remain -= BAMBOO_CACHE_LINE_SIZE;
tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
while(gc_lobjmoreItems()) {
ptr = (int)(gc_lobjdequeue(&size, &host));
ALIGNSIZE(size, &isize);
// this object acrosses blocks
// close current block, fill its header
*((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
remain = ((tmpheaptop-(BAMBOO_BASE_VA))<(BAMBOO_LARGE_SMEM_BOUND)) ?
BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
remain -= BAMBOO_CACHE_LINE_SIZE;
tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
// move the large obj
memcpy(tmpheaptop, gcheaptop, size);
// fill the remaining space with -2 padding
memset(tmpheaptop+size, -2, isize-size);
BAMBOO_DEBUGPRINT(0xea04);
BAMBOO_DEBUGPRINT_REG(gcheaptop);
BAMBOO_DEBUGPRINT_REG(tmpheaptop);
BAMBOO_DEBUGPRINT_REG(size);
BAMBOO_DEBUGPRINT_REG(isize);
if(host == BAMBOO_NUM_OF_CORE) {
BAMBOO_START_CRITICAL_SECTION();
RuntimeHashadd(gcpointertbl, ptr, tmpheaptop);
BAMBOO_CLOSE_CRITICAL_SECTION();
// send the original host core with the mapping info
send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop);
} // if(host == BAMBOO_NUM_OF_CORE) else ...
// set the gcsbstarttbl
int tmpsbs = 1+(isize-remain-1)/BAMBOO_SMEM_SIZE;
for(int k = 1; k < tmpsbs; k++) {
gcsbstarttbl[b+k] = (INTPTR)(-1);
if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) {
gcsbstarttbl[b] = (INTPTR)(-1);
remain = ((tmpheaptop-(BAMBOO_BASE_VA))<(BAMBOO_LARGE_SMEM_BOUND)) ?
BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
gcsbstarttbl[b] = (INTPTR)(tmpheaptop);
remain = tmpheaptop-(BAMBOO_BASE_VA);
int bound = remain<(BAMBOO_LARGE_SMEM_BOUND)?(BAMBOO_SMEM_SIZE_L):(BAMBOO_SMEM_SIZE);
remain = bound - remain%bound;
} // if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) else ...
// close current block and fill the header
*((int*)base) = isize + BAMBOO_CACHE_LINE_SIZE;
remain -= BAMBOO_CACHE_LINE_SIZE;
tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
// move the large obj
memcpy(tmpheaptop, gcheaptop, size);
// fill the remaining space with -2 padding
memset(tmpheaptop+size, -2, isize-size);
BAMBOO_DEBUGPRINT(0xea05);
BAMBOO_DEBUGPRINT_REG(gcheaptop);
BAMBOO_DEBUGPRINT_REG(tmpheaptop);
BAMBOO_DEBUGPRINT_REG(size);
BAMBOO_DEBUGPRINT_REG(isize);
if(host == BAMBOO_NUM_OF_CORE) {
BAMBOO_START_CRITICAL_SECTION();
RuntimeHashadd(gcpointertbl, ptr, tmpheaptop);
BAMBOO_CLOSE_CRITICAL_SECTION();
// send the original host core with the mapping info
send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop);
} // if(host == BAMBOO_NUM_OF_CORE) else ...
} // if(remain < isize) else ...
} // while(gc_lobjmoreItems())
// close current block, fill the head
*((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
tmpheaptop -= BAMBOO_CACHE_LINE_SIZE;
gcheaptop = tmpheaptop;
BAMBOO_DEBUGPRINT(0xea06);
BAMBOO_DEBUGPRINT_REG(gcheaptop);
} // void moveLObjs()
// Rebuild the free-memory list after GC: the single free region is
// everything from the new heap top (gcheaptop) to the end of shared
// memory, and it is zeroed out here.
// NOTE(review): as visible, tail is assigned tochange and then the very
// same node is RUNFREE'd — that would free a live list node. Lines are
// elided between these statements in this view; verify the free targets
// the OLD tail chain in the full source before changing anything.
inline void updateFreeMemList() {
struct freeMemItem * tochange = bamboo_free_mem_list->head;
if(tochange == NULL) {
bamboo_free_mem_list->head = tochange =
(struct freeMemItem *)RUNMALLOC(sizeof(struct freeMemItem));
// handle the top of the heap
tochange->ptr = gcheaptop;
tochange->size = BAMBOO_SHARED_MEM_SIZE + BAMBOO_BASE_VA - gcheaptop;
// zero out all these spare memory
memset(tochange->ptr, '\0', tochange->size);
if(bamboo_free_mem_list->tail != tochange) {
bamboo_free_mem_list->tail = tochange;
if(bamboo_free_mem_list->tail != NULL) {
RUNFREE(bamboo_free_mem_list->tail);
} // void updateFreeMemList()
// Seed the mark queue with all GC roots on this core: the shadow stack
// (garbagelist chain), every per-class object queue, the current task
// descriptor's parameters, all active task descriptors, and cached
// transferred objects in objqueue. Must only run during MARKPHASE.
// (The phase-check body, loop closers and some iteration lines are
// elided from this view.)
inline void tomark(struct garbagelist * stackptr) {
if(MARKPHASE != gcphase) {
// enqueue current stack
while(stackptr!=NULL) {
BAMBOO_DEBUGPRINT(0xe501);
BAMBOO_DEBUGPRINT_REG(stackptr->size);
BAMBOO_DEBUGPRINT_REG(stackptr->next);
BAMBOO_DEBUGPRINT_REG(stackptr->array[0]);
for(i=0; i<stackptr->size; i++) {
if(stackptr->array[i] != NULL) {
gc_enqueue(stackptr->array[i]);
stackptr=stackptr->next;
BAMBOO_DEBUGPRINT(0xe503);
// enqueue objectsets
for(i=0; i<NUMCLASSES; i++) {
struct parameterwrapper ** queues =
objectqueues[BAMBOO_NUM_OF_CORE][i];
int length = numqueues[BAMBOO_NUM_OF_CORE][i];
for(j = 0; j < length; ++j) {
struct parameterwrapper * parameter = queues[j];
struct ObjectHash * set=parameter->objectset;
struct ObjectNode * ptr=set->listhead;
gc_enqueue((void *)ptr->key);
// euqueue current task descriptor
if(currtpd != NULL) {
BAMBOO_DEBUGPRINT(0xe504);
for(i=0; i<currtpd->numParameters; i++) {
gc_enqueue(currtpd->parameterArray[i]);
BAMBOO_DEBUGPRINT(0xe505);
// euqueue active tasks
struct genpointerlist * ptr=activetasks->list;
struct taskparamdescriptor *tpd=ptr->src;
for(i=0; i<tpd->numParameters; i++) {
gc_enqueue(tpd->parameterArray[i]);
BAMBOO_DEBUGPRINT(0xe506);
// enqueue cached transferred obj
struct QueueItem * tmpobjptr = getHead(&objqueue);
while(tmpobjptr != NULL) {
struct transObjInfo * objInfo =
(struct transObjInfo *)(tmpobjptr->objectptr);
gc_enqueue(objInfo->objptr);
getNextQueueItem(tmpobjptr);
} // void tomark(struct garbagelist * stackptr)
// Mark a reachable object. Shared objects hosted on this core are
// handled locally (body elided in this view); remotely-hosted objects
// trigger a GCMARKEDOBJ message to their host core and bump the
// sent-object counter used by the mark-termination check.
inline void markObj(void * objptr) {
if(ISSHAREDOBJ(objptr)) {
int host = hostcore(objptr);
if(BAMBOO_NUM_OF_CORE == host) {
// send a msg to host informing that objptr is active
send_msg_2(host, GCMARKEDOBJ, objptr);
gcself_numsendobjs++;
} // if(ISSHAREDOBJ(objptr))
} // void markObj(void * objptr)
// Mark-phase worker loop. Drains the mark queue (non-destructive
// cursor): large objects go to the large-object queue under the
// critical section; local unmarked objects are counted into
// gccurr_heaptop (aligned size) and extend gcmarkedptrbound; then every
// pointer field of the object is scanned via pointerarray[type]
// (0 = primitive array, 1 = pointer array, otherwise a table of field
// offsets). Loops until the coordinator moves gcphase past MARKPHASE,
// then reports GCFINISHMARK. Word [6] of the object header is the mark
// flag. (Many closing braces, the markObj calls on scanned fields, and
// declarations of type/size/isize/i/j are elided from this view.)
inline void mark(bool isfirst,
struct garbagelist * stackptr) {
gccurr_heaptop = 0; // record the size of all active objs in this core
// aligned but does not consider block boundaries
gcmarkedptrbound = 0;
while(MARKPHASE == gcphase) {
while(gc_moreItems2()) {
void * ptr = gc_dequeue2();
// check if it is a shared obj
if(ISSHAREDOBJ(ptr)) {
// a shared obj, check if it is a local obj on this core
if(isLarge(ptr, &type, &size)) {
// ptr is a large object
if(((int *)ptr)[6] == 0) {
// not marked and not enqueued
BAMBOO_START_CRITICAL_SECTION();
gc_lobjenqueue(ptr, size, BAMBOO_NUM_OF_CORE);
BAMBOO_CLOSE_CRITICAL_SECTION();
} else if ((isLocal(ptr)) && (((int *)ptr)[6] == 0)) {
// ptr is an unmarked active object on this core
ALIGNSIZE(size, &isize);
gccurr_heaptop += isize;
BAMBOO_DEBUGPRINT(0xaaaa);
BAMBOO_DEBUGPRINT_REG(ptr);
BAMBOO_DEBUGPRINT_REG(isize);
if(ptr + size > gcmarkedptrbound) {
gcmarkedptrbound = ptr + size;
} // if(ptr + size > gcmarkedptrbound)
} // if(isLarge(ptr, &type, &size)) else if(isLocal(ptr))
} // if(ISSHAREDOBJ(ptr))
// scan all pointers in ptr
unsigned INTPTR * pointer;
pointer=pointerarray[type];
/* Array of primitives */
} else if (((INTPTR)pointer)==1) {
/* Array of pointers */
struct ArrayObject *ao=(struct ArrayObject *) ptr;
int length=ao->___length___;
for(j=0; j<length; j++) {
((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
INTPTR size=pointer[0];
for(i=1; i<=size; i++) {
unsigned int offset=pointer[i];
void * objptr=*((void **)(((char *)ptr)+offset));
} // while(!isEmpty(gctomark))
gcbusystatus = false;
// send mark finish msg to core coordinator
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
gcnumsendobjs[BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
gcnumreceiveobjs[BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
send_msg_4(STARTUPCORE, GCFINISHMARK, BAMBOO_NUM_OF_CORE,
gcself_numsendobjs, gcself_numreceiveobjs);
if(BAMBOO_NUM_OF_CORE == 0) {
} // while(MARKPHASE == gcphase)
// Coordinator helper: when no core has spare memory but some cores are
// blocked waiting to move data, direct pending movers to fill the space
// above the current heap top on gctopcore. For each requesting core,
// either start its move locally (STARTUPCORE) or send GCMOVESTART; if a
// request exceeds the remaining space in the current block, advance to
// the next block / next top core following the zig-zag gcheapdirection.
// (The b computation, gctopcore stepping, and several closing braces are
// elided from this view.)
inline void compact2Heaptop() {
// no cores with spare mem and some cores are blocked with pending move
// find the current heap top and make them move to the heap top
int numblocks = gcfilledblocks[gctopcore];
BASEPTR(gctopcore, numblocks, &p);
int remain = b<NUMCORES ? BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
if((gctopcore == STARTUPCORE) && (b == 0)) {
remain -= gcreservedsb*BAMBOO_SMEM_SIZE;
p += gcreservedsb*BAMBOO_SMEM_SIZE;
for(int i = 0; i < NUMCORES; i++) {
if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0)) {
int memneed = gcrequiredmems[i] + BAMBOO_CACHE_LINE_SIZE;
if(STARTUPCORE == i) {
gcdstcore = gctopcore;
gcblock2fill = numblocks + 1;
send_msg_4(i, GCMOVESTART, gctopcore, p, numblocks + 1);
if(memneed < remain) {
gcrequiredmems[i] = 0;
gcloads[gctopcore] += memneed;
// next available block
gcfilledblocks[gctopcore] += 1;
BASEPTR(gctopcore, gcfilledblocks[gctopcore], &newbase);
gcloads[gctopcore] = newbase;
gcrequiredmems[i] -= remain - BAMBOO_CACHE_LINE_SIZE;
gcstopblock[gctopcore]++;
if(gcheapdirection) {
if(gctopcore== NUMCORES) {
gcheapdirection = false;
gcheapdirection = true;
numblocks = gcstopblock[gctopcore];
BASEPTR(gctopcore, numblocks, &p);
remain = b<NUMCORES ? BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
} // if(memneed < remain)
} // if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0))
} // for(i = 0; i < NUMCORES; i++)
} // void compact2Heaptop()
// Coordinator helper: pair cores that finished compaction with spare
// space (gcfilledblocks < gcstopblock) against cores blocked on pending
// memory requests. Matched pairs get the spare space assigned via
// assignSpareMem and a GCMOVESTART dispatch (or direct globals for
// STARTUPCORE). When nothing is running and nothing is blocked, advance
// gcphase to SUBTLECOMPACTPHASE. (Cursor advances for i/j, the
// sourcecore/dstcore selection, and several closers are elided from
// this view.)
inline void resolvePendingMoveRequest() {
bool nosparemem = true;
bool haspending = false;
bool hasrunning = false;
bool noblock = false;
for(i = j = 0; (i < NUMCORES) && (j < NUMCORES);) {
// check if there are cores with spare mem
if(gccorestatus[i] == 0) {
// finished working, check if it still have spare mem
if(gcfilledblocks[i] < gcstopblock[i]) {
// still have spare mem
} // if(gcfilledblocks[i] < gcstopblock[i]) else ...
if(gccorestatus[j] != 0) {
// not finished, check if it has pending move requests
if((gcfilledblocks[j]==gcstopblock[j])&&(gcrequiredmems[j]>0)) {
} // if((gcfilledblocks[i] == gcstopblock[i])...) else ...
} // if(gccorestatus[i] == 0) else ...
if(!nosparemem && haspending) {
gcrequiredmems[dstcore] = assignSpareMem(sourcecore,
gcrequiredmems[dstcore],
if(STARTUPCORE == dstcore) {
gcdstcore = sourcecore;
gcmovestartaddr = startaddr;
gcblock2fill = tomove;
send_msg_4(dstcore, GCMOVESTART, sourcecore, startaddr, tomove);
if(gcrequiredmems[dstcore] == 0) {
} // for(i = 0; i < NUMCORES; i++)
BAMBOO_DEBUGPRINT(0xcccc);
BAMBOO_DEBUGPRINT_REG(hasrunning);
BAMBOO_DEBUGPRINT_REG(haspending);
BAMBOO_DEBUGPRINT_REG(noblock);
if(!hasrunning && !noblock) {
gcphase = SUBTLECOMPACTPHASE;
} // void resovePendingMoveRequest()
// Cursor state shared by the compaction routines below (the struct's
// opening "struct moveHelper {" line is elided from this view).
int numblocks; // block num for heap
INTPTR base; // base virtual address of current heap block
INTPTR ptr; // virtual address of current heap top
int offset; // offset in current heap block
int blockbase; // virtual address of current small block to check
int blockbound; // bound virtual address of current small blcok
int sblockindex; // index of the small blocks
int top; // real size of current heap block to check
int bound; // bound size of current heap block to check
}; // struct moveHelper
// Advance the scan cursor to the next small block: step past the old
// blockbound, move to the next heap block when the current one is
// exhausted, and consult gcsbstarttbl (-1 = interior of a large object,
// skip; nonzero = block's real start address). Finally recompute
// blockbound from the block header and position ptr after the
// cache-line-sized header. (The innernextSBlock label and numblocks
// update are elided from this view.)
inline void nextSBlock(struct moveHelper * orig) {
orig->blockbase = orig->blockbound;
if(orig->blockbase >= orig->bound) {
// end of current heap block, jump to next one
BASEPTR(BAMBOO_NUM_OF_CORE, orig->numblocks, &(orig->base));
orig->bound = orig->base + BAMBOO_SMEM_SIZE;
orig->blockbase = orig->base;
orig->sblockindex = (orig->blockbase-BAMBOO_BASE_VA)/BAMBOO_SMEM_SIZE;
if(gcsbstarttbl[orig->sblockindex] == -1) {
orig->sblockindex += 1;
orig->blockbase += BAMBOO_SMEM_SIZE;
goto innernextSBlock;
} else if(gcsbstarttbl[orig->sblockindex] != 0) {
// not start from the very beginning
orig->blockbase = gcsbstarttbl[orig->sblockindex];
orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
orig->offset = BAMBOO_CACHE_LINE_SIZE;
orig->ptr = orig->blockbase + orig->offset;
} // void nextSBlock(struct moveHelper * orig)
// Initialize the two compaction cursors for this core: "to" (write
// position) starts at the core's first block just past the header, and
// "orig" (read/scan position) starts at the same base, adjusted by
// gcsbstarttbl exactly as in nextSBlock. On STARTUPCORE both skip the
// gcreservedsb reserved sblocks. (Some numblocks/offset initialization
// lines are elided from this view.)
inline void initOrig_Dst(struct moveHelper * orig,
struct moveHelper * to) {
to->top = to->offset = BAMBOO_CACHE_LINE_SIZE;
to->bound = BAMBOO_SMEM_SIZE_L;
BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
to->base += gcreservedsb * BAMBOO_SMEM_SIZE;
to->top += gcreservedsb * BAMBOO_SMEM_SIZE;
to->ptr = to->base + to->offset;
// init the orig ptr
orig->numblocks = 0;
orig->base = to->base;
orig->bound = to->base + BAMBOO_SMEM_SIZE_L;
orig->blockbase = orig->base;
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
orig->sblockindex = gcreservedsb;
orig->sblockindex = (orig->base - BAMBOO_BASE_VA) / BAMBOO_SMEM_SIZE;
if(gcsbstarttbl[orig->sblockindex] == -1) {
BAMBOO_BASE_VA+BAMBOO_SMEM_SIZE*(orig->sblockindex+1);
} else if(gcsbstarttbl[orig->sblockindex] != 0) {
orig->blockbase = gcsbstarttbl[orig->sblockindex];
orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
orig->offset = BAMBOO_CACHE_LINE_SIZE;
orig->ptr = orig->blockbase + orig->offset;
} // void initOrig_Dst(struct moveHelper * orig, struct moveHelper * to)
// Advance the write cursor to the next destination block: top jumps past
// the new block's cache-line header, bound grows by one block, and
// base/ptr/offset are recomputed. (The numblocks increment line is
// elided from this view.)
inline void nextBlock(struct moveHelper * to) {
to->top = to->bound + BAMBOO_CACHE_LINE_SIZE; // header!
to->bound += BAMBOO_SMEM_SIZE;
BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
to->offset = BAMBOO_CACHE_LINE_SIZE;
to->ptr = to->base + to->offset;
} // void nextBlock(struct moveHelper * to)
// endaddr does not contain spaces for headers
// Move one object from the scan cursor (orig) to the write cursor (to).
// Skips -2 padding bytes, decodes the object's type/size (array types
// computed from ArrayObject length), and — when the mark flag (header
// word [6]) says live — copies it to to->ptr, pads the aligned
// remainder with -2, records the old->new mapping in gcpointertbl, and
// debits gccurr_heaptop. When the destination block is full, a -1 end
// marker and the block-header offset are written before advancing.
// Returns whether compaction on this core should stop (return points
// elided). (Many branch closers and cursor-advance lines are elided
// from this view.)
inline bool moveobj(struct moveHelper * orig,
struct moveHelper * to,
if(stopblock == 0) {
BAMBOO_DEBUGPRINT(0xe201);
BAMBOO_DEBUGPRINT_REG(orig->ptr);
BAMBOO_DEBUGPRINT_REG(to->ptr);
while((char)(*((int*)(orig->ptr))) == (char)(-2)) {
if((orig->ptr > orig->bound) || (orig->ptr == orig->blockbound)) {
BAMBOO_DEBUGPRINT(0xe202);
// check the obj's type, size and mark flag
type = ((int *)(orig->ptr))[0];
// end of this block, go to next one
} else if(type < NUMCLASSES) {
size = classsize[type];
struct ArrayObject *ao=(struct ArrayObject *)(orig->ptr);
int elementsize=classsize[type];
int length=ao->___length___;
size=sizeof(struct ArrayObject)+length*elementsize;
mark = ((int *)(orig->ptr))[6];
BAMBOO_DEBUGPRINT(0xe203);
BAMBOO_DEBUGPRINT(0xe204);
// marked obj, copy it to current heap top
// check to see if remaining space is enough
ALIGNSIZE(size, &isize);
if(to->top + isize > to->bound) {
// fill -1 indicating the end of this block
if(to->top != to->bound) {
*((int*)to->ptr) = -1;
//memset(to->ptr+1, -2, to->bound - to->top - 1);
// fill the header of this block and then go to next block
to->offset += to->bound - to->top;
(*((int*)(to->base))) = to->offset;
if(stopblock == to->numblocks) {
// already fulfilled the block
memcpy(to->ptr, orig->ptr, size);
// fill the remaining space with -2
memset(to->ptr+size, -2, isize-size);
// store mapping info
BAMBOO_START_CRITICAL_SECTION();
RuntimeHashadd(gcpointertbl, orig->ptr, to->ptr);
BAMBOO_CLOSE_CRITICAL_SECTION();
gccurr_heaptop -= isize;
to->offset += isize;
BAMBOO_DEBUGPRINT(0xe205);
if((orig->ptr > orig->bound) || (orig->ptr == orig->blockbound)) {
} //bool moveobj(struct moveHelper* orig,struct moveHelper* to,int* endaddr)
// Carve requiredmem (plus one cache-line header) out of sourcecore's
// remaining space. Writes the move start address and block count through
// the out-params; if the current block suffices, bumps gcloads and
// returns (return value elided), otherwise consumes the whole block,
// advances gcloads to the next block base and returns the shortfall.
// (Parts of the parameter list and the closing lines are elided from
// this view.)
inline int assignSpareMem(int sourcecore,
BLOCKINDEX(gcloads[sourcecore], &b);
int boundptr = b<NUMCORES?(b+1)*BAMBOO_SMEM_SIZE_L
:BAMBOO_LARGE_SMEM_BOUND+(b-NUMCORES+1)*BAMBOO_SMEM_SIZE;
int remain = boundptr - gcloads[sourcecore];
int memneed = requiredmem + BAMBOO_CACHE_LINE_SIZE;
*startaddr = gcloads[sourcecore];
*tomove = gcfilledblocks[sourcecore] + 1;
if(memneed < remain) {
gcloads[sourcecore] += memneed;
// next available block
gcfilledblocks[sourcecore] += 1;
BASEPTR(sourcecore, gcfilledblocks[sourcecore], &newbase);
gcloads[sourcecore] = newbase;
return requiredmem-remain;
// Find a stalled core with unused blocks and assign its spare memory to
// the requester via assignSpareMem; if none is available now, park the
// request in gcrequiredmems[] for resolvePendingMoveRequest to satisfy
// later. Returns whether memory was found (return statements elided).
inline bool gcfindSpareMem(int * startaddr,
for(int k = 0; k < NUMCORES; k++) {
if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
// check if this stopped core has enough mem
assignSpareMem(k, requiredmem, tomove, startaddr);
// if can not find spare mem right now, hold the request
gcrequiredmems[requiredcore] = requiredmem;
} //bool gcfindSpareMem(int* startaddr,int* tomove,int mem,int core)
// Drive moveobj over all marked objects on this core until either the
// mark bound is reached or the assigned block quota (gcblock2fill) is
// filled. Reports results to the coordinator (directly on STARTUPCORE,
// else via GCFINISHCOMPACT), and when objects remain, busy-waits for a
// GCMOVESTART assignment (gctomove) and repositions the "to" cursor at
// gcmovestartaddr — possibly on another core, signalled through
// *localcompact. (Parts of the parameter list, loop opener, and several
// branch closers are elided from this view.)
inline bool compacthelper(struct moveHelper * orig,
struct moveHelper * to,
bool * localcompact) {
// scan over all objs in this block, compact the marked objs
// loop stop when finishing either scanning all active objs or
// fulfilled the gcstopblock
bool stop = moveobj(orig, to, gcblock2fill);
} while(orig->ptr < gcmarkedptrbound);
// if no objs have been compact, do nothing,
// otherwise, fill the header of this block
if(to->offset > BAMBOO_CACHE_LINE_SIZE) {
(*((int*)(to->base))) = to->offset;
to->top -= BAMBOO_CACHE_LINE_SIZE;
} // if(to->offset > BAMBOO_CACHE_LINE_SIZE) else ...
*heaptopptr = to->ptr;
*filledblocks = to->numblocks;
BAMBOO_DEBUGPRINT(0xe101);
BAMBOO_DEBUGPRINT_REG(*heaptopptr);
BAMBOO_DEBUGPRINT_REG(*filledblocks);
// send msgs to core coordinator indicating that the compact is finishing
// send compact finish message to core coordinator
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
gcfilledblocks[BAMBOO_NUM_OF_CORE] = *filledblocks;
gcloads[BAMBOO_NUM_OF_CORE] = *heaptopptr;
if(orig->ptr < gcmarkedptrbound) {
if(gcfindSpareMem(&gcmovestartaddr, &gcblock2fill, &gcdstcore,
gccurr_heaptop, BAMBOO_NUM_OF_CORE)) {
gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
if(orig->ptr < gcmarkedptrbound) {
send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
*filledblocks, *heaptopptr, gccurr_heaptop);
// finish compacting
send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
*filledblocks, *heaptopptr, 0);
} // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
if(orig->ptr < gcmarkedptrbound) {
// still have unpacked obj
while(!gctomove) {};
to->ptr = gcmovestartaddr;
to->numblocks = gcblock2fill - 1;
to->bound = (to->numblocks==0)?
BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
to->offset = to->ptr - to->base;
to->top = (to->numblocks==0)?
(to->offset):(to->bound-BAMBOO_SMEM_SIZE+to->offset);
to->offset = BAMBOO_CACHE_LINE_SIZE;
to->ptr += to->offset; // for header
to->top += to->offset;
if(gcdstcore == BAMBOO_NUM_OF_CORE) {
*localcompact = true;
*localcompact = false;
} // void compacthelper()
// Compact-phase entry point for a worker core: verify we really are in the
// compact phase (fatal exit 0xb102 otherwise), allocate and initialize the
// source ('orig') and destination ('to') move helpers, then run
// compacthelper() over this core's region.
// NOTE(review): the tail of this function (e.g. freeing the two RUNMALLOCed
// helpers) is elided in this excerpt -- confirm they are released to avoid
// a per-GC leak.
1327 inline void compact() {
1328 if(COMPACTPHASE != gcphase) {
1329 BAMBOO_EXIT(0xb102);
1332 // initialize pointers for comapcting
1333 struct moveHelper * orig =
1334 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
1335 struct moveHelper * to =
1336 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
1338 initOrig_Dst(orig, to);
1340 int filledblocks = 0;
1341 INTPTR heaptopptr = 0;
1342 bool localcompact = true;
1343 compacthelper(orig, to, &filledblocks, &heaptopptr, &localcompact);
// Translate an object pointer to its post-compaction address.
// For shared objects the old->new mapping is looked up in gcpointertbl
// (under a critical section). On a miss, the mapping is requested from the
// object's host core via a GCMAPREQUEST message; the core then spins on
// gcismapped until the reply arrives and retries the lookup.
// NOTE(review): the return statement(s) and the non-shared-object path are
// elided in this excerpt; presumably dstptr (or objptr unchanged for local
// objects) is returned -- confirm against the full source.
1349 inline void * flushObj(void * objptr) {
1351 BAMBOO_DEBUGPRINT(0xe401);
1353 void * dstptr = NULL;
1354 if(ISSHAREDOBJ(objptr)) {
1356 BAMBOO_DEBUGPRINT(0xe402);
1357 BAMBOO_DEBUGPRINT_REG(objptr);
1359 // a shared obj ptr, change to new address
// gcpointertbl is shared with the message handler, hence the critical
// section around the hash lookup.
1360 BAMBOO_START_CRITICAL_SECTION();
1361 RuntimeHashget(gcpointertbl, objptr, &dstptr);
1362 BAMBOO_CLOSE_CRITICAL_SECTION();
1363 if(NULL == dstptr) {
1365 BAMBOO_DEBUGPRINT(0xe403);
1367 // send msg to host core for the mapping info
1368 gcobj2map = (int)objptr;
1371 send_msg_3(hostcore(objptr), GCMAPREQUEST, (int)objptr,
1372 BAMBOO_NUM_OF_CORE);
// Busy-wait for the GCMAPINFO reply to set gcismapped, then re-read the
// now-populated mapping table.
1373 while(!gcismapped) {}
1374 BAMBOO_START_CRITICAL_SECTION();
1375 RuntimeHashget(gcpointertbl, objptr, &dstptr);
1376 BAMBOO_CLOSE_CRITICAL_SECTION();
1378 } // if(ISSHAREDOBJ(objptr))
1380 BAMBOO_DEBUGPRINT(0xe404);
1383 } // void flushObj(void * objptr, void ** tochange)
// Flush phase: walk every queued (marked) object and rewrite all pointers
// it contains to the objects' post-compaction addresses via flushObj().
// Word [6] of an object header is used as the mark/flush flag: only objects
// with [6]==1 are processed, and the flag is cleared afterwards. Interior
// pointers are found through pointerarray[type]: 0 = primitive array (no
// pointers), 1 = array of pointers, otherwise a table whose first entry is
// the field count followed by byte offsets. Finally the core reports
// GCFINISHFLUSH to the coordinator (or just clears its own status when it
// is the coordinator).
// NOTE(review): several branch/closing lines are elided in this excerpt.
1385 inline void flush() {
1386 while(gc_moreItems()) {
1388 BAMBOO_DEBUGPRINT(0xe301);
1390 void * ptr = gc_dequeue();
1392 BAMBOO_DEBUGPRINT_REG(ptr);
1394 if(((int *)(ptr))[6] == 1) {
// First redirect the object's own (self) pointer mapping.
1395 void * tptr = flushObj(ptr);
1397 BAMBOO_DEBUGPRINT(0xe302);
1402 int type = ((int *)(ptr))[0];
1403 // scan all pointers in ptr
1404 unsigned INTPTR * pointer;
1405 pointer=pointerarray[type];
1407 BAMBOO_DEBUGPRINT(0xe303);
1410 /* Array of primitives */
1412 } else if (((INTPTR)pointer)==1) {
1414 BAMBOO_DEBUGPRINT(0xe304);
1416 /* Array of pointers */
// Element storage starts one int past ___length___ in the array header.
1417 struct ArrayObject *ao=(struct ArrayObject *) ptr;
1418 int length=ao->___length___;
1420 for(j=0; j<length; j++) {
1422 BAMBOO_DEBUGPRINT(0xe305);
1425 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
1427 BAMBOO_DEBUGPRINT_REG(objptr);
1429 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] =
1434 BAMBOO_DEBUGPRINT(0xe306);
// Non-array object: pointer[0] is the number of reference fields,
// pointer[1..size] are their byte offsets within the object.
1436 INTPTR size=pointer[0];
1438 for(i=1; i<=size; i++) {
1440 BAMBOO_DEBUGPRINT(0xe307);
1442 unsigned int offset=pointer[i];
1443 void * objptr=*((void **)(((char *)ptr)+offset));
1445 BAMBOO_DEBUGPRINT_REG(objptr);
1447 *((void **)(((char *)ptr)+offset)) = flushObj(objptr);
1448 } // for(i=1; i<=size; i++)
1449 } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
1450 // restore the mark field, indicating that this obj has been flushed
1451 ((int *)(ptr))[6] = 0;
1452 } // if(((int *)(ptr))[6] == 1)
1453 } // while(moi != NULL)
1455 BAMBOO_DEBUGPRINT(0xe308);
1457 // send flush finish message to core coordinator
1458 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
1459 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
1461 send_msg_2(STARTUPCORE, GCFINISHFLUSH, BAMBOO_NUM_OF_CORE);
1464 BAMBOO_DEBUGPRINT(0xe309);
// Per-core (non-coordinator) collection driver: run the mark phase from the
// local stack roots, then busy-wait on the phase transitions driven by the
// coordinator until the whole collection reaches FINISHPHASE.
// NOTE(review): the lines between the phase waits are elided in this
// excerpt -- presumably they invoke compact() and flush() for the
// corresponding phases; confirm against the full source.
1468 inline void gc_collect(struct garbagelist * stackptr) {
1469 // core collector routine
1470 mark(true, stackptr);
1472 while(FLUSHPHASE != gcphase) {}
1475 while(FINISHPHASE != gcphase) {}
1476 } // void gc_collect(struct garbagelist * stackptr)
// Top-level GC entry point. On core 0 (== STARTUPCORE, the coordinator) it
// drives the whole collection: broadcast GCSTART, run/await the mark phase,
// gather large-object info (GCLOBJREQUEST), load-balance and broadcast
// GCSTARTCOMPACT with per-core block quotas, service compaction and pending
// move requests until all cores finish, then drive the flush phase
// (GCSTARTFLUSH/GCFINISHFLUSH), broadcast GCFINISH, and rebuild the free
// memory list. Any other core simply runs gc_collect() and then invalidates
// its cached shared-memory pointers.
// NOTE(review): many lines (else-branches, closing braces, debug guards)
// are elided in this excerpt; comments below cover visible lines only.
1478 inline void gc(struct garbagelist * stackptr) {
1484 // core coordinator routine
1485 if(0 == BAMBOO_NUM_OF_CORE) {
1487 // not ready to do gc
1493 tprintf("start gc! \n");
1499 gcprocessing = true;
1501 waitconfirm = false;
1503 gcphase = MARKPHASE;
// NOTE(review): this loop bound is `i < NUMCORES - 1`, unlike every other
// broadcast loop below (`i < NUMCORES`), so core NUMCORES-1 would never
// receive GCSTART -- looks like an off-by-one; confirm against elided lines
// before changing.
1504 for(i = 1; i < NUMCORES - 1; i++) {
1505 // send GC start messages to all cores
1506 send_msg_1(i, GCSTART);
1508 bool isfirst = true;
1509 bool allStall = false;
// The coordinator also participates in marking while it waits for the
// phase to end.
1512 while(MARKPHASE == gcphase) {
1513 mark(isfirst, stackptr);
1520 } // while(MARKPHASE == gcphase)
1521 // send msgs to all cores requiring large objs info
1522 numconfirm = NUMCORES - 1;
1523 for(i = 1; i < NUMCORES; ++i) {
1524 send_msg_1(i, GCLOBJREQUEST);
1526 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
// Busy-wait: numconfirm is decremented by the message handler as each
// core's large-object report arrives.
1527 while(numconfirm != 0) {} // wait for responses
1529 tprintf("prepare to cache large objs \n");
1532 // cache all large objs
1534 // no enough space to cache large objs
1535 BAMBOO_EXIT(0xb103);
1537 // predict number of blocks to fill for each core
1538 int numpbc = loadbalance();
// Cores on the "growing" side of the heap (per gcheapdirection/gctopcore)
// get one extra block in their quota.
1539 for(i = 0; i < NUMCORES; ++i) {
1540 //send start compact messages to all cores
1541 if((gcheapdirection) && (i < gctopcore)
1542 || ((!gcheapdirection) && (i > gctopcore))) {
1543 gcstopblock[i] =numpbc + 1;
1544 if(i != STARTUPCORE) {
1545 send_msg_2(i, GCSTARTCOMPACT, numpbc+1);
1548 gcstopblock[i] = numpbc;
1549 if(i != STARTUPCORE) {
1550 send_msg_2(i, GCSTARTCOMPACT, numpbc);
1553 // init some data strutures for compact phase
1555 gcfilledblocks[i] = 0;
1556 gcrequiredmems[i] = 0;
1559 tprintf("mark phase finished \n");
1564 bool finalcompact = false;
1565 // initialize pointers for comapcting
1566 struct moveHelper * orig =
1567 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
1568 struct moveHelper * to =
1569 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
1570 initOrig_Dst(orig, to);
1571 int filledblocks = 0;
1572 INTPTR heaptopptr = 0;
1573 bool finishcompact = false;
1574 bool iscontinue = true;
1575 bool localcompact = true;
// Coordinator compaction loop: alternate between compacting its own region
// and servicing the global state until every core reports done.
1576 while((COMPACTPHASE == gcphase) || (SUBTLECOMPACTPHASE == gcphase)) {
1577 if((!finishcompact) && iscontinue) {
1579 BAMBOO_DEBUGPRINT(0xe001);
1581 finishcompact = compacthelper(orig, to, &filledblocks,
1582 &heaptopptr, &localcompact);
1584 BAMBOO_DEBUGPRINT_REG(finishcompact);
1585 BAMBOO_DEBUGPRINT_REG(gctomove);
1586 BAMBOO_DEBUGPRINT_REG(gcrequiredmems[0]);
1587 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[0]);
1588 BAMBOO_DEBUGPRINT_REG(gcstopblock[0]);
1592 if(gc_checkCoreStatus()) {
1593 // all cores have finished compacting
1594 // restore the gcstatus of all cores
1595 for(i = 0; i < NUMCORES; ++i) {
1596 gccorestatus[i] = 1;
1600 // check if there are spare mem for pending move requires
1601 if(COMPACTPHASE == gcphase) {
1602 resolvePendingMoveRequest();
1605 BAMBOO_DEBUGPRINT(0xe002);
1609 } // if(gc_checkCoreStatus()) else ...
1613 BAMBOO_DEBUGPRINT(0xe003);
1614 BAMBOO_DEBUGPRINT_REG(gcmovestartaddr);
1615 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
1616 BAMBOO_DEBUGPRINT_REG(gctomove);
// A move grant arrived: re-target the destination helper exactly as
// compacthelper() does (bound/base/top from the granted block, plus one
// cache line reserved for the block header).
1618 to->ptr = gcmovestartaddr;
1619 to->numblocks = gcblock2fill - 1;
1620 to->bound = (to->numblocks==0)?
1622 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
1623 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
1624 to->offset = to->ptr - to->base;
1625 to->top = (to->numblocks==0)?
1626 (to->offset):(to->bound-BAMBOO_SMEM_SIZE+to->offset);
1628 to->offset = BAMBOO_CACHE_LINE_SIZE;
1629 to->ptr += to->offset; // for header
1630 to->top += to->offset;
1631 if(gcdstcore == BAMBOO_NUM_OF_CORE) {
1632 localcompact = true;
1634 localcompact = false;
1638 } else if(!finishcompact) {
1643 } // while(COMPACTPHASE == gcphase)
1645 tprintf("prepare to move large objs \n");
1651 tprintf("compact phase finished \n");
// Flush phase: broadcast GCSTARTFLUSH and poll gccorestatus[] until every
// core has reported GCFINISHFLUSH (handler clears the status entries).
1655 gcphase = FLUSHPHASE;
1656 for(i = 1; i < NUMCORES; ++i) {
1657 // send start flush messages to all cores
1658 send_msg_1(i, GCSTARTFLUSH);
1663 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
1664 while(FLUSHPHASE == gcphase) {
1665 // check the status of all cores
1667 for(i = 0; i < NUMCORES; ++i) {
1668 if(gccorestatus[i] != 0) {
1676 } // while(FLUSHPHASE == gcphase)
1677 gcphase = FINISHPHASE;
1678 for(i = 1; i < NUMCORES; ++i) {
1679 // send gc finish messages to all cores
1680 send_msg_1(i, GCFINISH);
1683 tprintf("flush phase finished \n");
1687 // need to create free memory list
1688 updateFreeMemList();
1690 tprintf("gc finished \n");
// Non-coordinator path: run the worker-side collection routine.
1694 gcprocessing = true;
1695 gc_collect(stackptr);
1698 // invalidate all shared mem pointers
// The local cached shared-memory allocator state is stale after compaction;
// drop it so the next allocation re-requests memory.
1699 bamboo_cur_msp = NULL;
1700 bamboo_smem_size = 0;
1703 gcprocessing = false;
1705 } // void gc(struct garbagelist * stackptr)