3 #include "multicoregarbage.h"
4 #include "multicoreruntime.h"
5 #include "runtime_arch.h"
6 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
8 #include "ObjectHash.h"
11 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
12 extern int numqueues[][NUMCLASSES];
14 extern struct genhashtable * activetasks;
15 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
16 extern struct taskparamdescriptor *currtpd;
// NOTE(review): this extraction is missing interior lines (the residual
// original line numbers are discontinuous); struct bodies below are
// visibly incomplete — do not assume the braces shown are balanced.
// Tail of "struct pointerblock": the opening of the struct and its
// ptrs[NUMPTRS] payload array are not visible in this chunk.
20 struct pointerblock *next;
// Mark-queue state: a linked list of pointerblocks with one enqueue
// cursor (gchead) and two dequeue cursors (gctail = destructive scan,
// gctail2 = non-destructive second scan); gcspare caches one freed
// block for reuse so the hot path avoids RUNMALLOC.
23 struct pointerblock *gchead=NULL;
25 struct pointerblock *gctail=NULL;
27 struct pointerblock *gctail2=NULL;
29 struct pointerblock *gcspare=NULL;
// Large-object queue: each block batches up to NUMLOBJPTRS entries of
// (start address, length in bytes, host core id).
31 #define NUMLOBJPTRS 20
33 struct lobjpointerblock {
34 void * lobjs[NUMLOBJPTRS];
35 //void * dsts[NUMLOBJPTRS];
36 int lengths[NUMLOBJPTRS];
37 //void * origs[NUMLOBJPTRS];
38 int hosts[NUMLOBJPTRS];
39 struct lobjpointerblock *next;
// Cursors for the large-object queue, mirroring the small pointer
// queue above (tail2 = non-destructive scan cursor, spare = cached
// free block).
42 struct lobjpointerblock *gclobjhead=NULL;
43 int gclobjheadindex=0;
44 struct lobjpointerblock *gclobjtail=NULL;
45 int gclobjtailindex=0;
46 struct lobjpointerblock *gclobjtail2=NULL;
47 int gclobjtailindex2=0;
48 struct lobjpointerblock *gclobjspare=NULL;
// Debug helper: hex-dumps the whole shared memory region, 16 words per
// line, annotating each shared block with the core that owns it.
// NOTE(review): many interior lines are missing from this extraction
// (declarations of i/j/sblock/block/x/y/coren/tmpcore/reverse, several
// closing braces and else-branches); the visible control flow is
// incomplete — verify against the full source before editing.
51 inline void dumpSMem() {
60 tprintf("Dump shared mem: \n");
61 tprintf("++++ reserved sblocks ++++ \n");
// dump the reserved sblocks below gcbaseva, one cache line (4*16
// bytes) per printed row
62 for(i=BAMBOO_BASE_VA; i<gcbaseva; i+= 4*16) {
63 tprintf("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x \n",
64 *((int *)(i)), *((int *)(i + 4)), *((int *)(i + 4*2)), *((int *)(i + 4*3)),
65 *((int *)(i + 4*4)), *((int *)(i + 4*5)), *((int *)(i + 4*6)), *((int *)(i + 4*7)),
66 *((int *)(i + 4*8)), *((int *)(i + 4*9)), *((int *)(i + 4*10)), *((int *)(i + 4*11)),
67 *((int *)(i + 4*12)), *((int *)(i + 4*13)), *((int *)(i + 4*14)), *((int *)(i + 4*15)));
69 sblock = gcreservedsb;
70 bool advanceblock = false;
// walk the GC-managed heap; on each block boundary work out which core
// hosts the block (snake ordering across the tile grid — presumably
// matching RESIDECORE; TODO confirm against the full source)
71 for(i=gcbaseva;i<BAMBOO_BASE_VA+BAMBOO_SHARED_MEM_SIZE;i+=4*16){
73 if(j%((BAMBOO_SMEM_SIZE)/(4*16)) == 0) {
74 if(j < ((BAMBOO_LARGE_SMEM_BOUND)/(4*16))) {
75 if((j > 0) && (j%((BAMBOO_SMEM_SIZE_L)/(4*16)) == 0)) {
84 if(block%NUMCORES == 0) {
// direction of traversal alternates per column ("reverse" snake order)
87 if(((!reverse) && (y%2)) || (reverse && ((y%2)==0))) {
88 if((x == 0) || ((x==1)&&(y==6))){
91 coren -= bamboo_width - 1;
93 coren -= bamboo_width;
97 coren += bamboo_width - 1;
99 coren += bamboo_width;
109 coren -= bamboo_width - 1;
111 coren -= bamboo_width;
115 coren += bamboo_width-1;
117 coren += bamboo_width;
// NOTE(review): the magic constant 47 looks tied to a specific 62-core
// layout — confirm before reuse
130 } else if(tmpcore > 47) {
134 x = tmpcore%bamboo_width;
135 y = tmpcore/bamboo_width;
136 tprintf("==== %d, %d : core (%d,%d), saddr %x====\n", block, sblock++,
137 x, y, (sblock-1)*(BAMBOO_SMEM_SIZE)+BAMBOO_BASE_VA);
140 tprintf("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x \n",
141 *((int *)(i)), *((int *)(i + 4)), *((int *)(i + 4*2)), *((int *)(i + 4*3)),
142 *((int *)(i + 4*4)), *((int *)(i + 4*5)), *((int *)(i + 4*6)), *((int *)(i + 4*7)),
143 *((int *)(i + 4*8)), *((int *)(i + 4*9)), *((int *)(i + 4*10)), *((int *)(i + 4*11)),
144 *((int *)(i + 4*12)), *((int *)(i + 4*13)), *((int *)(i + 4*14)), *((int *)(i + 4*15)));
150 // should be invoked with interruption closed
// Pushes ptr onto the mark queue (gchead/gcheadindex).  The _I suffix
// marks it interrupt-unsafe: callers must disable interrupts first.
// NOTE(review): lines are missing from this extraction — the gcspare
// reuse branch and the gchead/gcheadindex advance after allocating a
// new block are not visible; the braces shown are incomplete.
151 inline void gc_enqueue_I(void *ptr) {
153 BAMBOO_DEBUGPRINT(0xe601);
154 BAMBOO_DEBUGPRINT_REG(ptr);
// current block full: link in a fresh block (reusing gcspare when
// available, otherwise allocating with the interrupt-safe RUNMALLOC_I)
156 if (gcheadindex==NUMPTRS) {
157 struct pointerblock * tmp;
162 tmp=RUNMALLOC_I(sizeof(struct pointerblock));
163 } // if (gcspare!=NULL)
167 } // if (gcheadindex==NUMPTRS)
168 gchead->ptrs[gcheadindex++]=ptr;
170 BAMBOO_DEBUGPRINT(0xe602);
172 } // void gc_enqueue_I(void *ptr)
174 // dequeue and destroy the queue
// Pops the next pointer from the mark queue, releasing exhausted
// blocks as it goes (one block is parked in gcspare for reuse).
// NOTE(review): the block-advance and free/spare logic (between the
// tmp declaration and the closing braces) is missing from this
// extraction; gctailindex is presumably reset to 0 there — confirm.
175 inline void * gc_dequeue() {
176 if (gctailindex==NUMPTRS) {
177 struct pointerblock *tmp=gctail;
184 } // if (gcspare!=NULL)
185 } // if (gctailindex==NUMPTRS)
186 return gctail->ptrs[gctailindex++];
187 } // void * gc_dequeue()
189 // dequeue and do not destroy the queue
190 inline void * gc_dequeue2() {
191 if (gctailindex2==NUMPTRS) {
192 struct pointerblock *tmp=gctail2;
193 gctail2=gctail2->next;
195 } // if (gctailindex2==NUMPTRS)
196 return gctail2->ptrs[gctailindex2++];
197 } // void * gc_dequeue2()
199 inline int gc_moreItems() {
200 if ((gchead==gctail)&&(gctailindex==gcheadindex))
203 } // int gc_moreItems()
205 inline int gc_moreItems2() {
206 if ((gchead==gctail2)&&(gctailindex2==gcheadindex))
209 } // int gc_moreItems2()
211 // should be invoked with interruption closed
212 // enqueue a large obj: start addr & length
// Records a large object (start address, byte length, host core) in
// the large-object queue for later relocation by moveLObjs().
// Interrupt-unsafe (_I); callers must hold interrupts closed.
// NOTE(review): the parameter list is truncated in this extraction —
// `int length` and `int host` parameters are referenced below but not
// visible; the gclobjspare-reuse branch body is also missing.
213 inline void gc_lobjenqueue_I(void *ptr,
217 BAMBOO_DEBUGPRINT(0xe901);
// current block full: link in a new block, reusing the cached spare
// when possible
219 if (gclobjheadindex==NUMLOBJPTRS) {
220 struct lobjpointerblock * tmp;
221 if (gclobjspare!=NULL) {
225 tmp=RUNMALLOC_I(sizeof(struct lobjpointerblock));
226 } // if (gclobjspare!=NULL)
227 gclobjhead->next=tmp;
230 } // if (gclobjheadindex==NUMLOBJPTRS)
// store the triple in the three parallel arrays at the same index
231 gclobjhead->lobjs[gclobjheadindex]=ptr;
232 gclobjhead->lengths[gclobjheadindex]=length;
233 gclobjhead->hosts[gclobjheadindex++]=host;
235 BAMBOO_DEBUGPRINT_REG(gclobjhead->lobjs[gclobjheadindex-1]);
236 BAMBOO_DEBUGPRINT_REG(gclobjhead->lengths[gclobjheadindex-1]);
237 BAMBOO_DEBUGPRINT_REG(gclobjhead->hosts[gclobjheadindex-1]);
239 } // void gc_lobjenqueue_I(void *ptr...)
241 // dequeue and destroy the queue
// Pops the next large-object entry, writing its byte length to *length
// and its host core to *host, and returning its start address.
// NOTE(review): the parameter list is truncated (`int * host` is
// referenced below but not visible), as are the spare/free handling
// and the reset of gclobjtailindex after advancing to the next block.
242 inline void * gc_lobjdequeue(int * length,
244 if (gclobjtailindex==NUMLOBJPTRS) {
245 struct lobjpointerblock *tmp=gclobjtail;
246 gclobjtail=gclobjtail->next;
248 if (gclobjspare!=NULL) {
252 } // if (gclobjspare!=NULL)
253 } // if (gclobjtailindex==NUMLOBJPTRS)
255 *length = gclobjtail->lengths[gclobjtailindex];
258 *host = (int)(gclobjtail->hosts[gclobjtailindex]);
260 return gclobjtail->lobjs[gclobjtailindex++];
261 } // void * gc_lobjdequeue()
263 inline int gc_lobjmoreItems() {
264 if ((gclobjhead==gclobjtail)&&(gclobjtailindex==gclobjheadindex))
267 } // int gc_lobjmoreItems()
269 // dequeue and don't destroy the queue
// Advances the non-destructive large-object cursor
// (gclobjtail2/gclobjtailindex2) by one entry without unlinking blocks.
// NOTE(review): the index update after stepping to the next block, and
// the else-branch that advances the index within a block, are missing
// from this extraction — confirm against the full source.
270 inline void gc_lobjdequeue2() {
271 if (gclobjtailindex2==NUMLOBJPTRS) {
272 gclobjtail2=gclobjtail2->next;
276 }// if (gclobjtailindex2==NUMLOBJPTRS)
277 } // void * gc_lobjdequeue2()
279 inline int gc_lobjmoreItems2() {
280 if ((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex))
283 } // int gc_lobjmoreItems2()
285 INTPTR gccurr_heapbound = 0;
// Reads the type id from an object header and computes the object's
// byte size: a plain class uses the classsize[] table; an array adds
// the ArrayObject header to length * element size.
// NOTE(review): the parameter list is truncated in this extraction
// (out-parameters for type and size are implied by callers such as
// isLarge), and the lines that store `type`/`size` into them plus the
// closing brace are missing.
287 inline void gettype_size(void * ptr,
// first word of every object header is its type id
290 int type = ((int *)ptr)[0];
292 if(type < NUMCLASSES) {
294 size = classsize[type];
// type >= NUMCLASSES means an array type: size depends on length
297 struct ArrayObject *ao=(struct ArrayObject *)ptr;
298 int elementsize=classsize[type];
299 int length=ao->___length___;
300 size=sizeof(struct ArrayObject)+length*elementsize;
301 } // if(type < NUMCLASSES)
// Decides whether ptr refers to a "large" object: one that either
// starts exactly on a shared-block boundary or would cross the
// boundary of its current block.  Also reports the object's type and
// size through out-parameters via gettype_size().
// NOTE(review): the out-parameter declarations (ttype/tsize) and the
// `return true/false` statements after each branch are missing from
// this extraction; the visible control flow is incomplete.
306 inline bool isLarge(void * ptr,
310 BAMBOO_DEBUGPRINT(0xe701);
311 BAMBOO_DEBUGPRINT_REG(ptr);
313 // check if a pointer is referring to a large object
314 gettype_size(ptr, ttype, tsize);
316 BAMBOO_DEBUGPRINT(*tsize);
// block size differs below/above the large-block boundary: the first
// region uses the larger BAMBOO_SMEM_SIZE_L blocks
318 int bound = (BAMBOO_SMEM_SIZE);
319 if(((int)ptr-gcbaseva) < (BAMBOO_LARGE_SMEM_BOUND)) {
320 bound = (BAMBOO_SMEM_SIZE_L);
322 if((((int)ptr-gcbaseva)%(bound))==0) {
323 // ptr is a start of a block
325 BAMBOO_DEBUGPRINT(0xe702);
326 BAMBOO_DEBUGPRINT(1);
// object does not fit in the space left in its block
330 if((bound-(((int)ptr-gcbaseva)%bound)) < (*tsize)) {
331 // it acrosses the boundary of current block
333 BAMBOO_DEBUGPRINT(0xe703);
334 BAMBOO_DEBUGPRINT(1);
339 BAMBOO_DEBUGPRINT(0);
342 } // bool isLarge(void * ptr, int * ttype, int * tsize)
344 inline int hostcore(void * ptr) {
345 // check the host core of ptr
349 RESIDECORE(ptr, &x, &y);
350 host = (x==0)?(x*bamboo_height+y):(x*bamboo_height+y-2);
352 } // int hostcore(void * ptr)
354 inline bool isLocal(void * ptr) {
355 // check if a pointer is in shared heap on this core
356 return hostcore(ptr) == BAMBOO_NUM_OF_CORE;
357 } // bool isLocal(void * ptr)
359 inline bool gc_checkCoreStatus() {
360 bool allStall = true;
361 for(int i = 0; i < NUMCORES; ++i) {
362 if(gccorestatus[i] != 0) {
365 } // if(gccorestatus[i] != 0)
366 } // for(i = 0; i < NUMCORES; ++i)
// Coordinator-side check (run on STARTUPCORE) for the end of the mark
// phase: when all cores stall and the global sent-object count equals
// the received-object count, it either requests fresh status
// confirmations from every core or, once confirmed, advances gcphase
// to COMPACTPHASE.
// NOTE(review): many interior lines are missing from this extraction
// (declarations of i/sumsendobj, the waitconfirm condition head, the
// allStall guard, several else-branches and loop bodies); the visible
// braces are incomplete.
370 inline void checkMarkStatue() {
372 BAMBOO_DEBUGPRINT(0xee01);
// proceed either when not waiting for confirmations, or when all
// requested confirmations have arrived (numconfirm drained to 0)
376 (waitconfirm && (numconfirm == 0))) {
378 BAMBOO_DEBUGPRINT(0xee02);
380 BAMBOO_START_CRITICAL_SECTION_STATUS();
381 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
382 gcnumsendobjs[BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
383 gcnumreceiveobjs[BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
384 // check the status of all cores
385 bool allStall = gc_checkCoreStatus();
387 BAMBOO_DEBUGPRINT(0xee03);
391 BAMBOO_DEBUGPRINT(0xee04);
393 // check if the sum of send objs and receive obj are the same
394 // yes->check if the info is the latest; no->go on executing
396 for(i = 0; i < NUMCORES; ++i) {
397 sumsendobj += gcnumsendobjs[i];
398 } // for(i = 0; i < NUMCORES; ++i)
400 BAMBOO_DEBUGPRINT(0xee05);
401 BAMBOO_DEBUGPRINT_REG(sumsendobj);
403 for(i = 0; i < NUMCORES; ++i) {
404 sumsendobj -= gcnumreceiveobjs[i];
405 } // for(i = 0; i < NUMCORES; ++i)
407 BAMBOO_DEBUGPRINT(0xee06);
408 BAMBOO_DEBUGPRINT_REG(sumsendobj);
// sumsendobj == 0 means no marked-object messages are in flight
410 if(0 == sumsendobj) {
412 BAMBOO_DEBUGPRINT(0xee07);
416 BAMBOO_DEBUGPRINT(0xee08);
418 // the first time found all cores stall
419 // send out status confirm msg to all other cores
420 // reset the corestatus array too
421 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
423 numconfirm = NUMCORES - 1;
424 for(i = 1; i < NUMCORES; ++i) {
426 // send mark phase finish confirm request msg to core i
427 send_msg_1(i, GCMARKCONFIRM);
428 } // for(i = 1; i < NUMCORES; ++i)
431 BAMBOO_DEBUGPRINT(0xee09);
433 // all the core status info are the latest
// mark phase confirmed complete on every core: move to compaction
435 gcphase = COMPACTPHASE;
436 // restore the gcstatus for all cores
437 for(i = 0; i < NUMCORES; ++i) {
439 } // for(i = 0; i < NUMCORES; ++i)
440 } // if(!gcwautconfirm) else()
441 } // if(0 == sumsendobj)
443 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
444 } // if((!waitconfirm)...
446 BAMBOO_DEBUGPRINT(0xee0a);
448 } // void checkMarkStatue()
// Pre-GC handshake: before starting a collection, verifies that no
// transfer-object messages are still in flight by summing send/receive
// counters across all cores; returns whether GC may start (return
// statements are among the lines missing from this extraction).
// NOTE(review): declarations of i/sumsendobj, the condition head
// before the first visible `(waitconfirm && ...)` line, the
// busy-wait body, and several returns are missing; braces incomplete.
450 inline bool preGC() {
451 // preparation for gc
452 // make sure to clear all incoming msgs espacially transfer obj msgs
454 BAMBOO_DEBUGPRINT(0xec01);
458 (waitconfirm && (numconfirm == 0))) {
459 // send out status confirm msgs to all cores to check if there are
460 // transfer obj msgs on-the-fly
462 numconfirm = NUMCORES - 1;
463 for(i = 1; i < NUMCORES; ++i) {
465 // send status confirm msg to core i
466 send_msg_1(i, STATUSCONFIRM);
467 } // for(i = 1; i < NUMCORES; ++i)
470 BAMBOO_DEBUGPRINT(0xec02);
// busy-wait until all cores have answered the status confirm request
473 if(numconfirm == 0) {
476 } // wait for confirmations
480 BAMBOO_DEBUGPRINT(0xec03);
482 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
483 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
486 BAMBOO_DEBUGPRINT(0xec04);
488 for(i = 0; i < NUMCORES; ++i) {
489 sumsendobj += numsendobjs[i];
491 BAMBOO_DEBUGPRINT(0xf000 + numsendobjs[i]);
493 } // for(i = 1; i < NUMCORES; ++i)
495 BAMBOO_DEBUGPRINT(0xec05);
496 BAMBOO_DEBUGPRINT_REG(sumsendobj);
498 for(i = 0; i < NUMCORES; ++i) {
499 sumsendobj -= numreceiveobjs[i];
501 BAMBOO_DEBUGPRINT(0xf000 + numreceiveobjs[i]);
503 } // for(i = 1; i < NUMCORES; ++i)
505 BAMBOO_DEBUGPRINT(0xec06);
506 BAMBOO_DEBUGPRINT_REG(sumsendobj);
// zero difference means every sent transfer object has been received
508 if(0 == sumsendobj) {
511 // still have some transfer obj msgs on-the-fly, can not start gc
513 } // if(0 == sumsendobj)
516 BAMBOO_DEBUGPRINT(0xec07);
518 // previously asked for status confirmation and do not have all the
519 // confirmations yet, can not start gc
521 } // if((!waitconfirm) ||
// Resets all per-collection GC state: the coordinator clears the
// per-core status/counter arrays; every core resets its own counters,
// reinitializes the small- and large-object queues, and replaces the
// old-to-new pointer mapping table.
// NOTE(review): interior lines are missing (declaration of i, parts of
// the per-core reset loop, the gchead==NULL branch structure); braces
// shown are incomplete.
524 inline void initGC() {
// only the startup (coordinator) core owns the global arrays
526 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
527 for(i = 0; i < NUMCORES; ++i) {
529 gcnumsendobjs[i] = 0;
530 gcnumreceiveobjs[i] = 0;
532 gcrequiredmems[i] = 0;
533 gcfilledblocks[i] = 0;
535 } // for(i = 0; i < NUMCORES; ++i)
540 gcself_numsendobjs = 0;
541 gcself_numreceiveobjs = 0;
542 gcmarkedptrbound = 0;
// initialize the mark queue: allocate on first GC, otherwise just
// rewind both tail cursors onto the existing head block
556 gcheadindex=gctailindex=gctailindex2 = 0;
557 gchead=gctail=gctail2=RUNMALLOC(sizeof(struct pointerblock));
559 gctailindex = gctailindex2 = gcheadindex;
560 gctail = gctail2 = gchead;
563 // initialize the large obj queues
564 if (gclobjhead==NULL) {
567 gclobjtailindex2 = 0;
568 gclobjhead=gclobjtail=gclobjtail2=
569 RUNMALLOC(sizeof(struct lobjpointerblock));
571 gclobjtailindex = gclobjtailindex2 = gclobjheadindex;
572 gclobjtail = gclobjtail2 = gclobjhead;
// discard last collection's old->new address mapping table
575 freeRuntimeHash(gcpointertbl);
576 gcpointertbl = allocateRuntimeHash(20);
579 // compute load balance for all cores
// Sums the live-data loads reported by every core to find the
// post-compaction heap top, derives the number of blocks per core, the
// compaction direction (gcheapdirection), and the core that will own
// the heap top (gctopcore).  Returns the blocks-per-core count
// (the return statement is among the missing lines).
// NOTE(review): declarations of i/b/x/y and the return are missing
// from this extraction.
580 inline int loadbalance() {
581 // compute load balance
584 // get the total loads
585 int tloads = gcloads[STARTUPCORE];
586 for(i = 1; i < NUMCORES; i++) {
587 tloads += gcloads[i];
589 int heaptop = gcbaseva + tloads;
591 BAMBOO_DEBUGPRINT(0xdddd);
592 BAMBOO_DEBUGPRINT_REG(tloads);
593 BAMBOO_DEBUGPRINT_REG(heaptop);
596 BLOCKINDEX(heaptop, &b);
597 int numbpc = b / NUMCORES; // num of blocks per core
599 BAMBOO_DEBUGPRINT_REG(b);
600 BAMBOO_DEBUGPRINT_REG(numbpc);
// even block count per core => forward direction (snake layout)
602 gcheapdirection = (numbpc%2 == 0);
605 RESIDECORE(heaptop, &x, &y);
// inverse of hostcore()'s mapping: column 0 maps to y, later columns
// skip the two reserved cores
606 gctopcore = (x == 0 ? y : x * bamboo_height + y - 2);
608 BAMBOO_DEBUGPRINT_REG(x);
609 BAMBOO_DEBUGPRINT_REG(y);
610 BAMBOO_DEBUGPRINT_REG(gctopcore);
613 } // void loadbalance()
// Temporarily evacuates all queued large objects to the very top of
// shared memory so compaction can slide small objects freely; each
// object's mark word ([6]) is set to 2 to flag "moved, needs flush".
// Returns false when there is no room above gcheaptop (that early
// return is among the missing lines).
// NOTE(review): declarations of size/sumsize, the sumsize
// accumulation, the cursor-decrement logic in both scan loops, the
// dst advance after memcpy, and the final return are missing from
// this extraction.
615 inline bool cacheLObjs() {
616 // check the total mem size need for large objs
620 BAMBOO_DEBUGPRINT(0xe801);
// first pass (non-destructive): total up the queued objects' sizes
622 gclobjtail2 = gclobjtail;
623 gclobjtailindex2 = gclobjtailindex;
624 while(gc_lobjmoreItems2()){
626 size = gclobjtail2->lengths[gclobjtailindex2 - 1];
629 BAMBOO_DEBUGPRINT_REG(gclobjtail2->lobjs[gclobjtailindex2-1]);
630 BAMBOO_DEBUGPRINT_REG(size);
631 BAMBOO_DEBUGPRINT_REG(sumsize);
633 } // while(gc_lobjmoreItems2())
635 // check if there are enough space to cache these large objs
636 INTPTR dst = (BAMBOO_BASE_VA) + (BAMBOO_SHARED_MEM_SIZE) - sumsize;
637 if(gcheaptop > dst) {
638 // do not have enough room to cache large objs
642 BAMBOO_DEBUGPRINT(0xe802);
643 BAMBOO_DEBUGPRINT_REG(dst);
646 gcheaptop = dst; // Note: record the start of cached lobjs with gcheaptop
647 // cache the largeObjs to the top of the shared heap
// second pass: copy each object upward into the reserved region
648 gclobjtail2 = gclobjtail;
649 gclobjtailindex2 = gclobjtailindex;
650 while(gc_lobjmoreItems2()) {
652 size = gclobjtail2->lengths[gclobjtailindex2 - 1];
653 // set the mark field to 2, indicating that this obj has been moved and need to be flushed
654 ((int *)(gclobjtail2->lobjs[gclobjtailindex2-1]))[6] = 2;
656 BAMBOO_DEBUGPRINT(0xdcdc);
657 memcpy(dst, gclobjtail2->lobjs[gclobjtailindex2 - 1], size);
660 BAMBOO_DEBUGPRINT_REG(gclobjtail2->lobjs[gclobjtailindex2-1]);
661 BAMBOO_DEBUGPRINT(dst-size);
662 BAMBOO_DEBUGPRINT_REG(size);
666 } // void cacheLObjs()
// Inserts a freed span [localtop, localtop+localsize) into the global
// free-memory list, coalescing with an adjacent hole below (hole ends
// at localtop) or above (hole starts at localtop+localsize) when
// possible; otherwise appends a new item after `listtop`.  Writes the
// resulting span start to *returntop and (per callers) returns the new
// list tail (the return statements are among the missing lines).
// NOTE(review): the parameter list is truncated (localsize and
// returntop are referenced but not visible), and several branch heads,
// else-arms and returns are missing; braces shown are incomplete.
668 inline struct freeMemItem * updateFreeMemList(int localtop,
670 struct freeMemItem * listtop,
672 struct freeMemItem * tochange = listtop;
673 struct freeMemItem * tmp = bamboo_free_mem_list->head;
674 bool extendflag = false;
675 struct freeMemItem * ex_tmp = NULL;
676 // check if there is a hole in the block below it
678 if(tmp->ptr<localtop) {
679 if((tmp->ptr+tmp->size) == localtop) {
680 // extend the hole up to includ this block
681 tmp->size += localsize;
683 *returntop = tmp->ptr;
685 } // if((tmp->ptr+tmp->size) == localtop)
688 } // if(tmp->ptr<gcloads[i]) else ...
689 if(tmp == tochange) {
693 } // if(tmp == tochange)
695 if((extendflag) && (tmp != tochange)) {
698 } // if(tmp->ptr<gcloads[i])
699 if(tmp != tochange) {
// check the hole above: does it start exactly where this span ends?
701 if((localtop+localsize) == tmp->ptr) {
702 // extend the hole below to include this block
706 tmp->size += localsize;
707 *returntop = localtop;
// merged both neighbors: absorb the upper hole and unlink it
709 ex_tmp->size += tmp->size;
710 tmp->ptr = tmp->next->ptr;
711 tmp->size = tmp->next->size;
712 if(tmp->next == tochange) {
716 tmp->next = tmp->next->next;
722 if(tmp == tochange) {
726 } // if(tmp == tochange)
728 } // if(tmp != tochange)
729 if((!extendflag) && (tmp == tochange)) {
730 // add a new item for this block hole
731 if(tochange->next == NULL) {
733 (struct freeMemItem *)RUNMALLOC(sizeof(struct freeMemItem));
735 tochange = tochange->next;
736 tochange->ptr = localtop;
737 tochange->size = localsize;
738 *returntop = localtop;
739 } // if((!extendflag) && (tmp == tochange))
741 } // void updateFreeMemList(int, int, struct freeMemItem *, int *)
// Final compaction step (coordinator): flushes per-core heap tops into
// the free-memory list, then moves the large objects that cacheLObjs()
// parked at the top of shared memory down to the new heap top, writing
// block headers, -2 padding, gcsbstarttbl entries, and old->new
// mapping info (locally via gcpointertbl, remotely via GCLOBJMAPPING).
// NOTE(review): many interior lines are missing from this extraction —
// declarations of remain/tmptop/ptr/size/isize/host/cpysize, several
// else-branch heads, the `remain < isize` condition head, RUNFREE of
// removed list items, and various brace lines.  Treat the visible
// structure as incomplete.
743 inline void moveLObjs() {
745 BAMBOO_DEBUGPRINT(0xea01);
748 int bound = BAMBOO_SMEM_SIZE_L;
749 struct freeMemItem * tochange = bamboo_free_mem_list->head;
750 if(tochange == NULL) {
751 bamboo_free_mem_list->head = tochange =
752 (struct freeMemItem *)RUNMALLOC(sizeof(struct freeMemItem));
754 // find current heap top
755 // flush all gcloads to indicate the real heap top on one core
756 // previous it represents the next available ptr on a core
757 if((gcloads[0] > (gcbaseva+(BAMBOO_SMEM_SIZE_L)))
758 && ((gcloads[0]%(BAMBOO_SMEM_SIZE)) == 0)) {
759 // edge of a block, check if this is exactly the heaptop
760 BASEPTR(0, gcfilledblocks[0]-1, &(gcloads[0]));
761 gcloads[0]+=(gcfilledblocks[0]>1?(BAMBOO_SMEM_SIZE):(BAMBOO_SMEM_SIZE_L));
763 // in the middle of a block, flush the remaining space in this block
764 // and update it into the free mem list
765 if(gcloads[0] > (gcbaseva+(BAMBOO_SMEM_SIZE_L))) {
766 bound = BAMBOO_SMEM_SIZE;
768 remain = bound - gcloads[0]%bound;
769 tochange->ptr = gcloads[0];
770 tochange->size = remain;
771 // zero out all these spare memory
772 memset(tochange->ptr, '\0', tochange->size);
774 int tmpheaptop = gcloads[0];
776 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
// repeat the same flush for every other core's reported load
778 for(int i = 1; i < NUMCORES; i++) {
780 if((gcfilledblocks[i] > 0)
781 && ((gcloads[i] % (BAMBOO_SMEM_SIZE)) == 0)) {
782 // edge of a block, check if this is exactly the heaptop
783 BASEPTR(0, gcfilledblocks[i]-1, &gcloads[i]);
785 +=(gcfilledblocks[i]>1?(BAMBOO_SMEM_SIZE):(BAMBOO_SMEM_SIZE_L));
788 // in the middle of a block, flush the remaining space in this block
789 // and update it into the free mem list
790 if(gcfilledblocks[i] > 0) {
791 bound = BAMBOO_SMEM_SIZE;
793 bound = BAMBOO_SMEM_SIZE_L;
795 remain = bound - gcloads[i]%bound;
796 // zero out all these spare memory
797 memset(gcloads[i], '\0', remain);
798 // update free mem list
799 tochange = updateFreeMemList(gcloads[i], remain, tochange, &tmptop);
800 } // if((gcfilledblocks[i] > 0)
// track the highest flushed top across all cores
802 if(tmpheaptop < tmptop) {
806 BAMBOO_DEBUGPRINT_REG(gcloads[i]);
807 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
// everything between the compacted heap top and the cached large
// objects becomes one big free span
810 tochange->ptr = tmpheaptop;
811 tochange->size = gcheaptop - tmpheaptop;
812 // zero out all these spare memory
813 memset(tochange->ptr, '\0', tochange->size);
814 if(bamboo_free_mem_list->tail != tochange) {
815 bamboo_free_mem_list->tail = tochange;
817 while(tochange->next != NULL) {
818 struct freeMemItem * toremove = tochange->next;
819 tochange->next = toremove->next;
822 // move large objs from gcheaptop to tmpheaptop
823 // write the header first
824 int tomove = (BAMBOO_BASE_VA) + (BAMBOO_SHARED_MEM_SIZE) - gcheaptop;
826 BAMBOO_DEBUGPRINT(0xea02);
827 BAMBOO_DEBUGPRINT_REG(tomove);
828 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
829 BAMBOO_DEBUGPRINT_REG(gcheaptop);
831 // flush the sbstartbl
832 memset(&(gcsbstarttbl[gcreservedsb]), '\0',
833 BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE*sizeof(INTPTR));
835 gcheaptop = tmpheaptop;
838 // check how many blocks it acrosses
839 remain = tmpheaptop-gcbaseva;
840 int b = remain/(BAMBOO_SMEM_SIZE) + gcreservedsb;
841 // check the remaining space in this block
842 bound = (BAMBOO_SMEM_SIZE);
843 if(remain < (BAMBOO_LARGE_SMEM_BOUND)) {
844 bound = (BAMBOO_SMEM_SIZE_L);
846 remain = bound - remain%bound;
849 BAMBOO_DEBUGPRINT(0xea03);
// `base` marks where the current block's header word will be written
855 int base = tmpheaptop;
857 remain -= BAMBOO_CACHE_LINE_SIZE;
858 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
859 while(gc_lobjmoreItems()) {
860 ptr = (int)(gc_lobjdequeue(&size, &host));
861 ALIGNSIZE(size, &isize);
863 // this object acrosses blocks
865 // close current block, fill its header
866 memset(base, '\0', BAMBOO_CACHE_LINE_SIZE);
867 *((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
871 remain = ((tmpheaptop-gcbaseva)<(BAMBOO_LARGE_SMEM_BOUND)) ?
872 BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
874 remain -= BAMBOO_CACHE_LINE_SIZE;
875 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
878 // move the large obj
879 memcpy(tmpheaptop, gcheaptop, size);
880 // fill the remaining space with -2 padding
881 memset(tmpheaptop+size, -2, isize-size);
882 // zero out original mem caching the lobj
883 memset(gcheaptop, '\0', size);
885 BAMBOO_DEBUGPRINT(0xea04);
886 BAMBOO_DEBUGPRINT_REG(gcheaptop);
887 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
888 BAMBOO_DEBUGPRINT_REG(size);
889 BAMBOO_DEBUGPRINT_REG(isize);
// publish the old->new mapping: locally in gcpointertbl, or by
// messaging the object's original host core
892 if(host == BAMBOO_NUM_OF_CORE) {
893 BAMBOO_START_CRITICAL_SECTION();
894 RuntimeHashadd_I(gcpointertbl, ptr, tmpheaptop);
895 BAMBOO_CLOSE_CRITICAL_SECTION();
897 BAMBOO_DEBUGPRINT(0xcdca);
898 BAMBOO_DEBUGPRINT_REG(ptr);
899 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
902 // send the original host core with the mapping info
903 send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop);
905 BAMBOO_DEBUGPRINT(0xcdcb);
906 BAMBOO_DEBUGPRINT_REG(ptr);
907 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
909 } // if(host == BAMBOO_NUM_OF_CORE) else ...
912 // set the gcsbstarttbl
// blocks fully covered by this object are tagged -1 (no object start)
913 int tmpsbs = 1+(isize-remain-1)/BAMBOO_SMEM_SIZE;
914 for(int k = 1; k < tmpsbs; k++) {
915 gcsbstarttbl[b+k] = (INTPTR)(-1);
918 if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) {
919 gcsbstarttbl[b] = (INTPTR)(-1);
920 remain = ((tmpheaptop-gcbaseva)<(BAMBOO_LARGE_SMEM_BOUND)) ?
921 BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
923 gcsbstarttbl[b] = (INTPTR)(tmpheaptop);
924 remain = tmpheaptop-gcbaseva;
925 int bound = remain<(BAMBOO_LARGE_SMEM_BOUND)?(BAMBOO_SMEM_SIZE_L):(BAMBOO_SMEM_SIZE);
926 remain = bound - remain%bound;
927 } // if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) else ...
929 // close current block and fill the header
930 memset(base, '\0', BAMBOO_CACHE_LINE_SIZE);
931 *((int*)base) = isize + BAMBOO_CACHE_LINE_SIZE;
934 remain -= BAMBOO_CACHE_LINE_SIZE;
935 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
938 // move the large obj
939 memcpy(tmpheaptop, gcheaptop, size);
940 // fill the remaining space with -2 padding
941 memset(tmpheaptop+size, -2, isize-size);
943 BAMBOO_DEBUGPRINT(0xea05);
944 BAMBOO_DEBUGPRINT_REG(gcheaptop);
945 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
946 BAMBOO_DEBUGPRINT_REG(size);
947 BAMBOO_DEBUGPRINT_REG(isize);
951 if(host == BAMBOO_NUM_OF_CORE) {
952 BAMBOO_START_CRITICAL_SECTION();
953 RuntimeHashadd_I(gcpointertbl, ptr, tmpheaptop);
954 BAMBOO_CLOSE_CRITICAL_SECTION();
956 BAMBOO_DEBUGPRINT(0xcdcc);
957 BAMBOO_DEBUGPRINT_REG(ptr);
958 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
961 // send the original host core with the mapping info
962 send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop);
964 BAMBOO_DEBUGPRINT(0xcdcd);
965 BAMBOO_DEBUGPRINT_REG(ptr);
966 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
968 } // if(host == BAMBOO_NUM_OF_CORE) else ...
970 } // if(remain < isize) else ...
971 } // while(gc_lobjmoreItems())
973 // close current block, fill the header
974 memset(base, '\0', BAMBOO_CACHE_LINE_SIZE);
975 *((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
977 tmpheaptop -= BAMBOO_CACHE_LINE_SIZE;
979 gcheaptop = tmpheaptop;
980 // update the free mem list
981 tochange->size = (BAMBOO_BASE_VA)+(BAMBOO_SHARED_MEM_SIZE)-gcheaptop;
982 tochange->ptr = gcheaptop;
984 BAMBOO_DEBUGPRINT(0xea06);
985 BAMBOO_DEBUGPRINT_REG(gcheaptop);
987 } // void moveLObjs()
989 /*inline void updateFreeMemList() {
990 struct freeMemItem * tochange = bamboo_free_mem_list->head;
991 if(tochange == NULL) {
992 bamboo_free_mem_list->head = tochange =
993 (struct freeMemItem *)RUNMALLOC(sizeof(struct freeMemItem));
995 // handle the top of the heap
996 tochange->ptr = gcheaptop;
997 tochange->size = BAMBOO_SHARED_MEM_SIZE + BAMBOO_BASE_VA - gcheaptop;
998 // zero out all these spare memory
999 memset(tochange->ptr, '\0', tochange->size);
1000 if(bamboo_free_mem_list->tail != tochange) {
1001 bamboo_free_mem_list->tail = tochange;
1002 if(bamboo_free_mem_list->tail != NULL) {
1003 RUNFREE(bamboo_free_mem_list->tail);
1006 } // void updateFreeMemList()
1009 // enqueue root objs
// Seeds the mark queue with every GC root visible on this core: the
// mutator stack(s), the task objectsets, the current and pending task
// parameter arrays, and transfer-object queues in both directions.
// Each gc_enqueue_I call is wrapped in a critical section because the
// enqueue helper is interrupt-unsafe.
// NOTE(review): declarations of i/j, a few loop/brace closers, and the
// activetasks iteration advance are missing from this extraction.
1010 inline void tomark(struct garbagelist * stackptr) {
// marking roots outside the mark phase is a fatal protocol violation
1011 if(MARKPHASE != gcphase) {
1013 BAMBOO_DEBUGPRINT_REG(gcphase);
1015 BAMBOO_EXIT(0xb101);
1017 gcbusystatus = true;
1021 // enqueue current stack
1022 while(stackptr!=NULL) {
1024 BAMBOO_DEBUGPRINT(0xe501);
1025 BAMBOO_DEBUGPRINT_REG(stackptr->size);
1026 BAMBOO_DEBUGPRINT_REG(stackptr->next);
1027 BAMBOO_DEBUGPRINT_REG(stackptr->array[0]);
1029 for(i=0; i<stackptr->size; i++) {
1030 if(stackptr->array[i] != NULL) {
1031 BAMBOO_START_CRITICAL_SECTION();
1032 gc_enqueue_I(stackptr->array[i]);
1033 BAMBOO_CLOSE_CRITICAL_SECTION();
1036 stackptr=stackptr->next;
1040 BAMBOO_DEBUGPRINT(0xe503);
1042 // enqueue objectsets
1043 for(i=0; i<NUMCLASSES; i++) {
1044 struct parameterwrapper ** queues =
1045 objectqueues[BAMBOO_NUM_OF_CORE][i];
1046 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
1047 for(j = 0; j < length; ++j) {
1048 struct parameterwrapper * parameter = queues[j];
1049 struct ObjectHash * set=parameter->objectset;
1050 struct ObjectNode * ptr=set->listhead;
1052 BAMBOO_START_CRITICAL_SECTION();
1053 gc_enqueue_I((void *)ptr->key);
1054 BAMBOO_CLOSE_CRITICAL_SECTION();
1060 // euqueue current task descriptor
1061 if(currtpd != NULL) {
1063 BAMBOO_DEBUGPRINT(0xe504);
1065 for(i=0; i<currtpd->numParameters; i++) {
1066 BAMBOO_START_CRITICAL_SECTION();
1067 gc_enqueue_I(currtpd->parameterArray[i]);
1068 BAMBOO_CLOSE_CRITICAL_SECTION();
1073 BAMBOO_DEBUGPRINT(0xe505);
1075 // euqueue active tasks
1076 struct genpointerlist * ptr=activetasks->list;
1078 struct taskparamdescriptor *tpd=ptr->src;
1080 for(i=0; i<tpd->numParameters; i++) {
1081 BAMBOO_START_CRITICAL_SECTION();
1082 gc_enqueue_I(tpd->parameterArray[i]);
1083 BAMBOO_CLOSE_CRITICAL_SECTION();
1089 BAMBOO_DEBUGPRINT(0xe506);
1091 // enqueue cached transferred obj
1092 struct QueueItem * tmpobjptr = getHead(&objqueue);
1093 while(tmpobjptr != NULL) {
1094 struct transObjInfo * objInfo =
1095 (struct transObjInfo *)(tmpobjptr->objectptr);
1096 BAMBOO_START_CRITICAL_SECTION();
1097 gc_enqueue_I(objInfo->objptr);
1098 BAMBOO_CLOSE_CRITICAL_SECTION();
1099 tmpobjptr = getNextQueueItem(tmpobjptr);
1103 BAMBOO_DEBUGPRINT(0xe507);
1105 // enqueue cached objs to be transferred
1106 struct QueueItem * item = getHead(totransobjqueue);
1107 while(item != NULL) {
1108 struct transObjInfo * totransobj =
1109 (struct transObjInfo *)(item->objectptr);
1110 BAMBOO_START_CRITICAL_SECTION();
1111 gc_enqueue_I(totransobj->objptr);
1112 BAMBOO_CLOSE_CRITICAL_SECTION();
1113 item = getNextQueueItem(item);
1114 } // while(item != NULL)
1115 } // void tomark(struct garbagelist * stackptr)
// Routes one discovered object to the right marker: shared objects
// hosted locally go onto this core's mark queue; shared objects hosted
// elsewhere trigger a GCMARKEDOBJ message to the host core; the final
// branch enqueues the object as well (presumably the non-shared /
// local-heap case — the else head is among the missing lines).
// NOTE(review): the early return for NULL, the local/remote else head,
// and closing braces are missing from this extraction.
1117 inline void markObj(void * objptr) {
1118 if(objptr == NULL) {
1121 if(ISSHAREDOBJ(objptr)) {
1122 int host = hostcore(objptr);
1123 if(BAMBOO_NUM_OF_CORE == host) {
1125 BAMBOO_START_CRITICAL_SECTION();
1126 gc_enqueue_I(objptr);
1127 BAMBOO_CLOSE_CRITICAL_SECTION();
1130 BAMBOO_DEBUGPRINT(0xbbbb);
1131 BAMBOO_DEBUGPRINT_REG(host);
1132 BAMBOO_DEBUGPRINT_REG(objptr);
1134 // send a msg to host informing that objptr is active
1135 send_msg_2(host, GCMARKEDOBJ, objptr);
// count outbound mark messages so the coordinator can balance
// send/receive totals in checkMarkStatue()
1136 gcself_numsendobjs++;
1139 BAMBOO_START_CRITICAL_SECTION();
1140 gc_enqueue_I(objptr);
1141 BAMBOO_CLOSE_CRITICAL_SECTION();
1142 } // if(ISSHAREDOBJ(objptr))
1143 } // void markObj(void * objptr)
// The per-core mark loop: seeds roots (via tomark when isfirst), then
// repeatedly drains the mark queue — marking word [6] of each object,
// diverting large objects to the large-object queue, forwarding
// remote-hosted objects to their host core, accumulating this core's
// live size in gccurr_heaptop — and finally reports completion to the
// coordinator (directly if it IS the coordinator, else via
// GCFINISHMARK).  Scans each object's pointer fields using
// pointerarray[type]: 0 = primitive array, 1 = pointer array,
// otherwise an offset table.
// NOTE(review): numerous interior lines are missing (declarations of
// type/size/isize/i/j, the markObj calls after computing objptr, the
// sendStall handling, loop closers); the visible structure is
// incomplete.
1145 inline void mark(bool isfirst,
1146 struct garbagelist * stackptr) {
1148 BAMBOO_DEBUGPRINT(0xed01);
1152 BAMBOO_DEBUGPRINT(0xed02);
1154 // enqueue root objs
1156 gccurr_heaptop = 0; // record the size of all active objs in this core
1157 // aligned but does not consider block boundaries
1158 gcmarkedptrbound = 0;
1161 BAMBOO_DEBUGPRINT(0xed03);
1164 bool checkfield = true;
1165 bool sendStall = false;
1167 while(MARKPHASE == gcphase) {
1169 BAMBOO_DEBUGPRINT(0xed04);
// inner loop: drain the queue via the non-destructive cursor
1171 while(gc_moreItems2()) {
1173 BAMBOO_DEBUGPRINT(0xed05);
1176 gcbusystatus = true;
1178 void * ptr = gc_dequeue2();
1180 BAMBOO_DEBUGPRINT_REG(ptr);
1185 // check if it is a shared obj
1186 if(ISSHAREDOBJ(ptr)) {
1187 // a shared obj, check if it is a local obj on this core
1188 if(isLarge(ptr, &type, &size)) {
1189 // ptr is a large object
// mark word [6]: 0 = unmarked, 1 = marked (2 = moved, see cacheLObjs)
1190 if(((int *)ptr)[6] == 0) {
1191 // not marked and not enqueued
1193 BAMBOO_DEBUGPRINT(0xecec);
1194 BAMBOO_DEBUGPRINT_REG(ptr);
1196 BAMBOO_START_CRITICAL_SECTION();
1197 gc_lobjenqueue_I(ptr, size, BAMBOO_NUM_OF_CORE);
1199 BAMBOO_CLOSE_CRITICAL_SECTION();
1201 ((int *)ptr)[6] = 1;
1204 bool islocal = isLocal(ptr);
1205 if (islocal && (((int *)ptr)[6] == 0)) {
1206 // ptr is an unmarked active object on this core
1207 ALIGNSIZE(size, &isize);
1208 gccurr_heaptop += isize;
1210 BAMBOO_DEBUGPRINT(0xaaaa);
1211 BAMBOO_DEBUGPRINT_REG(ptr);
1212 BAMBOO_DEBUGPRINT_REG(isize);
1215 ((int *)ptr)[6] = 1;
1216 if(ptr + size > gcmarkedptrbound) {
1217 gcmarkedptrbound = ptr + size;
1218 } // if(ptr + size > gcmarkedptrbound)
1219 } else if ((!islocal) && (((int *)ptr)[6] == 0)) {
1220 int host = hostcore(ptr);
1222 BAMBOO_DEBUGPRINT(0xbbbb);
1223 BAMBOO_DEBUGPRINT_REG(host);
1224 BAMBOO_DEBUGPRINT_REG(ptr);
1226 // send a msg to host informing that ptr is active
1227 send_msg_2(host, GCMARKEDOBJ, ptr);
1228 gcself_numsendobjs++;
1230 }// if(isLocal(ptr)) else ...
1231 } // if(isLarge(ptr, &type, &size)) else ...
1232 } // if(ISSHAREDOBJ(ptr))
1234 BAMBOO_DEBUGPRINT(0xed06);
1238 // scan all pointers in ptr
1239 unsigned INTPTR * pointer;
1240 pointer=pointerarray[type];
1242 /* Array of primitives */
1244 } else if (((INTPTR)pointer)==1) {
1245 /* Array of pointers */
1246 struct ArrayObject *ao=(struct ArrayObject *) ptr;
1247 int length=ao->___length___;
1249 for(j=0; j<length; j++) {
1251 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
// offset table: pointer[0] = field count, pointer[1..] = offsets
1255 INTPTR size=pointer[0];
1257 for(i=1; i<=size; i++) {
1258 unsigned int offset=pointer[i];
1259 void * objptr=*((void **)(((char *)ptr)+offset));
1262 } // if (pointer==0) else if ... else ...
1264 } // while(!isEmpty(gctomark))
1266 BAMBOO_DEBUGPRINT(0xed07);
1268 gcbusystatus = false;
1269 // send mark finish msg to core coordinator
1270 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
1272 BAMBOO_DEBUGPRINT(0xed08);
1274 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
1275 gcnumsendobjs[BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
1276 gcnumreceiveobjs[BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
1277 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
1281 BAMBOO_DEBUGPRINT(0xed09);
1283 send_msg_4(STARTUPCORE, GCFINISHMARK, BAMBOO_NUM_OF_CORE,
1284 gcself_numsendobjs, gcself_numreceiveobjs);
1289 BAMBOO_DEBUGPRINT(0xed0a);
1292 if(BAMBOO_NUM_OF_CORE == 0) {
1294 BAMBOO_DEBUGPRINT(0xed0b);
1298 } // while(MARKPHASE == gcphase)
// Helper for compact2Heaptop(): directs core `coren` to move its
// pending data to the current heap-top core.  Updates the shared move
// parameters in place (*p = destination address, *numblocks = block
// budget, *remain = space left in the current block), either filling
// the request from the current block or rolling over to the next
// block/next top core when the request does not fit.
// NOTE(review): the parameter list is truncated (p, numblocks, remain
// out-parameters are implied by usage), as are the else head between
// the local and remote dispatch, the BLOCKINDEX/b computation, and
// some closers.
1301 inline void compact2Heaptophelper(int coren,
// request size includes one cache line for the block header
1306 int memneed = gcrequiredmems[coren] + BAMBOO_CACHE_LINE_SIZE;
1307 if(STARTUPCORE == coren) {
// local core: set the shared move parameters directly
1309 gcmovestartaddr = *p;
1310 gcdstcore = gctopcore;
1311 gcblock2fill = *numblocks + 1;
// remote core: deliver the same parameters by message
1313 send_msg_4(coren, GCMOVESTART, gctopcore, *p, (*numblocks) + 1);
1316 BAMBOO_DEBUGPRINT_REG(coren);
1317 BAMBOO_DEBUGPRINT_REG(gctopcore);
1318 BAMBOO_DEBUGPRINT_REG(*p);
1319 BAMBOO_DEBUGPRINT_REG(*numblocks+1);
1321 if(memneed < *remain) {
1323 BAMBOO_DEBUGPRINT(0xd104);
// request fits in the current block: consume it
1326 gcrequiredmems[coren] = 0;
1327 gcloads[gctopcore] += memneed;
1328 *remain = *remain - memneed;
1331 BAMBOO_DEBUGPRINT(0xd105);
1333 // next available block
// request overflows the block: close it out, advance the top core to
// the next block (or next core in snake order) and recompute remain
1335 gcfilledblocks[gctopcore] += 1;
1337 BASEPTR(gctopcore, gcfilledblocks[gctopcore], &newbase);
1338 gcloads[gctopcore] = newbase;
1339 gcrequiredmems[coren] -= *remain - BAMBOO_CACHE_LINE_SIZE;
1340 gcstopblock[gctopcore]++;
1341 gctopcore = nextTopcore(gctopcore, gcheapdirection);
1342 *numblocks = gcstopblock[gctopcore];
1343 *p = gcloads[gctopcore];
1345 *remain = (b<NUMCORES)?((BAMBOO_SMEM_SIZE_L)-((*p)%(BAMBOO_SMEM_SIZE_L)))
1346 :((BAMBOO_SMEM_SIZE)-((*p)%(BAMBOO_SMEM_SIZE)));
1348 BAMBOO_DEBUGPRINT(0xd106);
1349 BAMBOO_DEBUGPRINT_REG(gctopcore);
1350 BAMBOO_DEBUGPRINT_REG(*p);
1351 BAMBOO_DEBUGPRINT_REG(b);
1352 BAMBOO_DEBUGPRINT_REG(*remain);
1354 } // if(memneed < remain)
1356 } // void compact2Heaptophelper(int, int*, int*, int*)
// Coordinator fallback when no core has spare memory but cores are
// still blocked with pending moves: locates the current heap top and
// issues GCMOVESTART directives (via compact2Heaptophelper) so blocked
// cores compact their remaining data to the heap top.
// NOTE(review): the declarations of p and b, the BLOCKINDEX call, and
// a break/continue inside the per-core loop are missing from this
// extraction.
1358 inline void compact2Heaptop() {
1359 // no cores with spare mem and some cores are blocked with pending move
1360 // find the current heap top and make them move to the heap top
1362 int numblocks = gcfilledblocks[gctopcore];
1363 //BASEPTR(gctopcore, numblocks, &p);
1364 p = gcloads[gctopcore];
// space left in the top core's current block (block size depends on
// whether it sits in the large-block region)
1367 int remain = (b<NUMCORES)?((BAMBOO_SMEM_SIZE_L)-(p%(BAMBOO_SMEM_SIZE_L)))
1368 :((BAMBOO_SMEM_SIZE)-(p%(BAMBOO_SMEM_SIZE)));
1369 // check if the top core finishes
1370 if(gccorestatus[gctopcore] != 0) {
1372 BAMBOO_DEBUGPRINT(0xd101);
1373 BAMBOO_DEBUGPRINT_REG(gctopcore);
1375 // let the top core finishes its own work first
1376 compact2Heaptophelper(gctopcore, &p, &numblocks, &remain);
1381 BAMBOO_DEBUGPRINT(0xd102);
1382 BAMBOO_DEBUGPRINT_REG(gctopcore);
1383 BAMBOO_DEBUGPRINT_REG(p);
1384 BAMBOO_DEBUGPRINT_REG(b);
1385 BAMBOO_DEBUGPRINT_REG(remain);
1387 /*if((gctopcore == STARTUPCORE) && (b == 0)) {
1388 remain -= gcreservedsb*BAMBOO_SMEM_SIZE;
1389 p += gcreservedsb*BAMBOO_SMEM_SIZE;
// serve every core that is still busy and waiting on memory
1391 for(int i = 0; i < NUMCORES; i++) {
1392 BAMBOO_START_CRITICAL_SECTION();
1393 if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0)) {
1395 BAMBOO_DEBUGPRINT(0xd103);
1397 compact2Heaptophelper(i, &p, &numblocks, &remain);
1398 if(gccorestatus[gctopcore] != 0) {
1400 BAMBOO_DEBUGPRINT(0xd101);
1401 BAMBOO_DEBUGPRINT_REG(gctopcore);
1403 // the top core is not free now
1406 } // if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0))
1407 BAMBOO_CLOSE_CRITICAL_SECTION();
1408 } // for(i = 0; i < NUMCORES; i++)
1410 BAMBOO_DEBUGPRINT(0xd106);
1412 } // void compact2Heaptop()
// Returns the core that follows `topcore` in heap order, walking the chip's
// 2D core grid (bamboo_height columns) in a serpentine pattern controlled by
// `direction`. NOTE(review): most of the body (lines 1417-1456 of the
// original) is elided here, including the special-casing for the 62-core
// layout — the x/y updates between the visible conditions are not shown.
1414 inline int nextTopcore(int topcore, bool direction) {
1415 int nextopcore = topcore;
1416 if((NUMCORES == 62) && (nextopcore>5)) {
// Convert linear core id to (column, row) within the grid.
1419 int x = nextopcore / bamboo_height;
1420 int y = nextopcore % bamboo_height;
// Even rows advance one way, odd rows the other (serpentine traversal).
1421 if((direction && (y%2 == 0)) || ((!direction) && (y%2))) {
1434 if((x == 0) || ((x==1) &&(y==6))) {
1450 nextopcore = x*bamboo_height+y;
1457 } // int nextTopcore(int topcore, bool direction)
// Coordinator routine run during the compact phase: pairs cores that have
// finished and still own spare memory (sourcecore) with cores blocked on a
// pending move request (dstcore), then dispatches the move. If nothing is
// running and nothing is blocked, advances gcphase to SUBTLECOMPACTPHASE.
1459 inline void resolvePendingMoveRequest() {
1461 BAMBOO_DEBUGPRINT(0xeb01);
1464 BAMBOO_DEBUGPRINT(0xeeee);
1465 for(int k = 0; k < NUMCORES; k++) {
1466 BAMBOO_DEBUGPRINT(0xf000+k);
1467 BAMBOO_DEBUGPRINT_REG(gccorestatus[k]);
1468 BAMBOO_DEBUGPRINT_REG(gcloads[k]);
1469 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[k]);
1470 BAMBOO_DEBUGPRINT_REG(gcstopblock[k]);
1472 BAMBOO_DEBUGPRINT(0xffff);
1476 bool nosparemem = true;
1477 bool haspending = false;
1478 bool hasrunning = false;
1479 bool noblock = false;
1480 int dstcore = 0; // the core who need spare mem
1481 int sourcecore = 0; // the core who has spare mem
// Two-cursor scan: i looks for a core with spare memory, j for a core with
// a pending request. NOTE(review): the cursor-advance statements are on
// elided lines — confirm loop progress against the full file.
1482 for(i = j = 0; (i < NUMCORES) && (j < NUMCORES);) {
1484 // check if there are cores with spare mem
1485 if(gccorestatus[i] == 0) {
1486 // finished working, check if it still have spare mem
1487 if(gcfilledblocks[i] < gcstopblock[i]) {
1488 // still have spare mem
1491 } // if(gcfilledblocks[i] < gcstopblock[i]) else ...
1496 if(gccorestatus[j] != 0) {
1497 // not finished, check if it has pending move requests
1498 if((gcfilledblocks[j]==gcstopblock[j])&&(gcrequiredmems[j]>0)) {
1503 } // if((gcfilledblocks[i] == gcstopblock[i])...) else ...
1504 } // if(gccorestatus[i] == 0) else ...
1506 } // if(!haspending)
1507 if(!nosparemem && haspending) {
// Found a (sourcecore, dstcore) pair: carve spare memory off sourcecore
// with interrupts disabled, keeping any unsatisfied remainder pending.
1511 BAMBOO_START_CRITICAL_SECTION();
1512 gcrequiredmems[dstcore] = assignSpareMem_I(sourcecore,
1513 gcrequiredmems[dstcore],
1516 BAMBOO_CLOSE_CRITICAL_SECTION();
1518 BAMBOO_DEBUGPRINT(0xeb02);
1519 BAMBOO_DEBUGPRINT_REG(sourcecore);
1520 BAMBOO_DEBUGPRINT_REG(dstcore);
1521 BAMBOO_DEBUGPRINT_REG(startaddr);
1522 BAMBOO_DEBUGPRINT_REG(tomove);
1524 if(STARTUPCORE == dstcore) {
1526 BAMBOO_DEBUGPRINT(0xeb03);
// Startup core is directed via shared globals instead of a message.
1528 gcdstcore = sourcecore;
1530 gcmovestartaddr = startaddr;
1531 gcblock2fill = tomove;
1534 BAMBOO_DEBUGPRINT(0xeb04);
1536 send_msg_4(dstcore, GCMOVESTART, sourcecore, startaddr, tomove);
1543 } // for(i = 0; i < NUMCORES; i++)
1545 BAMBOO_DEBUGPRINT(0xcccc);
1546 BAMBOO_DEBUGPRINT_REG(hasrunning);
1547 BAMBOO_DEBUGPRINT_REG(haspending);
1548 BAMBOO_DEBUGPRINT_REG(noblock);
// Nothing running and nothing blocked: fall through to the subtle-compact
// phase handled by compact2Heaptop().
1551 if(!hasrunning && !noblock) {
1552 gcphase = SUBTLECOMPACTPHASE;
1556 } // void resovePendingMoveRequest()
// Cursor state shared by the compact-phase copy routines (nextSBlock,
// initOrig_Dst, nextBlock, moveobj, compacthelper). One instance tracks the
// source ("orig") scan position, another the destination ("to") fill
// position. NOTE(review): the `struct moveHelper {` opening line is on an
// elided line of this listing.
1559 int numblocks; // block num for heap
1560 INTPTR base; // base virtual address of current heap block
1561 INTPTR ptr; // virtual address of current heap top
1562 int offset; // offset in current heap block
1563 int blockbase; // virtual address of current small block to check
1564 int blockbound; // bound virtual address of current small block
1565 int sblockindex; // index of the small blocks
1566 int top; // real size of current heap block to check
1567 int bound; // bound size of current heap block to check
1568 }; // struct moveHelper
// Advances the source cursor `orig` to the next small block to scan,
// skipping blocks occupied by large objects (gcsbstarttbl[i] == -1) and
// honoring blocks whose live data starts mid-block (gcsbstarttbl[i] != 0).
// NOTE(review): the `innernextSBlock:` label targeted by the gotos below is
// on an elided line of this listing.
1570 inline void nextSBlock(struct moveHelper * orig) {
1571 orig->blockbase = orig->blockbound;
1573 BAMBOO_DEBUGPRINT(0xecc0);
1574 BAMBOO_DEBUGPRINT_REG(orig->blockbase);
1575 BAMBOO_DEBUGPRINT_REG(orig->blockbound);
1576 BAMBOO_DEBUGPRINT_REG(orig->bound);
1577 BAMBOO_DEBUGPRINT_REG(orig->ptr);
// A zero word at the cursor marks the end of live data in this block.
1579 if((orig->blockbase >= orig->bound) || (orig->ptr >= orig->bound)
1580 || ((*((int*)orig->ptr))==0) || ((*((int*)orig->blockbase))==0)) {
1582 // end of current heap block, jump to next one
1585 BAMBOO_DEBUGPRINT(0xecc1);
1586 BAMBOO_DEBUGPRINT_REG(orig->numblocks);
1588 BASEPTR(BAMBOO_NUM_OF_CORE, orig->numblocks, &(orig->base));
1590 BAMBOO_DEBUGPRINT(orig->base);
1592 orig->bound = orig->base + BAMBOO_SMEM_SIZE;
1593 orig->blockbase = orig->base;
1594 orig->sblockindex = (orig->blockbase-BAMBOO_BASE_VA)/BAMBOO_SMEM_SIZE;
1595 if(gcsbstarttbl[orig->sblockindex] == -1) {
// -1: this small block is covered by a large object — skip it entirely.
1598 BAMBOO_DEBUGPRINT(0xecc2);
1600 orig->sblockindex += 1;
1601 orig->blockbase += BAMBOO_SMEM_SIZE;
1602 goto innernextSBlock;
1603 } else if(gcsbstarttbl[orig->sblockindex] != 0) {
1604 // not start from the very beginning
1605 orig->blockbase = gcsbstarttbl[orig->sblockindex];
// First word of a block stores its used size; skip the block header.
1608 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
1609 orig->offset = BAMBOO_CACHE_LINE_SIZE;
1610 orig->ptr = orig->blockbase + orig->offset;
1612 BAMBOO_DEBUGPRINT(0xecc3);
1613 BAMBOO_DEBUGPRINT_REG(orig->base);
1614 BAMBOO_DEBUGPRINT_REG(orig->bound);
1615 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1617 if(orig->ptr >= orig->bound) {
1618 // met a lobj, move to next block
1619 goto innernextSBlock;
1621 } // void nextSBlock(struct moveHelper * orig)
// Initializes the two compact-phase cursors for this core: `to` (destination
// fill position, starting just past the cache-line-sized block header) and
// `orig` (source scan position), both anchored at this core's first heap
// block. Mirrors the large-object / mid-block handling of nextSBlock().
1623 inline void initOrig_Dst(struct moveHelper * orig,
1624 struct moveHelper * to) {
// Destination starts right after the per-block header line.
1627 to->top = to->offset = BAMBOO_CACHE_LINE_SIZE;
1628 to->bound = BAMBOO_SMEM_SIZE_L;
1629 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
1632 BAMBOO_DEBUGPRINT(0xef01);
1633 BAMBOO_DEBUGPRINT_REG(to->base);
// Dead code kept by the original author (reserved-sblock offset).
1635 /*if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
1636 to->base += gcreservedsb * BAMBOO_SMEM_SIZE;
1637 to->top += gcreservedsb * BAMBOO_SMEM_SIZE;
1639 to->ptr = to->base + to->offset;
1641 // init the orig ptr
1642 orig->numblocks = 0;
1643 orig->base = to->base;
1644 orig->bound = to->base + BAMBOO_SMEM_SIZE_L;
1645 orig->blockbase = orig->base;
1646 /*if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
1647 orig->sblockindex = gcreservedsb;
1649 orig->sblockindex = (orig->base - BAMBOO_BASE_VA) / BAMBOO_SMEM_SIZE;
1652 BAMBOO_DEBUGPRINT(0xef02);
1653 BAMBOO_DEBUGPRINT_REG(orig->base);
1654 BAMBOO_DEBUGPRINT_REG(orig->sblockindex);
1655 BAMBOO_DEBUGPRINT_REG(gcsbstarttbl);
1656 BAMBOO_DEBUGPRINT_REG(gcsbstarttbl[orig->sblockindex]);
1659 if(gcsbstarttbl[orig->sblockindex] == -1) {
// -1: first small block is covered by a large object — start past it.
1661 BAMBOO_DEBUGPRINT(0xef03);
1665 BAMBOO_BASE_VA+BAMBOO_SMEM_SIZE*(orig->sblockindex+1);
1668 } else if(gcsbstarttbl[orig->sblockindex] != 0) {
1670 BAMBOO_DEBUGPRINT(0xef04);
// Non-zero entry: live data begins mid-block at the recorded address.
1672 orig->blockbase = gcsbstarttbl[orig->sblockindex];
1675 BAMBOO_DEBUGPRINT(0xef05);
// First word of the block stores its used size.
1677 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
1678 orig->offset = BAMBOO_CACHE_LINE_SIZE;
1679 orig->ptr = orig->blockbase + orig->offset;
1681 BAMBOO_DEBUGPRINT(0xef06);
1682 BAMBOO_DEBUGPRINT_REG(orig->base);
1684 } // void initOrig_Dst(struct moveHelper * orig, struct moveHelper * to)
// Advances the destination cursor `to` to the start of its next heap block,
// reserving the leading cache line for the block header.
// NOTE(review): line 1689 of the original is elided here — presumably the
// to->numblocks increment consumed by BASEPTR below; confirm in the full file.
1686 inline void nextBlock(struct moveHelper * to) {
1687 to->top = to->bound + BAMBOO_CACHE_LINE_SIZE; // header!
1688 to->bound += BAMBOO_SMEM_SIZE;
1690 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
1691 to->offset = BAMBOO_CACHE_LINE_SIZE;
1692 to->ptr = to->base + to->offset;
1693 } // void nextBlock(struct moveHelper * to)
1695 // endaddr does not contain spaces for headers
// Examines the object at orig->ptr and, if its mark word (header word [6])
// shows it live, copies it to the destination cursor `to`, records the
// old->new address mapping in gcpointertbl, and sets the mark word to 2
// ("moved, needs flush"). Returns via bool whether compaction should stop
// (e.g. stopblock filled). NOTE(review): several control-flow lines
// (returns, else branches, size-0 handling) are elided in this listing.
1696 inline bool moveobj(struct moveHelper * orig,
1697 struct moveHelper * to,
1699 if(stopblock == 0) {
1704 BAMBOO_DEBUGPRINT(0xe201);
1705 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1706 BAMBOO_DEBUGPRINT_REG(to->ptr);
// Skip -2 filler bytes used as alignment padding between objects.
1714 while((char)(*((int*)(orig->ptr))) == (char)(-2)) {
1715 orig->ptr = (int*)(orig->ptr) + 1;
1717 if((orig->ptr > orig->bound) || (orig->ptr == orig->blockbound)) {
1722 BAMBOO_DEBUGPRINT(0xe202);
1724 // check the obj's type, size and mark flag
1725 type = ((int *)(orig->ptr))[0];
1728 // end of this block, go to next one
1731 } else if(type < NUMCLASSES) {
// Plain object: size comes straight from the class-size table.
1733 size = classsize[type];
// Array object: element size times length plus the array header.
1736 struct ArrayObject *ao=(struct ArrayObject *)(orig->ptr);
1737 int elementsize=classsize[type];
1738 int length=ao->___length___;
1739 size=sizeof(struct ArrayObject)+length*elementsize;
// Header word [6] is the GC mark field.
1741 mark = ((int *)(orig->ptr))[6];
1743 BAMBOO_DEBUGPRINT(0xe203);
1747 BAMBOO_DEBUGPRINT(0xe204);
1749 // marked obj, copy it to current heap top
1750 // check to see if remaining space is enough
1751 ALIGNSIZE(size, &isize);
1752 if(to->top + isize > to->bound) {
1753 // fill -1 indicating the end of this block
1754 /*if(to->top != to->bound) {
1755 *((int*)to->ptr) = -1;
1757 //memset(to->ptr+1, -2, to->bound - to->top - 1);
1758 // fill the header of this block and then go to next block
1759 to->offset += to->bound - to->top;
1760 memset(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
1761 (*((int*)(to->base))) = to->offset;
1763 if(stopblock == to->numblocks) {
1764 // already fulfilled the block
1768 } // if(stopblock == to->numblocks)
1769 } // if(to->top + isize > to->bound)
1770 // set the mark field to 2, indicating that this obj has been moved and need to be flushed
1771 ((int *)(orig->ptr))[6] = 2;
1772 if(to->ptr != orig->ptr) {
1773 memcpy(to->ptr, orig->ptr, size);
1774 // fill the remaining space with -2
1775 memset(to->ptr+size, -2, isize-size);
1777 // store mapping info
// Record old->new address with interrupts disabled (shared hashtable).
1778 BAMBOO_START_CRITICAL_SECTION();
1779 RuntimeHashadd_I(gcpointertbl, orig->ptr, to->ptr);
1780 BAMBOO_CLOSE_CRITICAL_SECTION();
1782 BAMBOO_DEBUGPRINT(0xcdce);
1783 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1784 BAMBOO_DEBUGPRINT_REG(to->ptr);
1786 gccurr_heaptop -= isize;
1788 to->offset += isize;
1790 if(to->top == to->bound) {
1791 // fill the header of this block and then go to next block
1792 memset(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
1793 (*((int*)(to->base))) = to->offset;
1798 BAMBOO_DEBUGPRINT(0xe205);
1803 BAMBOO_DEBUGPRINT_REG(isize);
1804 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1805 BAMBOO_DEBUGPRINT_REG(orig->bound);
1807 if((orig->ptr > orig->bound) || (orig->ptr == orig->blockbound)) {
1809 BAMBOO_DEBUGPRINT(0xe206);
1814 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1817 } //bool moveobj(struct moveHelper* orig,struct moveHelper* to,int* endaddr)
1819 // should be invoked with interrupt closed
// Carves spare memory off `sourcecore` to satisfy `requiredmem` (+ one cache
// line for the header). Writes the grant's start address and block count
// through the out-parameters (tomove/startaddr — order per the elided
// signature, confirm in full file). Returns 0 if fully satisfied inside the
// source core's current block, else the unsatisfied remainder.
1820 inline int assignSpareMem_I(int sourcecore,
1825 BLOCKINDEX(gcloads[sourcecore], &b);
// End address of the source core's current block (large vs small layout).
1826 int boundptr = b<NUMCORES?(b+1)*BAMBOO_SMEM_SIZE_L
1827 :BAMBOO_LARGE_SMEM_BOUND+(b-NUMCORES+1)*BAMBOO_SMEM_SIZE;
1828 int remain = boundptr - gcloads[sourcecore];
1829 int memneed = requiredmem + BAMBOO_CACHE_LINE_SIZE;
1830 *startaddr = gcloads[sourcecore];
1831 *tomove = gcfilledblocks[sourcecore] + 1;
1832 if(memneed < remain) {
// Fits in the current block: just bump the fill pointer.
1833 gcloads[sourcecore] += memneed;
1836 // next available block
1837 gcfilledblocks[sourcecore] += 1;
1839 BASEPTR(sourcecore, gcfilledblocks[sourcecore], &newbase);
1840 gcloads[sourcecore] = newbase;
1841 return requiredmem-remain;
1843 } // int assignSpareMem_I(int ,int * , int * , int * )
1845 // should be invoked with interrupt closed
// Scans all cores for one that has finished compacting and still has spare
// blocks; if found, assigns that memory to `requiredcore` via
// assignSpareMem_I (return-true path is on elided lines). Otherwise records
// the request in gcrequiredmems[] so it can be served later.
1846 inline bool gcfindSpareMem_I(int * startaddr,
1851 for(int k = 0; k < NUMCORES; k++) {
1852 if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
1853 // check if this stopped core has enough mem
1854 assignSpareMem_I(k, requiredmem, tomove, startaddr);
1859 // if can not find spare mem right now, hold the request
1860 gcrequiredmems[requiredcore] = requiredmem;
1863 } //bool gcfindSpareMem_I(int* startaddr,int* tomove,int mem,int core)
// Core of the compact phase for one core: repeatedly moveobj()s live objects
// into `to` until either all marked objects (up to gcmarkedptrbound) are
// moved or gcblock2fill is filled. Then finalizes the block header, reports
// results via filledblocks/heaptopptr, and — depending on whether this is
// the startup core — either asks for spare memory directly or sends a
// GCFINISHCOMPACT message. Returns whether compaction fully finished.
// NOTE(review): the listing elides the do/while opening, several branch
// lines, and the wait loops between the visible statements.
1865 inline bool compacthelper(struct moveHelper * orig,
1866 struct moveHelper * to,
1869 bool * localcompact) {
1870 // scan over all objs in this block, compact the marked objs
1871 // loop stop when finishing either scanning all active objs or
1872 // fulfilled the gcstopblock
1874 BAMBOO_DEBUGPRINT(0xe101);
1875 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
1879 bool stop = moveobj(orig, to, gcblock2fill);
1883 } while(orig->ptr < gcmarkedptrbound);
1884 // if no objs have been compact, do nothing,
1885 // otherwise, fill the header of this block
1886 if(to->offset > BAMBOO_CACHE_LINE_SIZE) {
// First word of the destination block records the used size.
1887 memset(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
1888 (*((int*)(to->base))) = to->offset;
// Empty block: give back the header reservation.
1892 to->top -= BAMBOO_CACHE_LINE_SIZE;
1893 } // if(to->offset > BAMBOO_CACHE_LINE_SIZE) else ...
1895 *heaptopptr = to->ptr;
1896 *filledblocks = to->numblocks;
1899 BAMBOO_DEBUGPRINT(0xe102);
1900 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1901 BAMBOO_DEBUGPRINT_REG(gcmarkedptrbound);
1902 BAMBOO_DEBUGPRINT_REG(*heaptopptr);
1903 BAMBOO_DEBUGPRINT_REG(*filledblocks);
1904 BAMBOO_DEBUGPRINT_REG(gccurr_heaptop);
1907 // send msgs to core coordinator indicating that the compact is finishing
1908 // send compact finish message to core coordinator
1909 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
// Startup core updates the shared tables directly, no message needed.
1910 gcfilledblocks[BAMBOO_NUM_OF_CORE] = *filledblocks;
1911 gcloads[BAMBOO_NUM_OF_CORE] = *heaptopptr;
1912 if(orig->ptr < gcmarkedptrbound) {
1914 BAMBOO_DEBUGPRINT(0xe103);
// Still objects left: try to grab spare memory with interrupts off.
1918 BAMBOO_START_CRITICAL_SECTION();
1919 if(gcfindSpareMem_I(&gcmovestartaddr, &gcblock2fill, &gcdstcore,
1920 gccurr_heaptop, BAMBOO_NUM_OF_CORE)) {
1922 BAMBOO_DEBUGPRINT(0xe104);
1926 BAMBOO_CLOSE_CRITICAL_SECTION();
1928 BAMBOO_DEBUGPRINT(0xe105);
1932 BAMBOO_CLOSE_CRITICAL_SECTION();
1935 BAMBOO_DEBUGPRINT(0xe106);
1937 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
1942 if(orig->ptr < gcmarkedptrbound) {
1944 BAMBOO_DEBUGPRINT(0xe107);
// Non-startup core: report partial progress and remaining need (heaptop).
1948 send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
1949 *filledblocks, *heaptopptr, gccurr_heaptop);
1952 BAMBOO_DEBUGPRINT(0xe108);
1953 BAMBOO_DEBUGPRINT_REG(*heaptopptr);
1955 // finish compacting
1956 send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
1957 *filledblocks, *heaptopptr, 0);
1959 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
1961 if(orig->ptr < gcmarkedptrbound) {
1963 BAMBOO_DEBUGPRINT(0xe109);
1965 // still have unpacked obj
1973 BAMBOO_DEBUGPRINT(0xe10a);
// Re-aim the destination cursor at the memory just granted (possibly on
// another core, gcdstcore), reconstructing base/bound/top from the grant.
1976 to->ptr = gcmovestartaddr;
1977 to->numblocks = gcblock2fill - 1;
1978 to->bound = (to->numblocks==0)?
1980 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
1981 BASEPTR(gcdstcore, to->numblocks, &(to->base));
1982 to->offset = to->ptr - to->base;
1983 to->top = (to->numblocks==0)?
1984 (to->offset):(to->bound-BAMBOO_SMEM_SIZE+to->offset);
1986 to->offset = BAMBOO_CACHE_LINE_SIZE;
1987 to->ptr += to->offset; // for header
1988 to->top += to->offset;
1989 if(gcdstcore == BAMBOO_NUM_OF_CORE) {
1990 *localcompact = true;
1992 *localcompact = false;
1997 BAMBOO_DEBUGPRINT(0xe10b);
2000 } // void compacthelper()
// Worker-core entry point for the compact phase: verifies the phase, sets up
// the source/destination cursors, and runs compacthelper(). NOTE(review):
// the RUNMALLOC'd helpers are not visibly freed in this elided listing —
// confirm cleanup (lines 2019-2023) in the full file.
2002 inline void compact() {
2003 if(COMPACTPHASE != gcphase) {
// Fatal: compact() called outside the compact phase.
2004 BAMBOO_EXIT(0xb102);
2007 // initialize pointers for comapcting
2008 struct moveHelper * orig =
2009 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2010 struct moveHelper * to =
2011 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2013 initOrig_Dst(orig, to);
2015 int filledblocks = 0;
2016 INTPTR heaptopptr = 0;
2017 bool localcompact = true;
2018 compacthelper(orig, to, &filledblocks, &heaptopptr, &localcompact);
// Translates a pre-compaction shared-object pointer to its post-compaction
// address using gcpointertbl. If the mapping is not cached locally, requests
// it from the object's host core via GCMAPREQUEST and (per the elided lines)
// presumably waits for the reply before re-querying — confirm in full file.
// Non-shared pointers fall through (return path elided).
2024 inline void * flushObj(void * objptr) {
2026 BAMBOO_DEBUGPRINT(0xe401);
2028 void * dstptr = NULL;
2029 if(ISSHAREDOBJ(objptr)) {
2031 BAMBOO_DEBUGPRINT(0xe402);
2032 BAMBOO_DEBUGPRINT_REG(objptr);
2034 // a shared obj ptr, change to new address
// gcpointertbl is shared across cores: query with interrupts off.
2035 BAMBOO_START_CRITICAL_SECTION();
2036 RuntimeHashget(gcpointertbl, objptr, &dstptr);
2037 BAMBOO_CLOSE_CRITICAL_SECTION();
2039 BAMBOO_DEBUGPRINT_REG(dstptr);
2041 if(NULL == dstptr) {
2043 BAMBOO_DEBUGPRINT(0xe403);
2044 BAMBOO_DEBUGPRINT_REG(objptr);
2045 BAMBOO_DEBUGPRINT_REG(hostcore(objptr));
2047 // send msg to host core for the mapping info
2048 gcobj2map = (int)objptr;
2051 send_msg_3(hostcore(objptr), GCMAPREQUEST, (int)objptr,
2052 BAMBOO_NUM_OF_CORE);
// Re-query after the host's reply has populated the table.
2058 BAMBOO_START_CRITICAL_SECTION();
2059 RuntimeHashget(gcpointertbl, objptr, &dstptr);
2060 BAMBOO_CLOSE_CRITICAL_SECTION();
2062 BAMBOO_DEBUGPRINT_REG(dstptr);
2065 } // if(ISSHAREDOBJ(objptr))
2067 BAMBOO_DEBUGPRINT(0xe404);
2070 } // void flushObj(void * objptr, void ** tochange)
// Rewrites every runtime-held reference to its post-compaction address via
// flushObj(): the mutator stack (garbagelist chain), per-class task object
// queues, the current task descriptor, active tasks, and both transferred-
// object queues.
2072 inline void flushRuntimeObj(struct garbagelist * stackptr) {
2074 // flush current stack
2075 while(stackptr!=NULL) {
2076 for(i=0; i<stackptr->size; i++) {
2077 if(stackptr->array[i] != NULL) {
2078 stackptr->array[i] = flushObj(stackptr->array[i]);
2081 stackptr=stackptr->next;
// Flush the parameter queues of every class's task dispatch tables.
2085 for(i=0; i<NUMCLASSES; i++) {
2086 struct parameterwrapper ** queues =
2087 objectqueues[BAMBOO_NUM_OF_CORE][i];
2088 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
2089 for(j = 0; j < length; ++j) {
2090 struct parameterwrapper * parameter = queues[j];
2091 struct ObjectHash * set=parameter->objectset;
2092 struct ObjectNode * ptr=set->listhead;
// NOTE(review): the while-loop head and ptr advance around this line are
// elided — confirm list traversal in the full file.
2094 ptr->key = flushObj((void *)ptr->key);
2100 // flush current task descriptor
2101 if(currtpd != NULL) {
2102 for(i=0; i<currtpd->numParameters; i++) {
2103 currtpd->parameterArray[i] = flushObj(currtpd->parameterArray[i]);
2107 // flush active tasks
2108 struct genpointerlist * ptr=activetasks->list;
2110 struct taskparamdescriptor *tpd=ptr->src;
2112 for(i=0; i<tpd->numParameters; i++) {
2113 tpd->parameterArray[i] = flushObj(tpd->parameterArray[i]);
2118 // flush cached transferred obj
2119 struct QueueItem * tmpobjptr = getHead(&objqueue);
2120 while(tmpobjptr != NULL) {
2121 struct transObjInfo * objInfo =
2122 (struct transObjInfo *)(tmpobjptr->objectptr);
2123 objInfo->objptr = flushObj(objInfo->objptr);
2124 tmpobjptr = getNextQueueItem(tmpobjptr);
2127 // flush cached objs to be transferred
2128 struct QueueItem * item = getHead(totransobjqueue);
2129 while(item != NULL) {
2130 struct transObjInfo * totransobj =
2131 (struct transObjInfo *)(item->objectptr);
2132 totransobj->objptr = flushObj(totransobj->objptr);
2133 item = getNextQueueItem(item);
2134 } // while(item != NULL)
2135 } // void flushRuntimeObj(struct garbagelist * stackptr)
// Flush phase for one core: first rewrites runtime-held references, then
// drains the GC work queue and, for each moved object (mark word == 2),
// rewrites all of its internal pointer fields via flushObj() using the
// per-type pointer layout table, finally clearing the mark word. Reports
// completion to the coordinator (message path for non-startup cores).
2137 inline void flush(struct garbagelist * stackptr) {
2138 flushRuntimeObj(stackptr);
2140 while(gc_moreItems()) {
2142 BAMBOO_DEBUGPRINT(0xe301);
2144 void * ptr = gc_dequeue();
2145 void * tptr = flushObj(ptr);
2147 BAMBOO_DEBUGPRINT(0xe302);
2148 BAMBOO_DEBUGPRINT_REG(ptr);
2149 BAMBOO_DEBUGPRINT_REG(tptr);
// Mark word 2 means "moved during compact, pointers not yet fixed up".
2154 if(((int *)(ptr))[6] == 2) {
2155 int type = ((int *)(ptr))[0];
2156 // scan all pointers in ptr
2157 unsigned INTPTR * pointer;
// pointerarray[type]: 0 = no pointers, 1 = pointer array, else a layout
// table whose [0] is the field count and [1..] are field offsets.
2158 pointer=pointerarray[type];
2160 BAMBOO_DEBUGPRINT(0xe303);
2161 BAMBOO_DEBUGPRINT_REG(pointer);
2164 /* Array of primitives */
2166 } else if (((INTPTR)pointer)==1) {
2168 BAMBOO_DEBUGPRINT(0xe304);
2170 /* Array of pointers */
2171 struct ArrayObject *ao=(struct ArrayObject *) ptr;
2172 int length=ao->___length___;
2174 for(j=0; j<length; j++) {
2176 BAMBOO_DEBUGPRINT(0xe305);
// Element storage starts immediately after the length field.
2179 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
2181 BAMBOO_DEBUGPRINT_REG(objptr);
2183 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] =
2188 BAMBOO_DEBUGPRINT(0xe306);
2190 INTPTR size=pointer[0];
2192 for(i=1; i<=size; i++) {
2194 BAMBOO_DEBUGPRINT(0xe307);
2196 unsigned int offset=pointer[i];
2197 void * objptr=*((void **)(((char *)ptr)+offset));
2199 BAMBOO_DEBUGPRINT_REG(objptr);
2201 *((void **)(((char *)ptr)+offset)) = flushObj(objptr);
2202 } // for(i=1; i<=size; i++)
2203 } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
2204 // restore the mark field, indicating that this obj has been flushed
2205 ((int *)(ptr))[6] = 0;
2206 } // if(((int *)(ptr))[6] == 2)
2207 } // while(moi != NULL)
2209 BAMBOO_DEBUGPRINT(0xe308);
2211 // send flush finish message to core coordinator
2212 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
2213 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
2215 send_msg_2(STARTUPCORE, GCFINISHFLUSH, BAMBOO_NUM_OF_CORE);
2218 BAMBOO_DEBUGPRINT(0xe309);
// Worker-core GC driver: steps through the phases set by the coordinator
// (INIT -> MARK -> compact -> FLUSH -> FINISH), performing this core's share
// of each. The waiting/spin logic between the visible phase checks and the
// initGC()/compact()/flush() calls is on elided lines of this listing.
2222 inline void gc_collect(struct garbagelist * stackptr) {
2223 // core collector routine
2225 //BAMBOO_START_CRITICAL_SECTION();
2226 if(INITPHASE == gcphase) {
2227 //BAMBOO_CLOSE_CRITICAL_SECTION();
2230 //BAMBOO_CLOSE_CRITICAL_SECTION();
2233 tprintf("Do initGC\n");
2236 //send init finish msg to core coordinator
2237 send_msg_2(STARTUPCORE, GCFINISHINIT, BAMBOO_NUM_OF_CORE);
2239 //BAMBOO_START_CRITICAL_SECTION();
2240 if(MARKPHASE == gcphase) {
2241 //BAMBOO_CLOSE_CRITICAL_SECTION();
2244 //BAMBOO_CLOSE_CRITICAL_SECTION();
2247 tprintf("Start mark phase\n");
2249 mark(true, stackptr);
2251 tprintf("Finish mark phase, start compact phase\n");
2255 tprintf("Finish compact phase\n");
2258 //BAMBOO_START_CRITICAL_SECTION();
2259 if(FLUSHPHASE == gcphase) {
2260 //BAMBOO_CLOSE_CRITICAL_SECTION();
2263 //BAMBOO_CLOSE_CRITICAL_SECTION();
2266 tprintf("Start flush phase\n");
2270 tprintf("Finish flush phase\n");
2274 //BAMBOO_START_CRITICAL_SECTION();
2275 if(FINISHPHASE == gcphase) {
2276 //BAMBOO_CLOSE_CRITICAL_SECTION();
2279 //BAMBOO_CLOSE_CRITICAL_SECTION();
2282 tprintf("Finish gc!\n");
2284 } // void gc_collect(struct garbagelist * stackptr)
// Top-level GC entry point. On core 0 it acts as the coordinator: drives the
// whole collection (INIT -> MARK -> large-object handling -> COMPACT /
// SUBTLECOMPACT -> FLUSH -> FINISH), broadcasting phase-start messages and
// polling gccorestatus[] for completion. On every other core it delegates to
// gc_collect(). NOTE(review): this listing is elided — wait loops, several
// branch/closing lines, and the worker-path else are not visible here.
2286 inline void gc(struct garbagelist * stackptr) {
2289 gcprocessing = false;
2293 // core coordinator routine
2294 if(0 == BAMBOO_NUM_OF_CORE) {
2296 tprintf("Check if can do gc or not\n");
2299 // not ready to do gc
2305 tprintf("start gc! \n");
2309 gcprocessing = true;
2311 waitconfirm = false;
// ---- INIT phase: tell every other core to initialize for GC. ----
2313 gcphase = INITPHASE;
2314 for(i = 1; i < NUMCORES; i++) {
2315 // send GC init messages to all cores
2316 send_msg_1(i, GCSTARTINIT);
2318 bool isfirst = true;
2319 bool allStall = false;
2323 tprintf("Check core status \n");
2326 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
2328 BAMBOO_START_CRITICAL_SECTION();
2329 if(gc_checkCoreStatus()) {
2330 BAMBOO_CLOSE_CRITICAL_SECTION();
2333 BAMBOO_CLOSE_CRITICAL_SECTION();
// ---- MARK phase: reset statuses and broadcast GCSTART. ----
2336 tprintf("Start mark phase \n");
2338 // all cores have finished compacting
2339 // restore the gcstatus of all cores
2340 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
2341 for(i = 1; i < NUMCORES; ++i) {
2342 gccorestatus[i] = 1;
2343 // send GC start messages to all cores
2344 send_msg_1(i, GCSTART);
2347 gcphase = MARKPHASE;
// The coordinator also participates in marking.
2349 while(MARKPHASE == gcphase) {
2350 mark(isfirst, stackptr);
2357 } // while(MARKPHASE == gcphase)
2358 // send msgs to all cores requiring large objs info
2359 numconfirm = NUMCORES - 1;
2360 for(i = 1; i < NUMCORES; ++i) {
2361 send_msg_1(i, GCLOBJREQUEST);
2363 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
2368 } // wait for responses
2370 tprintf("prepare to cache large objs \n");
2373 // cache all large objs
2375 // no enough space to cache large objs
2376 BAMBOO_EXIT(0xb103);
2378 // predict number of blocks to fill for each core
2379 int numpbc = loadbalance();
2381 numpbc = (BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_SMEM_SIZE);
2383 tprintf("mark phase finished \n");
// Compute the heap-top bound used to split cores into "inside the
// compacted region" (numpbc+1 blocks) vs outside (numpbc blocks).
2388 if(!gcheapdirection) {
2391 BASEPTR(gctopcore, tmpn, &tmptopptr);
2393 tmptopptr = (BAMBOO_BASE_VA) + (BAMBOO_SHARED_MEM_SIZE);
2395 BAMBOO_DEBUGPRINT(0xabab);
2396 BAMBOO_DEBUGPRINT_REG(tmptopptr);
// ---- COMPACT phase: broadcast GCSTARTCOMPACT with each core's quota. ----
2398 for(i = 0; i < NUMCORES; ++i) {
2400 BASEPTR(i, tmpn, &tmpcoreptr);
2401 //send start compact messages to all cores
2402 if (tmpcoreptr < tmptopptr) {
2403 gcstopblock[i] =numpbc + 1;
2404 if(i != STARTUPCORE) {
2405 send_msg_2(i, GCSTARTCOMPACT, numpbc+1);
2407 gcblock2fill = numpbc+1;
2408 } // if(i != STARTUPCORE)
2410 gcstopblock[i] = numpbc;
2411 if(i != STARTUPCORE) {
2412 send_msg_2(i, GCSTARTCOMPACT, numpbc);
2414 gcblock2fill = numpbc;
2415 } // if(i != STARTUPCORE)
2418 BAMBOO_DEBUGPRINT(0xf000+i);
2419 BAMBOO_DEBUGPRINT_REG(tmpcoreptr);
2420 BAMBOO_DEBUGPRINT_REG(gcstopblock[i]);
2422 // init some data strutures for compact phase
2424 gcfilledblocks[i] = 0;
2425 gcrequiredmems[i] = 0;
2429 bool finalcompact = false;
2430 // initialize pointers for comapcting
2431 struct moveHelper * orig =
2432 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2433 struct moveHelper * to =
2434 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2435 initOrig_Dst(orig, to);
2436 int filledblocks = 0;
2437 INTPTR heaptopptr = 0;
2438 bool finishcompact = false;
2439 bool iscontinue = true;
2440 bool localcompact = true;
// Coordinator interleaves its own compaction with serving move requests
// from other cores until the whole compact phase is done.
2441 while((COMPACTPHASE == gcphase) || (SUBTLECOMPACTPHASE == gcphase)) {
2442 if((!finishcompact) && iscontinue) {
2444 BAMBOO_DEBUGPRINT(0xe001);
2445 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
2447 finishcompact = compacthelper(orig, to, &filledblocks,
2448 &heaptopptr, &localcompact);
2450 BAMBOO_DEBUGPRINT(0xe002);
2451 BAMBOO_DEBUGPRINT_REG(finishcompact);
2452 BAMBOO_DEBUGPRINT_REG(gctomove);
2453 BAMBOO_DEBUGPRINT_REG(gcrequiredmems[0]);
2454 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[0]);
2455 BAMBOO_DEBUGPRINT_REG(gcstopblock[0]);
2459 if(gc_checkCoreStatus()) {
2460 // all cores have finished compacting
2461 // restore the gcstatus of all cores
2462 for(i = 0; i < NUMCORES; ++i) {
2463 gccorestatus[i] = 1;
2467 // check if there are spare mem for pending move requires
2468 if(COMPACTPHASE == gcphase) {
2470 BAMBOO_DEBUGPRINT(0xe003);
2472 resolvePendingMoveRequest();
2474 BAMBOO_DEBUGPRINT_REG(gctomove);
2478 BAMBOO_DEBUGPRINT(0xe004);
2482 } // if(gc_checkCoreStatus()) else ...
2486 BAMBOO_DEBUGPRINT(0xe005);
2487 BAMBOO_DEBUGPRINT_REG(gcmovestartaddr);
2488 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
2489 BAMBOO_DEBUGPRINT_REG(gctomove);
// A move grant arrived: retarget the destination cursor at the granted
// region (same reconstruction as in compacthelper()).
2491 to->ptr = gcmovestartaddr;
2492 to->numblocks = gcblock2fill - 1;
2493 to->bound = (to->numblocks==0)?
2495 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
2496 BASEPTR(gcdstcore, to->numblocks, &(to->base));
2497 to->offset = to->ptr - to->base;
2498 to->top = (to->numblocks==0)?
2499 (to->offset):(to->bound-BAMBOO_SMEM_SIZE+to->offset);
2501 to->offset = BAMBOO_CACHE_LINE_SIZE;
2502 to->ptr += to->offset; // for header
2503 to->top += to->offset;
2504 if(gcdstcore == BAMBOO_NUM_OF_CORE) {
2505 localcompact = true;
2507 localcompact = false;
2511 } else if(!finishcompact) {
2516 } // while(COMPACTPHASE == gcphase)
2518 tprintf("prepare to move large objs \n");
2524 tprintf("compact phase finished \n");
// ---- FLUSH phase: broadcast GCSTARTFLUSH, flush locally, poll. ----
2528 gcphase = FLUSHPHASE;
2529 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
2530 for(i = 1; i < NUMCORES; ++i) {
2531 // send start flush messages to all cores
2532 gccorestatus[i] = 1;
2533 send_msg_1(i, GCSTARTFLUSH);
2537 tprintf("Start flush phase \n");
2542 BAMBOO_DEBUGPRINT(0xefef);
2543 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
2544 while(FLUSHPHASE == gcphase) {
2545 // check the status of all cores
2546 if(gc_checkCoreStatus()) {
2549 } // while(FLUSHPHASE == gcphase)
2550 gcphase = FINISHPHASE;
2553 // need to create free memory list
2554 updateFreeMemList();
2556 tprintf("flush phase finished \n");
// ---- FINISH: broadcast GCFINISH and reset statuses. ----
2561 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
2562 for(i = 1; i < NUMCORES; ++i) {
2563 // send gc finish messages to all cores
2564 send_msg_1(i, GCFINISH);
2565 gccorestatus[i] = 1;
2568 tprintf("gc finished \n");
// Non-coordinator cores run the worker-side phase machine instead.
2572 gcprocessing = true;
2573 gc_collect(stackptr);
2576 // invalidate all shared mem pointers
2577 bamboo_cur_msp = NULL;
2578 bamboo_smem_size = 0;
2581 gcprocessing = false;
2583 } // void gc(struct garbagelist * stackptr)