3 #include "multicoregarbage.h"
4 #include "multicoreruntime.h"
5 #include "runtime_arch.h"
6 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
8 #include "ObjectHash.h"
9 #include "GCSharedHash.h"
11 // TODO for profiling the flush phase
13 /*int num_mapinforequest;
15 unsigned long long marktime;*/
// External task/runtime state shared with the multicore runtime.
// (The duplicate extern declaration of objectqueues that appeared here
// has been removed; duplicate compatible declarations are legal C but
// redundant and confusing.)
19 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
20 extern int numqueues[][NUMCLASSES];
22 extern struct genhashtable * activetasks;
24 extern struct taskparamdescriptor *currtpd;
26 extern struct LockValue runtime_locks[MAXTASKPARAMS];
27 extern int runtime_locklen;
30 extern unsigned int gcmem_mixed_threshold;
31 extern unsigned int gcmem_mixed_usedmem;
// Mark-queue bookkeeping (the opening of 'struct pointerblock' and its
// ptrs[NUMPTRS] array are above this excerpt). gchead is the enqueue end,
// gctail the destructive dequeue cursor, gctail2 a secondary cursor for
// non-destructive scans, and gcspare caches one freed block so the
// enqueue path can avoid a fresh RUNMALLOC_I (see gc_enqueue_I).
36 struct pointerblock *next;
39 struct pointerblock *gchead=NULL;
41 struct pointerblock *gctail=NULL;
43 struct pointerblock *gctail2=NULL;
45 struct pointerblock *gcspare=NULL;
47 #define NUMLOBJPTRS 20
// One chunk of the large-object queue: parallel arrays recording each
// large object's start address, byte length, and host core id, doubly
// linked so the queue can also be walked in reverse (gc_lobjdequeue3_I).
49 struct lobjpointerblock {
50 void * lobjs[NUMLOBJPTRS];
51 //void * dsts[NUMLOBJPTRS];
52 int lengths[NUMLOBJPTRS];
53 //void * origs[NUMLOBJPTRS];
54 int hosts[NUMLOBJPTRS];
55 struct lobjpointerblock *next;
56 struct lobjpointerblock *prev;
// Large-object queue cursors: head/headindex is the enqueue end,
// tail/tailindex the destructive dequeue end; the *2 pair is a secondary
// cursor for non-destructive and reverse traversals; gclobjspare caches
// one free block for reuse by gc_lobjenqueue_I.
59 struct lobjpointerblock *gclobjhead=NULL;
60 int gclobjheadindex=0;
61 struct lobjpointerblock *gclobjtail=NULL;
62 int gclobjtailindex=0;
63 struct lobjpointerblock *gclobjtail2=NULL;
64 int gclobjtailindex2=0;
65 struct lobjpointerblock *gclobjspare=NULL;
68 // dump whole mem in blocks
// Debug helper: print the reserved sblock-table area and then the entire
// shared heap, 16 words (4*16 bytes) per printf line, each line tagged
// with the printing tile's (x,y) coordinate. At each block boundary it
// also prints the block #, sblock #, and the owning core looked up via
// gc_block2core / BAMBOO_COORDS.
69 inline void dumpSMem() {
77 printf("(%x,%x) Dump shared mem: \n", udn_tile_coord_x(),
79 // reserved blocks for sblocktbl
80 printf("(%x,%x) ++++ reserved sblocks ++++ \n", udn_tile_coord_x(),
// walk the reserved region [BAMBOO_BASE_VA, gcbaseva) one 64-byte row at a time
82 for(i=BAMBOO_BASE_VA; i<gcbaseva; i+= 4*16) {
83 printf("(%x,%x) 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x \n",
84 udn_tile_coord_x(), udn_tile_coord_y(),
85 *((int *)(i)), *((int *)(i + 4)),
86 *((int *)(i + 4*2)), *((int *)(i + 4*3)),
87 *((int *)(i + 4*4)), *((int *)(i + 4*5)),
88 *((int *)(i + 4*6)), *((int *)(i + 4*7)),
89 *((int *)(i + 4*8)), *((int *)(i + 4*9)),
90 *((int *)(i + 4*10)), *((int *)(i + 4*11)),
91 *((int *)(i + 4*12)), *((int *)(i + 4*13)),
92 *((int *)(i + 4*14)), *((int *)(i + 4*15)));
94 sblock = gcreservedsb;
95 bool advanceblock = false;
// walk the GC-managed heap [gcbaseva, gcbaseva+BAMBOO_SHARED_MEM_SIZE)
97 for(i=gcbaseva; i<gcbaseva+BAMBOO_SHARED_MEM_SIZE; i+=4*16) {
99 // computing sblock # and block #, core coordinate (x,y) also
100 if(j%((BAMBOO_SMEM_SIZE)/(4*16)) == 0) {
102 if(j < ((BAMBOO_LARGE_SMEM_BOUND)/(4*16))) {
// blocks below BAMBOO_LARGE_SMEM_BOUND use the larger BAMBOO_SMEM_SIZE_L stride
103 if((j > 0) && (j%((BAMBOO_SMEM_SIZE_L)/(4*16)) == 0)) {
115 coren = gc_block2core[block%(NUMCORES4GC*2)];
117 // compute core coordinate
118 BAMBOO_COORDS(coren, &x, &y);
119 printf("(%x,%x) ==== %d, %d : core (%d,%d), saddr %x====\n",
120 udn_tile_coord_x(), udn_tile_coord_y(),
121 block, sblock++, x, y,
122 (sblock-1)*(BAMBOO_SMEM_SIZE)+gcbaseva);
125 printf("(%x,%x) 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x \n",
126 udn_tile_coord_x(), udn_tile_coord_y(),
127 *((int *)(i)), *((int *)(i + 4)),
128 *((int *)(i + 4*2)), *((int *)(i + 4*3)),
129 *((int *)(i + 4*4)), *((int *)(i + 4*5)),
130 *((int *)(i + 4*6)), *((int *)(i + 4*7)),
131 *((int *)(i + 4*8)), *((int *)(i + 4*9)),
132 *((int *)(i + 4*10)), *((int *)(i + 4*11)),
133 *((int *)(i + 4*12)), *((int *)(i + 4*13)),
134 *((int *)(i + 4*14)), *((int *)(i + 4*15)));
136 printf("(%x,%x) \n", udn_tile_coord_x(), udn_tile_coord_y());
140 // should be invoked with interruption closed
// Append ptr to the mark queue. When the current head block is full
// (gcheadindex==NUMPTRS), chain on a new pointerblock — reusing the
// cached gcspare when available, otherwise allocating via RUNMALLOC_I
// (interrupt-safe allocator, hence the _I suffix on this function).
141 inline void gc_enqueue_I(void *ptr) {
143 BAMBOO_DEBUGPRINT(0xe601);
144 BAMBOO_DEBUGPRINT_REG(ptr);
146 if (gcheadindex==NUMPTRS) {
147 struct pointerblock * tmp;
152 tmp=RUNMALLOC_I(sizeof(struct pointerblock));
153 } // if (gcspare!=NULL)
157 } // if (gcheadindex==NUMPTRS)
158 gchead->ptrs[gcheadindex++]=ptr;
160 BAMBOO_DEBUGPRINT(0xe602);
162 } // void gc_enqueue_I(void *ptr)
164 // dequeue and destroy the queue
// Pop the next marked pointer, consuming the queue. When the tail block
// is exhausted, advance gctail to the next block; the old block is
// presumably recycled into gcspare or freed — TODO confirm, that branch
// is not visible in this excerpt.
165 inline void * gc_dequeue_I() {
166 if (gctailindex==NUMPTRS) {
167 struct pointerblock *tmp=gctail;
174 } // if (gcspare!=NULL)
175 } // if (gctailindex==NUMPTRS)
176 return gctail->ptrs[gctailindex++];
177 } // void * gc_dequeue()
179 // dequeue and do not destroy the queue
// Same as gc_dequeue_I but advances only the secondary cursor
// (gctail2/gctailindex2), leaving queue contents intact for a later
// destructive pass.
180 inline void * gc_dequeue2_I() {
181 if (gctailindex2==NUMPTRS) {
182 struct pointerblock *tmp=gctail2;
183 gctail2=gctail2->next;
185 } // if (gctailindex2==NUMPTRS)
186 return gctail2->ptrs[gctailindex2++];
187 } // void * gc_dequeue2()
// Emptiness test for the destructive cursor: the mark queue is drained
// exactly when head and tail coincide on the same block and index.
189 inline int gc_moreItems_I() {
190 if ((gchead==gctail)&&(gctailindex==gcheadindex))
193 } // int gc_moreItems()
// Emptiness test for the non-destructive cursor (gctail2/gctailindex2).
195 inline int gc_moreItems2_I() {
196 if ((gchead==gctail2)&&(gctailindex2==gcheadindex))
199 } // int gc_moreItems2()
201 // should be invoked with interruption closed
202 // enqueue a large obj: start addr & length
// Record a large object (start address, byte length, host core id) at
// the head of the large-object queue. When the head block is full, grow
// the doubly-linked chain by one lobjpointerblock — reusing gclobjspare
// if cached, otherwise allocating with RUNMALLOC_I.
203 inline void gc_lobjenqueue_I(void *ptr,
207 BAMBOO_DEBUGPRINT(0xe901);
209 if (gclobjheadindex==NUMLOBJPTRS) {
210 struct lobjpointerblock * tmp;
211 if (gclobjspare!=NULL) {
215 tmp=RUNMALLOC_I(sizeof(struct lobjpointerblock));
216 } // if (gclobjspare!=NULL)
217 gclobjhead->next=tmp;
218 tmp->prev = gclobjhead;
221 } // if (gclobjheadindex==NUMLOBJPTRS)
222 gclobjhead->lobjs[gclobjheadindex]=ptr;
223 gclobjhead->lengths[gclobjheadindex]=length;
224 gclobjhead->hosts[gclobjheadindex++]=host;
226 BAMBOO_DEBUGPRINT_REG(gclobjhead->lobjs[gclobjheadindex-1]);
227 BAMBOO_DEBUGPRINT_REG(gclobjhead->lengths[gclobjheadindex-1]);
228 BAMBOO_DEBUGPRINT_REG(gclobjhead->hosts[gclobjheadindex-1]);
230 } // void gc_lobjenqueue_I(void *ptr...)
232 // dequeue and destroy the queue
// Consume one large-object record: returns its start address and passes
// its length and host core back through the out-parameters. Exhausted
// tail blocks are unlinked (prev cleared) and recycled into gclobjspare
// when the spare slot is free.
233 inline void * gc_lobjdequeue_I(int * length,
235 if (gclobjtailindex==NUMLOBJPTRS) {
236 struct lobjpointerblock *tmp=gclobjtail;
237 gclobjtail=gclobjtail->next;
239 gclobjtail->prev = NULL;
240 if (gclobjspare!=NULL) {
246 } // if (gclobjspare!=NULL)
247 } // if (gclobjtailindex==NUMLOBJPTRS)
249 *length = gclobjtail->lengths[gclobjtailindex];
252 *host = (int)(gclobjtail->hosts[gclobjtailindex]);
254 return gclobjtail->lobjs[gclobjtailindex++];
255 } // void * gc_lobjdequeue()
// Emptiness test for the destructive large-object cursor.
257 inline int gc_lobjmoreItems_I() {
258 if ((gclobjhead==gclobjtail)&&(gclobjtailindex==gclobjheadindex))
261 } // int gc_lobjmoreItems()
263 // dequeue and don't destroy the queue
// Advance the secondary large-object cursor by one entry without
// returning it (skip forward); the queue itself is left intact.
264 inline void gc_lobjdequeue2_I() {
265 if (gclobjtailindex2==NUMLOBJPTRS) {
266 gclobjtail2=gclobjtail2->next;
270 } // if (gclobjtailindex2==NUMLOBJPTRS)
271 } // void gc_lobjdequeue2_I()
// Emptiness test for the secondary (non-destructive) lobj cursor.
273 inline int gc_lobjmoreItems2_I() {
274 if ((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex))
277 } // int gc_lobjmoreItems2()
279 // 'reversely' dequeue and don't destroy the queue
// Step the secondary cursor one entry BACKWARDS, crossing into the
// previous lobjpointerblock (via the prev link) when the index hits 0.
280 inline void gc_lobjdequeue3_I() {
281 if (gclobjtailindex2==0) {
282 gclobjtail2=gclobjtail2->prev;
283 gclobjtailindex2=NUMLOBJPTRS-1;
286 } // if (gclobjtailindex2==0)
287 } // void gc_lobjdequeue3()
// Termination test for the reverse traversal: stops when the secondary
// cursor has walked back to the primary tail position.
289 inline int gc_lobjmoreItems3_I() {
290 if ((gclobjtail==gclobjtail2)&&(gclobjtailindex2==gclobjtailindex))
293 } // int gc_lobjmoreItems3()
295 inline void gc_lobjqueueinit4_I() {
296 gclobjtail2 = gclobjtail;
297 gclobjtailindex2 = gclobjtailindex;
298 } // void gc_lobjqueueinit2()
// Non-destructive dequeue used after gc_lobjqueueinit4_I: returns the
// next large obj's start address and reports its length and host core
// via the out-parameters, advancing only the secondary cursor.
300 inline void * gc_lobjdequeue4_I(int * length,
302 if (gclobjtailindex2==NUMLOBJPTRS) {
303 gclobjtail2=gclobjtail2->next;
305 } // if (gclobjtailindex2==NUMLOBJPTRS)
307 *length = gclobjtail2->lengths[gclobjtailindex2];
310 *host = (int)(gclobjtail2->hosts[gclobjtailindex2]);
312 return gclobjtail2->lobjs[gclobjtailindex2++];
313 } // void * gc_lobjdequeue4_I()
// Emptiness test for the gc_lobjdequeue4_I traversal (secondary cursor
// vs. the enqueue head).
315 inline int gc_lobjmoreItems4_I() {
316 if ((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex))
319 } // int gc_lobjmoreItems4_I()
// Running total of live object sizes for this core's heap region —
// NOTE(review): maintained elsewhere (e.g. during marking); confirm usage.
321 INTPTR gccurr_heapbound = 0;
// Read the type id from the object's header word 0 and compute its size:
// fixed classsize[type] for scalar objects, header + length*elementsize
// for arrays. Results go out through the pointer parameters (parameter
// list partially elided here; presumably *ttype and *tsize).
323 inline void gettype_size(void * ptr,
326 int type = ((int *)ptr)[0];
328 if(type < NUMCLASSES) {
330 size = classsize[type];
333 struct ArrayObject *ao=(struct ArrayObject *)ptr;
334 int elementsize=classsize[type];
335 int length=ao->___length___;
336 size=sizeof(struct ArrayObject)+length*elementsize;
337 } // if(type < NUMCLASSES)
// Decide whether ptr must be treated as a "large" object for GC purposes:
// true when the object starts exactly on a block boundary, or when its
// size would cross the end of the block it starts in. Also reports the
// object's type and size via gettype_size. Block size is
// BAMBOO_SMEM_SIZE_L below BAMBOO_LARGE_SMEM_BOUND, BAMBOO_SMEM_SIZE above.
342 inline bool isLarge(void * ptr,
346 BAMBOO_DEBUGPRINT(0xe701);
347 BAMBOO_DEBUGPRINT_REG(ptr);
349 // check if a pointer is referring to a large object
350 gettype_size(ptr, ttype, tsize);
352 BAMBOO_DEBUGPRINT(*tsize);
354 int bound = (BAMBOO_SMEM_SIZE);
355 if(((int)ptr-gcbaseva) < (BAMBOO_LARGE_SMEM_BOUND)) {
356 bound = (BAMBOO_SMEM_SIZE_L);
358 if((((int)ptr-gcbaseva)%(bound))==0) {
359 // ptr is a start of a block
361 BAMBOO_DEBUGPRINT(0xe702);
362 BAMBOO_DEBUGPRINT(1);
366 if((bound-(((int)ptr-gcbaseva)%bound)) < (*tsize)) {
367 // it crosses the boundary of the current block
369 BAMBOO_DEBUGPRINT(0xe703);
370 BAMBOO_DEBUGPRINT(1);
375 BAMBOO_DEBUGPRINT(0);
378 } // bool isLarge(void * ptr, int * ttype, int * tsize)
// Return the id of the core that owns the shared-heap region containing
// ptr, computed by the RESIDECORE macro.
380 inline int hostcore(void * ptr) {
381 // check the host core of ptr
383 RESIDECORE(ptr, &host);
385 BAMBOO_DEBUGPRINT(0xedd0);
386 BAMBOO_DEBUGPRINT_REG(ptr);
387 BAMBOO_DEBUGPRINT_REG(host);
390 } // int hostcore(void * ptr)
392 inline bool isLocal(void * ptr) {
393 // check if a pointer is in shared heap on this core
394 return hostcore(ptr) == BAMBOO_NUM_OF_CORE;
395 } // bool isLocal(void * ptr)
// Check whether every GC core (0..NUMCORES4GC-1) reports stalled
// (gccorestatus[i]==0); allStall is cleared otherwise and presumably
// returned (final lines elided). _I: call with interrupts disabled.
397 inline bool gc_checkCoreStatus_I() {
398 bool allStall = true;
399 for(int i = 0; i < NUMCORES4GC; ++i) {
400 if(gccorestatus[i] != 0) {
403 } // if(gccorestatus[i] != 0)
404 } // for(i = 0; i < NUMCORES4GC; ++i)
// Same as gc_checkCoreStatus_I but over ALL active cores
// (0..NUMCORESACTIVE-1), not just the GC cores.
408 inline bool gc_checkAllCoreStatus_I() {
409 bool allStall = true;
410 for(int i = 0; i < NUMCORESACTIVE; ++i) {
411 if(gccorestatus[i] != 0) {
414 } // if(gccorestatus[i] != 0)
415 } // for(i = 0; i < NUMCORESACTIVE; ++i)
// Mark-phase termination detection, run on the coordinating core.
// ("Statue" is presumably a typo for "status".) When all cores report
// stalled, run a two-round confirmation: broadcast GCMARKCONFIRM, then
// compare the summed per-core send/receive object counters for the
// current round; only if they balance AND both rounds' snapshots agree
// is the mark phase declared done (gcphase -> COMPACTPHASE). Otherwise
// gcnumsrobjs_index is flipped and the protocol retries, since messages
// may still be in flight.
419 inline void checkMarkStatue() {
421 BAMBOO_DEBUGPRINT(0xee01);
425 (waitconfirm && (numconfirm == 0))) {
427 BAMBOO_DEBUGPRINT(0xee02);
// pick which of the two counter snapshots this round writes into
432 entry_index = (gcnumsrobjs_index == 0) ? 1 : 0;
435 entry_index = gcnumsrobjs_index;
437 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
438 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
439 gcnumsendobjs[entry_index][BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
440 gcnumreceiveobjs[entry_index][BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
441 // check the status of all cores
442 bool allStall = gc_checkAllCoreStatus_I();
444 BAMBOO_DEBUGPRINT(0xee03);
448 BAMBOO_DEBUGPRINT(0xee04);
453 BAMBOO_DEBUGPRINT(0xee05);
455 // the first time found all cores stall
456 // send out status confirm msg to all other cores
457 // reset the corestatus array too
458 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
460 numconfirm = NUMCORESACTIVE - 1;
461 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
462 for(i = 1; i < NUMCORESACTIVE; ++i) {
464 // send mark phase finish confirm request msg to core i
465 send_msg_1(i, GCMARKCONFIRM, false);
466 } // for(i = 1; i < NUMCORESACTIVE; ++i)
469 // check if the sum of send objs and receive obj are the same
470 // yes->check if the info is the latest; no->go on executing
472 for(i = 0; i < NUMCORESACTIVE; ++i) {
473 sumsendobj += gcnumsendobjs[gcnumsrobjs_index][i];
474 } // for(i = 0; i < NUMCORESACTIVE; ++i)
476 BAMBOO_DEBUGPRINT(0xee06);
477 BAMBOO_DEBUGPRINT_REG(sumsendobj);
479 for(i = 0; i < NUMCORESACTIVE; ++i) {
480 sumsendobj -= gcnumreceiveobjs[gcnumsrobjs_index][i];
481 } // for(i = 0; i < NUMCORESACTIVE; ++i)
483 BAMBOO_DEBUGPRINT(0xee07);
484 BAMBOO_DEBUGPRINT_REG(sumsendobj);
// sent == received globally: no marking messages still in flight
486 if(0 == sumsendobj) {
487 // Check if there are changes of the numsendobjs or numreceiveobjs on
489 bool ischanged = false;
490 for(i = 0; i < NUMCORESACTIVE; ++i) {
491 if((gcnumsendobjs[0][i] != gcnumsendobjs[1][i]) ||
492 (gcnumreceiveobjs[0][i] != gcnumreceiveobjs[1][i]) ) {
496 } // for(i = 0; i < NUMCORESACTIVE; ++i)
498 BAMBOO_DEBUGPRINT(0xee08);
499 BAMBOO_DEBUGPRINT_REG(ischanged);
503 BAMBOO_DEBUGPRINT(0xee09);
505 // all the core status info are the latest
507 gcphase = COMPACTPHASE;
508 // restore the gcstatus for all cores
509 for(i = 0; i < NUMCORESACTIVE; ++i) {
511 } // for(i = 0; i < NUMCORESACTIVE; ++i)
514 gcnumsrobjs_index = (gcnumsrobjs_index == 0) ? 1 : 0;
517 // There were changes between phase 1 and phase 2, can not decide
518 // whether the mark phase has been finished
520 // As it fails in phase 2, flip the entries
521 gcnumsrobjs_index = (gcnumsrobjs_index == 0) ? 1 : 0;
522 } // if(0 == sumsendobj) else ...
523 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
524 } // if(!gcwaitconfirm) else()
526 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
528 } // if((!waitconfirm)...
530 BAMBOO_DEBUGPRINT(0xee0a);
532 } // void checkMarkStatue()
// Pre-GC handshake: verify no transfer-object messages are still in
// flight before starting collection. Broadcasts STATUSCONFIRM, busy-waits
// for numconfirm to drain, then checks that the global sum of sent minus
// received objects is zero. Presumably returns true when GC may start
// and false when messages are still pending — TODO confirm, the return
// statements are elided in this excerpt.
534 inline bool preGC() {
535 // preparation for gc
536 // make sure to clear all incoming msgs especially transfer obj msgs
538 BAMBOO_DEBUGPRINT(0xec01);
542 (waitconfirm && (numconfirm == 0))) {
543 // send out status confirm msgs to all cores to check if there are
544 // transfer obj msgs on-the-fly
546 numconfirm = NUMCORESACTIVE - 1;
547 for(i = 1; i < NUMCORESACTIVE; ++i) {
549 // send status confirm msg to core i
550 send_msg_1(i, STATUSCONFIRM, false);
551 } // for(i = 1; i < NUMCORESACTIVE; ++i)
554 BAMBOO_DEBUGPRINT(0xec02);
557 if(numconfirm == 0) {
560 } // wait for confirmations
564 BAMBOO_DEBUGPRINT(0xec03);
566 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
567 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
570 BAMBOO_DEBUGPRINT(0xec04);
572 for(i = 0; i < NUMCORESACTIVE; ++i) {
573 sumsendobj += numsendobjs[i];
575 BAMBOO_DEBUGPRINT(0xf000 + numsendobjs[i]);
577 } // for(i = 1; i < NUMCORESACTIVE; ++i)
579 BAMBOO_DEBUGPRINT(0xec05);
580 BAMBOO_DEBUGPRINT_REG(sumsendobj);
582 for(i = 0; i < NUMCORESACTIVE; ++i) {
583 sumsendobj -= numreceiveobjs[i];
585 BAMBOO_DEBUGPRINT(0xf000 + numreceiveobjs[i]);
587 } // for(i = 1; i < NUMCORESACTIVE; ++i)
589 BAMBOO_DEBUGPRINT(0xec06);
590 BAMBOO_DEBUGPRINT_REG(sumsendobj);
592 if(0 == sumsendobj) {
595 // still have some transfer obj msgs on-the-fly, can not start gc
597 } // if(0 == sumsendobj)
600 BAMBOO_DEBUGPRINT(0xec07);
602 // previously asked for status confirmation and do not have all the
603 // confirmations yet, can not start gc
605 } // if((!waitconfirm) ||
// Reset all per-collection state: the per-core status/counter arrays
// (on the startup core only), this core's own counters, the mark queue,
// the large-object queue, the local pointer and forwarding hash tables,
// and (on GC cores) the shared mapping table. Also zeroes the first word
// of any leftover bamboo_cur_msp chunk and clears profiling counters.
608 inline void initGC() {
610 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
611 for(i = 0; i < NUMCORES4GC; ++i) {
613 gcnumsendobjs[0][i] = gcnumsendobjs[1][i] = 0;
614 gcnumreceiveobjs[0][i] = gcnumreceiveobjs[1][i] = 0;
616 gcrequiredmems[i] = 0;
617 gcfilledblocks[i] = 0;
619 } // for(i = 0; i < NUMCORES4GC; ++i)
620 for(i = NUMCORES4GC; i < NUMCORESACTIVE; ++i) {
622 gcnumsendobjs[0][i] = gcnumsendobjs[1][i] = 0;
623 gcnumreceiveobjs[0][i] = gcnumreceiveobjs[1][i] = 0;
628 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
629 gcself_numsendobjs = 0;
630 gcself_numreceiveobjs = 0;
631 gcmarkedptrbound = 0;
634 //gcismapped = false;
// reset the mark queue; first-time path allocates the initial block
645 gcheadindex=gctailindex=gctailindex2 = 0;
646 gchead=gctail=gctail2=RUNMALLOC(sizeof(struct pointerblock));
648 gctailindex = gctailindex2 = gcheadindex;
649 gctail = gctail2 = gchead;
652 // initialize the large obj queues
653 if (gclobjhead==NULL) {
656 gclobjtailindex2 = 0;
657 gclobjhead=gclobjtail=gclobjtail2=
658 RUNMALLOC(sizeof(struct lobjpointerblock));
660 gclobjtailindex = gclobjtailindex2 = gclobjheadindex = 0;
661 gclobjtail = gclobjtail2 = gclobjhead;
663 gclobjhead->next = gclobjhead->prev = NULL;
665 #ifdef LOCALHASHTBL_TEST
666 freeRuntimeHash(gcpointertbl);
667 gcpointertbl = allocateRuntimeHash(20);
670 //tprintf("++local hash table element: %x \n", gcpointertbl->numelements);
671 mgchashreset(gcpointertbl);
673 //tprintf("==local hash table element: %x \n", gcpointertbl->numelements);
675 //gcpointertbl = allocateMGCHash(20);
// the forwarding table is freed and reallocated rather than reset
677 freeMGCHash(gcforwardobjtbl);
678 gcforwardobjtbl = allocateMGCHash(20, 3);
680 // initialize the mapping info related structures
681 if((BAMBOO_NUM_OF_CORE < NUMCORES4GC) && (gcsharedptbl != NULL)) {
682 // Never free the shared hash table, just reset it
683 /*freeGCSharedHash(gcsharedptbl);
684 gcsharedptbl = allocateGCSharedHash(20);*/
685 mgcsharedhashReset(gcsharedptbl);
687 // Zero out the remaining bamboo_cur_msp
688 // Only zero out the first 4 bytes of the remaining memory
689 if((bamboo_cur_msp != 0)
690 && (bamboo_smem_zero_top == bamboo_cur_msp)
691 && (bamboo_smem_size > 0)) {
692 *((int *)bamboo_cur_msp) = 0;
696 /*num_mapinforequest = 0;
697 num_mapinforequest_i = 0;
699 flushstalltime_i = 0;
702 gc_num_livespace = 0;
703 gc_num_freespace = 0;
705 gc_num_lobjspace = 0;
707 //#ifdef GC_PROFILE_S
709 gc_num_forwardobj = 0;
710 gc_num_profiles = NUMCORESACTIVE - 1;
714 // compute load balance for all cores
// Sum all GC cores' gcloads to find the post-compaction heap top
// (written through *heaptop), then derive blocks-per-core (numbpc,
// presumably the return value — trailing lines elided) and the top core
// via RESIDECORE.
715 inline int loadbalance(int * heaptop) {
716 // compute load balance
719 // get the total loads
720 int tloads = gcloads[STARTUPCORE];
721 for(i = 1; i < NUMCORES4GC; i++) {
722 tloads += gcloads[i];
724 *heaptop = gcbaseva + tloads;
727 BAMBOO_DEBUGPRINT(0xdddd);
728 BAMBOO_DEBUGPRINT_REG(tloads);
729 BAMBOO_DEBUGPRINT_REG(*heaptop);
732 BLOCKINDEX(*heaptop, &b);
733 int numbpc = b / NUMCORES4GC; // num of blocks per core
735 BAMBOO_DEBUGPRINT_REG(b);
736 BAMBOO_DEBUGPRINT_REG(numbpc);
// NOTE(review): BLOCKINDEX above receives *heaptop (the value) but
// RESIDECORE receives heaptop (the pointer) — confirm this asymmetry is
// intentional and not a missing dereference.
739 RESIDECORE(heaptop, &gctopcore);
741 BAMBOO_DEBUGPRINT_REG(gctopcore);
744 } // void loadbalance(int * heaptop)
// Stage 1 of large-object handling: total up the sizes of all queued
// large objects while insertion-sorting the queue into ascending address
// order, then copy ("cache") them to the very top of the shared heap
// (growing down from gcbaseva+BAMBOO_SHARED_MEM_SIZE) so that compaction
// can later move them to their final homes. Returns false when the space
// above gcheaptop cannot hold them. Each moved object's header word 6 is
// set to COMPACTED so the flush phase knows it moved.
746 inline bool cacheLObjs() {
747 // check the total mem size need for large objs
748 unsigned long long sumsize = 0;
751 BAMBOO_DEBUGPRINT(0xe801);
753 gclobjtail2 = gclobjtail;
754 gclobjtailindex2 = gclobjtailindex;
758 // compute total mem size required and sort the lobjs in ascending order
759 while(gc_lobjmoreItems2_I()) {
761 tmp_lobj = gclobjtail2->lobjs[gclobjtailindex2-1];
762 tmp_host = gclobjtail2->hosts[gclobjtailindex2-1];
763 tmp_len = gclobjtail2->lengths[gclobjtailindex2 - 1];
769 BAMBOO_DEBUGPRINT_REG(gclobjtail2->lobjs[gclobjtailindex2-1]);
770 BAMBOO_DEBUGPRINT_REG(tmp_len);
771 BAMBOO_DEBUGPRINT_REG(sumsize);
// insertion sort: shift larger entries right (crossing block boundaries
// via the prev links) until tmp_lobj's slot is found
773 int i = gclobjtailindex2-1;
774 struct lobjpointerblock * tmp_block = gclobjtail2;
775 // find the place to insert
778 if(tmp_block->prev == NULL) {
781 if(tmp_block->prev->lobjs[NUMLOBJPTRS-1] > tmp_lobj) {
782 tmp_block->lobjs[i] = tmp_block->prev->lobjs[NUMLOBJPTRS-1];
783 tmp_block->lengths[i] = tmp_block->prev->lengths[NUMLOBJPTRS-1];
784 tmp_block->hosts[i] = tmp_block->prev->hosts[NUMLOBJPTRS-1];
785 tmp_block = tmp_block->prev;
789 } // if(tmp_block->prev->lobjs[NUMLOBJPTRS-1] < tmp_lobj)
791 if(tmp_block->lobjs[i-1] > tmp_lobj) {
792 tmp_block->lobjs[i] = tmp_block->lobjs[i-1];
793 tmp_block->lengths[i] = tmp_block->lengths[i-1];
794 tmp_block->hosts[i] = tmp_block->hosts[i-1];
798 } // if(tmp_block->lobjs[i-1] < tmp_lobj)
799 } // if(i ==0 ) else {}
802 if(i != gclobjtailindex2 - 1) {
803 tmp_block->lobjs[i] = tmp_lobj;
804 tmp_block->lengths[i] = tmp_len;
805 tmp_block->hosts[i] = tmp_host;
807 } // while(gc_lobjmoreItems2())
810 gc_num_lobjspace = sumsize;
812 // check if there are enough space to cache these large objs
813 INTPTR dst = gcbaseva + (BAMBOO_SHARED_MEM_SIZE) -sumsize;
814 if((unsigned long long)gcheaptop > (unsigned long long)dst) {
815 // do not have enough room to cache large objs
817 BAMBOO_DEBUGPRINT(0xe802);
818 BAMBOO_DEBUGPRINT_REG(dst);
819 BAMBOO_DEBUGPRINT_REG(gcheaptop);
820 BAMBOO_DEBUGPRINT_REG(sumsize);
825 BAMBOO_DEBUGPRINT(0xe803);
826 BAMBOO_DEBUGPRINT_REG(dst);
827 BAMBOO_DEBUGPRINT_REG(gcheaptop);
830 gcheaptop = dst; // Note: record the start of cached lobjs with gcheaptop
831 // cache the largeObjs to the top of the shared heap
832 //gclobjtail2 = gclobjtail;
833 //gclobjtailindex2 = gclobjtailindex;
834 dst = gcbaseva + (BAMBOO_SHARED_MEM_SIZE);
// reverse traversal: copy highest-addressed objects first so the
// downward-moving dst never overruns a not-yet-copied source
835 while(gc_lobjmoreItems3_I()) {
837 size = gclobjtail2->lengths[gclobjtailindex2];
838 // set the mark field to , indicating that this obj has been moved
839 // and need to be flushed
840 ((int *)(gclobjtail2->lobjs[gclobjtailindex2]))[6] = COMPACTED;
// memmove when source and destination ranges may overlap, memcpy otherwise
842 if((int)dst < (int)(gclobjtail2->lobjs[gclobjtailindex2])+size) {
843 memmove(dst, gclobjtail2->lobjs[gclobjtailindex2], size);
845 //BAMBOO_WRITE_HINT_CACHE(dst, size);
846 memcpy(dst, gclobjtail2->lobjs[gclobjtailindex2], size);
849 BAMBOO_DEBUGPRINT(0x804);
850 BAMBOO_DEBUGPRINT_REG(gclobjtail2->lobjs[gclobjtailindex2]);
851 BAMBOO_DEBUGPRINT(dst);
852 BAMBOO_DEBUGPRINT_REG(size);
853 BAMBOO_DEBUGPRINT_REG(*((int*)gclobjtail2->lobjs[gclobjtailindex2]));
854 BAMBOO_DEBUGPRINT_REG(*((int*)(dst)));
858 } // void cacheLObjs()
860 // update the bamboo_smemtbl to record current shared mem usage
// Refresh bamboo_smemtbl for core coren up to address localtop: blocks
// wholly below localtop's block are set to their full size
// (BAMBOO_SMEM_SIZE_L for the first NUMCORES4GC blocks, BAMBOO_SMEM_SIZE
// otherwise), the block containing localtop records the partial load,
// and gcmem_mixed_usedmem accumulates the same amounts.
// FIX: "BLOCKINDEX(localtop, <opcore);" was HTML-entity damage — the
// original "&lt" in "&ltopcore" had been collapsed to "<". Restored the
// address-of argument "&ltopcore", matching ltopcore's uses below and
// every other BLOCKINDEX(x, &var) call in this file.
861 void updateSmemTbl(int coren,
864 int bound = BAMBOO_SMEM_SIZE_L;
865 BLOCKINDEX(localtop, &ltopcore);
866 if(localtop >= (gcbaseva+(BAMBOO_LARGE_SMEM_BOUND))) {
867 bound = BAMBOO_SMEM_SIZE;
869 int load = (localtop-gcbaseva)%bound;
874 toset = gc_core2block[2*coren+i]+(NUMCORES4GC*2)*j;
875 if(toset < ltopcore) {
876 bamboo_smemtbl[toset]=
877 (toset<NUMCORES4GC) ? BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
879 gcmem_mixed_usedmem += bamboo_smemtbl[toset];
881 } else if(toset == ltopcore) {
882 bamboo_smemtbl[toset] = load;
884 gcmem_mixed_usedmem += bamboo_smemtbl[toset];
896 } // void updateSmemTbl(int, int)
// Stage 2 of large-object handling (after compaction): relocate the
// cached large objects from the heap top (gcheaptop, set by cacheLObjs)
// down onto the first free space above the compacted live data.
// Recomputes the real heap top from gcloads/gcfilledblocks, rebuilds
// bamboo_smemtbl and gcsbstarttbl as objects are laid down, writes a
// BAMBOO_CACHE_LINE_SIZE header at the start of each filled block,
// records each old->new address in gcpointertbl, notifies remote host
// cores via GCLOBJMAPPING, and finally recomputes live/free space stats
// and the first non-full block (bamboo_free_block).
898 inline void moveLObjs() {
900 BAMBOO_DEBUGPRINT(0xea01);
903 // update the gcmem_mixed_usedmem
904 gcmem_mixed_usedmem = 0;
906 // zero out the smemtbl
907 BAMBOO_MEMSET_WH(bamboo_smemtbl, 0, sizeof(int)*gcnumblock);
908 // find current heap top
909 // flush all gcloads to indicate the real heap top on one core
910 // previous it represents the next available ptr on a core
911 if((gcloads[0] > (gcbaseva+(BAMBOO_SMEM_SIZE_L)))
912 && ((gcloads[0]%(BAMBOO_SMEM_SIZE)) == 0)) {
913 // edge of a block, check if this is exactly the heaptop
914 BASEPTR(0, gcfilledblocks[0]-1, &(gcloads[0]));
915 gcloads[0]+=(gcfilledblocks[0]>1 ?
916 (BAMBOO_SMEM_SIZE) : (BAMBOO_SMEM_SIZE_L));
918 updateSmemTbl(0, gcloads[0]);
920 BAMBOO_DEBUGPRINT(0xea02);
921 BAMBOO_DEBUGPRINT_REG(gcloads[0]);
922 BAMBOO_DEBUGPRINT_REG(bamboo_smemtbl[0]);
924 for(int i = 1; i < NUMCORES4GC; i++) {
927 BAMBOO_DEBUGPRINT(0xf000+i);
928 BAMBOO_DEBUGPRINT_REG(gcloads[i]);
929 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[i]);
931 if((gcfilledblocks[i] > 0)
932 && ((gcloads[i] % (BAMBOO_SMEM_SIZE)) == 0)) {
933 // edge of a block, check if this is exactly the heaptop
934 BASEPTR(i, gcfilledblocks[i]-1, &gcloads[i]);
936 (gcfilledblocks[i]>1 ? (BAMBOO_SMEM_SIZE) : (BAMBOO_SMEM_SIZE_L));
939 updateSmemTbl(i, gcloads[i]);
941 BAMBOO_DEBUGPRINT_REG(gcloads[i]);
943 } // for(int i = 1; i < NUMCORES4GC; i++) {
945 // find current heap top
947 // a bug here: when using local allocation, directly move large objects
948 // to the highest free chunk might not be memory efficient
// scan downwards for the highest block with live data
953 for(i = gcnumblock-1; i >= 0; i--) {
954 if(bamboo_smemtbl[i] > 0) {
959 tmpheaptop = gcbaseva;
961 tmpheaptop = gcbaseva+bamboo_smemtbl[i]+((i<NUMCORES4GC) ?
962 (BAMBOO_SMEM_SIZE_L*i) :
963 (BAMBOO_SMEM_SIZE*(i-NUMCORES4GC)+BAMBOO_LARGE_SMEM_BOUND));
966 // move large objs from gcheaptop to tmpheaptop
967 // write the header first
968 unsigned int tomove = gcbaseva + (BAMBOO_SHARED_MEM_SIZE) -gcheaptop;
970 gcmem_mixed_usedmem += tomove;
973 BAMBOO_DEBUGPRINT(0xea03);
974 BAMBOO_DEBUGPRINT_REG(tomove);
975 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
976 BAMBOO_DEBUGPRINT_REG(gcheaptop);
978 // flush the sbstartbl
979 BAMBOO_MEMSET_WH(&(gcsbstarttbl[gcreservedsb]), '\0',
980 (BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE-gcreservedsb)*sizeof(INTPTR));
982 gcheaptop = tmpheaptop;
984 // check how many blocks it crosses
985 int remain = tmpheaptop-gcbaseva;
986 int sb = remain/(BAMBOO_SMEM_SIZE) + gcreservedsb;//number of the sblock
987 int b = 0; // number of the block
988 BLOCKINDEX(tmpheaptop, &b);
989 // check the remaining space in this block
990 bound = (BAMBOO_SMEM_SIZE);
991 if(remain < (BAMBOO_LARGE_SMEM_BOUND)) {
992 bound = (BAMBOO_SMEM_SIZE_L);
994 remain = bound - remain%bound;
997 BAMBOO_DEBUGPRINT(0xea04);
// 'base' tracks the start of the current block header; reserve the
// header space before laying objects down
1003 int base = tmpheaptop;
1005 remain -= BAMBOO_CACHE_LINE_SIZE;
1006 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
1007 gc_lobjqueueinit4_I();
1008 while(gc_lobjmoreItems4_I()) {
1009 ptr = (int)(gc_lobjdequeue4_I(&size, &host));
1010 ALIGNSIZE(size, &isize);
1011 if(remain < isize) {
1012 // this object crosses blocks
1014 // close current block, fill its header
1015 BAMBOO_MEMSET_WH(base, '\0', BAMBOO_CACHE_LINE_SIZE);
1016 *((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
1017 bamboo_smemtbl[b]+=BAMBOO_CACHE_LINE_SIZE;//add the size of header
1021 remain = ((tmpheaptop-gcbaseva)<(BAMBOO_LARGE_SMEM_BOUND)) ?
1022 BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
1024 remain -= BAMBOO_CACHE_LINE_SIZE;
1025 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
1026 BLOCKINDEX(tmpheaptop, &b);
1027 sb = (tmpheaptop-gcbaseva)/(BAMBOO_SMEM_SIZE) + gcreservedsb;
1028 } // if(cpysize > 0)
1030 // move the large obj
// overlap possible between cached source and new destination: memmove
1031 if((int)gcheaptop < (int)(tmpheaptop)+size) {
1032 memmove(tmpheaptop, gcheaptop, size);
1034 //BAMBOO_WRITE_HINT_CACHE(tmpheaptop, size);
1035 memcpy(tmpheaptop, gcheaptop, size);
1037 // fill the remaining space with -2 padding
1038 BAMBOO_MEMSET_WH(tmpheaptop+size, -2, isize-size);
1040 BAMBOO_DEBUGPRINT(0xea05);
1041 BAMBOO_DEBUGPRINT_REG(gcheaptop);
1042 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1043 BAMBOO_DEBUGPRINT_REG(size);
1044 BAMBOO_DEBUGPRINT_REG(isize);
1045 BAMBOO_DEBUGPRINT_REG(base);
1048 // cache the mapping info anyway
1049 //if(ptr != tmpheaptop) {
1050 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1051 #ifdef LOCALHASHTBL_TEST
1052 RuntimeHashadd_I(gcpointertbl, ptr, tmpheaptop);
1054 mgchashInsert_I(gcpointertbl, ptr, tmpheaptop);
1056 //MGCHashadd_I(gcpointertbl, ptr, tmpheaptop);
1057 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1060 BAMBOO_DEBUGPRINT(0xcdca);
1061 BAMBOO_DEBUGPRINT_REG(ptr);
1062 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1064 if(host != BAMBOO_NUM_OF_CORE) {
1065 // send the original host core with the mapping info
1066 send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop, false);
1068 BAMBOO_DEBUGPRINT(0xcdcb);
1069 BAMBOO_DEBUGPRINT_REG(ptr);
1070 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1072 } // if(host != BAMBOO_NUM_OF_CORE)
1073 tmpheaptop += isize;
1075 // set the gcsbstarttbl and bamboo_smemtbl
// sblocks fully covered by this object's interior get the -1 sentinel
1076 int tmpsbs = 1+(isize-remain-1)/BAMBOO_SMEM_SIZE;
1077 for(int k = 1; k < tmpsbs; k++) {
1078 gcsbstarttbl[sb+k] = (INTPTR)(-1);
1081 bound = (b<NUMCORES4GC) ? BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
1082 BLOCKINDEX(tmpheaptop-1, &tmpsbs);
1083 for(; b < tmpsbs; b++) {
1084 bamboo_smemtbl[b] = bound;
1085 if(b==NUMCORES4GC-1) {
1086 bound = BAMBOO_SMEM_SIZE;
1089 if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) {
1090 gcsbstarttbl[sb] = (INTPTR)(-1);
1091 remain = ((tmpheaptop-gcbaseva)<(BAMBOO_LARGE_SMEM_BOUND)) ?
1092 BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
1093 bamboo_smemtbl[b] = bound;
1095 gcsbstarttbl[sb] = (INTPTR)(tmpheaptop);
1096 remain = tmpheaptop-gcbaseva;
1097 bamboo_smemtbl[b] = remain%bound;
1098 remain = bound - bamboo_smemtbl[b];
1099 } // if(((isize-remain)%(BAMBOO_SMEM_SIZE)) == 0) else ...
1101 // close current block and fill the header
1102 BAMBOO_MEMSET_WH(base, '\0', BAMBOO_CACHE_LINE_SIZE);
1103 *((int*)base) = isize + BAMBOO_CACHE_LINE_SIZE;
1106 if(remain == BAMBOO_CACHE_LINE_SIZE) {
1107 // fill with 0 in case
1108 BAMBOO_MEMSET_WH(tmpheaptop, '\0', remain);
1110 remain -= BAMBOO_CACHE_LINE_SIZE;
1111 tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
1114 // move the large obj
1115 if((int)gcheaptop < (int)(tmpheaptop)+size) {
1116 memmove(tmpheaptop, gcheaptop, size);
1118 //BAMBOO_WRITE_HINT_CACHE(tmpheaptop, size);
1119 memcpy(tmpheaptop, gcheaptop, size);
1121 // fill the remaining space with -2 padding
1122 BAMBOO_MEMSET_WH(tmpheaptop+size, -2, isize-size);
1123 // zero out original mem caching the lobj
1124 //BAMBOO_MEMSET_WH(gcheaptop, '\0', size); // TODO ??
1126 BAMBOO_DEBUGPRINT(0xea06);
1127 BAMBOO_DEBUGPRINT_REG(gcheaptop);
1128 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1129 BAMBOO_DEBUGPRINT_REG(size);
1130 BAMBOO_DEBUGPRINT_REG(isize);
1135 // cache the mapping info anyway
1136 //if(ptr != tmpheaptop) {
1137 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1138 #ifdef LOCALHASHTBL_TEST
1139 RuntimeHashadd_I(gcpointertbl, ptr, tmpheaptop);
1141 mgchashInsert_I(gcpointertbl, ptr, tmpheaptop);
1143 //MGCHashadd_I(gcpointertbl, ptr, tmpheaptop);
1144 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1147 BAMBOO_DEBUGPRINT(0xcdcc);
1148 BAMBOO_DEBUGPRINT_REG(ptr);
1149 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1150 BAMBOO_DEBUGPRINT_REG(*((int*)tmpheaptop));
1152 if(host != BAMBOO_NUM_OF_CORE) {
1153 // send the original host core with the mapping info
1154 send_msg_3(host, GCLOBJMAPPING, ptr, tmpheaptop, false);
1156 BAMBOO_DEBUGPRINT(0xcdcd);
1157 BAMBOO_DEBUGPRINT_REG(ptr);
1158 BAMBOO_DEBUGPRINT_REG(tmpheaptop);
1160 } // if(host != BAMBOO_NUM_OF_CORE)
1161 tmpheaptop += isize;
1163 // update bamboo_smemtbl
1164 bamboo_smemtbl[b] += isize;
1165 } // if(remain < isize) else ...
1166 } // while(gc_lobjmoreItems())
1168 // close current block, fill the header
1169 BAMBOO_MEMSET_WH(base, '\0', BAMBOO_CACHE_LINE_SIZE);
1170 *((int*)base) = cpysize + BAMBOO_CACHE_LINE_SIZE;
1171 bamboo_smemtbl[b] += BAMBOO_CACHE_LINE_SIZE;// add the size of the header
1173 tmpheaptop -= BAMBOO_CACHE_LINE_SIZE;
1175 gcheaptop = tmpheaptop;
1177 } // if(tomove == 0)
1180 BAMBOO_DEBUGPRINT(0xea07);
1181 BAMBOO_DEBUGPRINT_REG(gcheaptop);
// advance bamboo_free_block past every completely-full block
1184 bamboo_free_block = 0;
1187 tbound = (bamboo_free_block<NUMCORES4GC) ?
1188 BAMBOO_SMEM_SIZE_L : BAMBOO_SMEM_SIZE;
1189 if(bamboo_smemtbl[bamboo_free_block] == tbound) {
1190 bamboo_free_block++;
1192 // the first non-full partition
1198 // check how many live space there are
1199 gc_num_livespace = 0;
1200 for(int tmpi = 0; tmpi < gcnumblock; tmpi++) {
1201 gc_num_livespace += bamboo_smemtbl[tmpi];
1203 gc_num_freespace = (BAMBOO_SHARED_MEM_SIZE) - gc_num_livespace;
1206 BAMBOO_DEBUGPRINT(0xea08);
1207 BAMBOO_DEBUGPRINT_REG(gcheaptop);
1209 } // void moveLObjs()
// Mark a single object reference. Local shared objects whose header flag
// (word 6) is still INIT are flagged DISCOVERED and pushed on the mark
// queue; shared objects hosted on another core are forwarded once via a
// GCMARKEDOBJ message (deduplicated through gcforwardobjtbl); the final
// enqueue below presumably handles non-shared (local heap) pointers —
// the else branch structure is partially elided here.
1211 inline void markObj(void * objptr) {
1212 if(objptr == NULL) {
1215 if(ISSHAREDOBJ(objptr)) {
1216 int host = hostcore(objptr);
1217 if(BAMBOO_NUM_OF_CORE == host) {
1219 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1220 if(((int *)objptr)[6] == INIT) {
1221 // this is the first time that this object is discovered,
1222 // set the flag as DISCOVERED
1223 ((int *)objptr)[6] |= DISCOVERED;
1224 gc_enqueue_I(objptr);
1226 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1229 BAMBOO_DEBUGPRINT(0xbbbb);
1230 BAMBOO_DEBUGPRINT_REG(host);
1231 BAMBOO_DEBUGPRINT_REG(objptr);
1233 // check if this obj has been forwarded
1234 if(!MGCHashcontains(gcforwardobjtbl, (int)objptr)) {
1236 // TODO unsigned long long ttime = BAMBOO_GET_EXE_TIME();
1238 // send a msg to host informing that objptr is active
1239 send_msg_2(host, GCMARKEDOBJ, objptr, /*BAMBOO_NUM_OF_CORE,*/ false);
1243 marktime += BAMBOO_GET_EXE_TIME() - ttime;
1244 num_markrequest++;*/
1245 //#ifdef GC_PROFILE_S
1246 gc_num_forwardobj++;
1247 //#endif // GC_PROFILE_S
1248 #endif // GC_PROFILE
1249 gcself_numsendobjs++;
1250 MGCHashadd(gcforwardobjtbl, (int)objptr);
1254 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1255 gc_enqueue_I(objptr);
1256 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1257 } // if(ISSHAREDOBJ(objptr))
1258 } // void markObj(void * objptr)
1260 // enqueue root objs
// Seed the mark queue with every root reachable from this core: the
// thread-stack garbagelists, the per-class task object queues, the
// current task descriptor, all pending active-task parameters, both
// cached transfer-object queues (incoming objqueue and outgoing
// totransobjqueue), and the runtime lock table. Aborts via BAMBOO_EXIT
// when invoked outside the mark phase.
1261 inline void tomark(struct garbagelist * stackptr) {
1262 if(MARKPHASE != gcphase) {
1264 BAMBOO_DEBUGPRINT_REG(gcphase);
1266 BAMBOO_EXIT(0xb101);
1268 gcbusystatus = true;
1272 // enqueue current stack
1273 while(stackptr!=NULL) {
1275 BAMBOO_DEBUGPRINT(0xe501);
1276 BAMBOO_DEBUGPRINT_REG(stackptr->size);
1277 BAMBOO_DEBUGPRINT_REG(stackptr->next);
1278 BAMBOO_DEBUGPRINT_REG(stackptr->array[0]);
1280 for(i=0; i<stackptr->size; i++) {
1281 if(stackptr->array[i] != NULL) {
1282 markObj(stackptr->array[i]);
1285 stackptr=stackptr->next;
1289 BAMBOO_DEBUGPRINT(0xe503);
1291 // enqueue objectsets
1292 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
1293 for(i=0; i<NUMCLASSES; i++) {
1294 struct parameterwrapper ** queues =
1295 objectqueues[BAMBOO_NUM_OF_CORE][i];
1296 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
1297 for(j = 0; j < length; ++j) {
1298 struct parameterwrapper * parameter = queues[j];
1299 struct ObjectHash * set=parameter->objectset;
1300 struct ObjectNode * ptr=set->listhead;
1302 markObj((void *)ptr->key);
1309 // enqueue current task descriptor
1310 if(currtpd != NULL) {
1312 BAMBOO_DEBUGPRINT(0xe504);
1314 for(i=0; i<currtpd->numParameters; i++) {
1315 markObj(currtpd->parameterArray[i]);
1320 BAMBOO_DEBUGPRINT(0xe505);
1322 // enqueue active tasks
1323 if(activetasks != NULL) {
1324 struct genpointerlist * ptr=activetasks->list;
1326 struct taskparamdescriptor *tpd=ptr->src;
1328 for(i=0; i<tpd->numParameters; i++) {
1329 markObj(tpd->parameterArray[i]);
1336 BAMBOO_DEBUGPRINT(0xe506);
1338 // enqueue cached transferred obj
1339 struct QueueItem * tmpobjptr = getHead(&objqueue);
1340 while(tmpobjptr != NULL) {
1341 struct transObjInfo * objInfo =
1342 (struct transObjInfo *)(tmpobjptr->objectptr);
1343 markObj(objInfo->objptr);
1344 tmpobjptr = getNextQueueItem(tmpobjptr);
1348 BAMBOO_DEBUGPRINT(0xe507);
1350 // enqueue cached objs to be transferred
1351 struct QueueItem * item = getHead(totransobjqueue);
1352 while(item != NULL) {
1353 struct transObjInfo * totransobj =
1354 (struct transObjInfo *)(item->objectptr);
1355 markObj(totransobj->objptr);
1356 item = getNextQueueItem(item);
1357 } // while(item != NULL)
1360 BAMBOO_DEBUGPRINT(0xe508);
1362 // enqueue lock related info
1363 for(i = 0; i < runtime_locklen; ++i) {
1364 markObj((void *)(runtime_locks[i].redirectlock));
1365 if(runtime_locks[i].value != NULL) {
1366 markObj((void *)(runtime_locks[i].value));
1370 } // void tomark(struct garbagelist * stackptr)
// Core-local mark loop.  Drains the mark queue (fed by tomark() and by
// GCMARKEDOBJ messages from other cores), marks live local objects,
// tracks the total live size (gccurr_heaptop) and the highest marked
// address (gcmarkedptrbound), and reports completion to STARTUPCORE.
// Runs until the coordinator advances gcphase past MARKPHASE.
1372 inline void mark(bool isfirst,
1373 struct garbagelist * stackptr) {
1375 if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT(0xed01);
1379 if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT(0xed02);
1381 // enqueue root objs
1383 gccurr_heaptop = 0; // record the size of all active objs in this core
1384 // aligned but does not consider block boundaries
1385 gcmarkedptrbound = 0;
1388 if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT(0xed03);
1391 bool checkfield = true;
1392 bool sendStall = false;
1394 while(MARKPHASE == gcphase) {
1396 if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT(0xed04);
// Queue inspection must happen with interrupts off (runtime mode)
// because remote cores append to it from message handlers.
1399 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1400 bool hasItems = gc_moreItems2_I();
1401 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1403 BAMBOO_DEBUGPRINT(0xed05);
1409 gcbusystatus = true;
1411 void * ptr = gc_dequeue2_I();
1414 BAMBOO_DEBUGPRINT_REG(ptr);
1419 // check if it is a shared obj
1420 if(ISSHAREDOBJ(ptr)) {
1421 // a shared obj, check if it is a local obj on this core
1422 int host = hostcore(ptr);
1423 bool islocal = (host == BAMBOO_NUM_OF_CORE);
// Header word 6 holds GC flags; DISCOVERED means enqueued but the
// object has not yet been converted to MARKED.
1425 bool isnotmarked = ((((int *)ptr)[6] & DISCOVERED) != 0);
1426 if(isLarge(ptr, &type, &size) && isnotmarked) {
1427 // ptr is a large object and not marked or enqueued
1429 BAMBOO_DEBUGPRINT(0xecec);
1430 BAMBOO_DEBUGPRINT_REG(ptr);
1431 BAMBOO_DEBUGPRINT_REG(*((int*)ptr));
// Large objects go to a dedicated queue and are relocated by the
// coordinator rather than by per-core compaction.
1433 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1434 gc_lobjenqueue_I(ptr, size, BAMBOO_NUM_OF_CORE);
1436 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
// Swap DISCOVERED for MARKED.  NOTE(review): relies on & binding
// tighter than |, i.e. (flags & ~DISCOVERED) | MARKED — intended.
1438 ((int *)ptr)[6] = ((int *)ptr)[6] & (~DISCOVERED) | MARKED;
1439 } else if(isnotmarked) {
1440 // ptr is an unmarked active object on this core
// Account for its aligned footprint in the live-size estimate.
1441 ALIGNSIZE(size, &isize);
1442 gccurr_heaptop += isize;
1444 BAMBOO_DEBUGPRINT(0xaaaa);
1445 BAMBOO_DEBUGPRINT_REG(ptr);
1446 BAMBOO_DEBUGPRINT_REG(isize);
1447 BAMBOO_DEBUGPRINT(((int *)(ptr))[0]);
1450 ((int *)ptr)[6] = ((int *)ptr)[6] & (~DISCOVERED) | MARKED;
// Track the highest address holding a marked object; compaction
// scans up to this bound.
1452 if(ptr + size > gcmarkedptrbound) {
1453 gcmarkedptrbound = ptr + size;
1454 } // if(ptr + size > gcmarkedptrbound)
1456 // ptr is not an active obj or has been marked
1458 } // if(isLarge(ptr, &type, &size)) else ...
1459 } /* can never reach here
1462 if(BAMBOO_NUM_OF_CORE == 0) {
1463 BAMBOO_DEBUGPRINT(0xbbbb);
1464 BAMBOO_DEBUGPRINT_REG(host);
1465 BAMBOO_DEBUGPRINT_REG(ptr);
1468 // check if this obj has been forwarded
1469 if(!MGCHashcontains(gcforwardobjtbl, (int)ptr)) {
1470 // send a msg to host informing that ptr is active
1471 send_msg_2(host, GCMARKEDOBJ, ptr, false);
1472 gcself_numsendobjs++;
1473 MGCHashadd(gcforwardobjtbl, (int)ptr);
1476 }// if(isLocal(ptr)) else ...*/
1477 } // if(ISSHAREDOBJ(ptr))
1479 BAMBOO_DEBUGPRINT(0xed06);
1483 // scan all pointers in ptr
// pointerarray[type] encodes the reference layout: 0 = no pointers,
// 1 = array of pointers, otherwise a table of field offsets.
1484 unsigned INTPTR * pointer;
1485 pointer=pointerarray[type];
1487 /* Array of primitives */
1489 } else if (((INTPTR)pointer)==1) {
1490 /* Array of pointers */
1491 struct ArrayObject *ao=(struct ArrayObject *) ptr;
1492 int length=ao->___length___;
1494 for(j=0; j<length; j++) {
// Element slots start immediately after the length field.
1496 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
1500 INTPTR size=pointer[0];
1502 for(i=1; i<=size; i++) {
1503 unsigned int offset=pointer[i];
1504 void * objptr=*((void **)(((char *)ptr)+offset));
1507 } // if (pointer==0) else if ... else ...
1509 } // while(gc_moreItems2())
1511 BAMBOO_DEBUGPRINT(0xed07);
1513 gcbusystatus = false;
1514 // send mark finish msg to core coordinator
// The coordinator records its own stats directly; other cores report
// via a GCFINISHMARK message.
1515 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
1517 BAMBOO_DEBUGPRINT(0xed08);
1519 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
1520 gcnumsendobjs[gcnumsrobjs_index][BAMBOO_NUM_OF_CORE]=gcself_numsendobjs;
1521 gcnumreceiveobjs[gcnumsrobjs_index][BAMBOO_NUM_OF_CORE]=
1522 gcself_numreceiveobjs;
1523 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
1527 BAMBOO_DEBUGPRINT(0xed09);
1529 send_msg_4(STARTUPCORE, GCFINISHMARK, BAMBOO_NUM_OF_CORE,
1530 gcself_numsendobjs, gcself_numreceiveobjs, false);
1533 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE) ...
1535 BAMBOO_DEBUGPRINT(0xed0a);
1538 if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
1540 BAMBOO_DEBUGPRINT(0xed0b);
1544 } // while(MARKPHASE == gcphase)
// Interrupt-disabled helper for compact2Heaptop(): grant core `coren`
// space at the current heap top (core gctopcore).  Sends/records a
// GCMOVESTART so `coren` compacts into *p, then either deducts the
// grant from the remaining room in the top block or, if the block is
// exhausted, advances gctopcore to the next top block and updates
// *p/*numblocks/*remain for the caller's next grant.
// _I suffix: caller must already hold interrupts disabled.
1547 inline void compact2Heaptophelper_I(int coren,
1552 int memneed = gcrequiredmems[coren] + BAMBOO_CACHE_LINE_SIZE;
1553 if(STARTUPCORE == coren) {
// The coordinator cannot message itself; record the move order in
// globals for it to pick up directly.
1555 gcmovestartaddr = *p;
1556 gcdstcore = gctopcore;
1557 gcblock2fill = *numblocks + 1;
1559 send_msg_4(coren, GCMOVESTART, gctopcore, *p, (*numblocks) + 1, false);
1562 BAMBOO_DEBUGPRINT_REG(coren);
1563 BAMBOO_DEBUGPRINT_REG(gctopcore);
1564 BAMBOO_DEBUGPRINT_REG(*p);
1565 BAMBOO_DEBUGPRINT_REG(*numblocks+1);
1567 if(memneed < *remain) {
// Grant fits in the current top block: consume it and clear the
// requester's outstanding need.
1569 BAMBOO_DEBUGPRINT(0xd104);
1572 gcrequiredmems[coren] = 0;
1573 gcloads[gctopcore] += memneed;
1574 *remain = *remain - memneed;
1577 BAMBOO_DEBUGPRINT(0xd105);
1579 // next available block
// Current top block exhausted: close it out, reduce (not clear)
// the requester's need by what this block could still hold, and
// move the heap top to the next core/block.
1581 gcfilledblocks[gctopcore] += 1;
1583 BASEPTR(gctopcore, gcfilledblocks[gctopcore], &newbase);
1584 gcloads[gctopcore] = newbase;
1585 gcrequiredmems[coren] -= *remain - BAMBOO_CACHE_LINE_SIZE;
1586 gcstopblock[gctopcore]++;
1587 gctopcore = NEXTTOPCORE(gctopblock);
1589 *numblocks = gcstopblock[gctopcore];
1590 *p = gcloads[gctopcore];
// First block of a core is the large SMEM_SIZE_L block; later
// blocks use the regular small-block size.
1592 *remain=(b<NUMCORES4GC) ?
1593 ((BAMBOO_SMEM_SIZE_L)-((*p)%(BAMBOO_SMEM_SIZE_L)))
1594 : ((BAMBOO_SMEM_SIZE)-((*p)%(BAMBOO_SMEM_SIZE)));
1596 BAMBOO_DEBUGPRINT(0xd106);
1597 BAMBOO_DEBUGPRINT_REG(gctopcore);
1598 BAMBOO_DEBUGPRINT_REG(*p);
1599 BAMBOO_DEBUGPRINT_REG(b);
1600 BAMBOO_DEBUGPRINT_REG(*remain);
1602 } // if(memneed < remain)
1604 } // void compact2Heaptophelper_I(int, int*, int*, int*)
1606 inline void compact2Heaptop() {
1607 // no cores with spare mem and some cores are blocked with pending move
1608 // find the current heap top and make them move to the heap top
// Coordinator-side routine: compute how much room remains in the
// block at the global heap top, let the top core finish its own
// pending request first, then hand the remaining space to every
// other blocked core via compact2Heaptophelper_I().
1610 int numblocks = gcfilledblocks[gctopcore];
1611 //BASEPTR(gctopcore, numblocks, &p);
1612 p = gcloads[gctopcore];
// Room left in the heap-top block; the first NUMCORES4GC blocks are
// the larger SMEM_SIZE_L blocks.
1615 int remain = (b<NUMCORES4GC) ?
1616 ((BAMBOO_SMEM_SIZE_L)-(p%(BAMBOO_SMEM_SIZE_L)))
1617 : ((BAMBOO_SMEM_SIZE)-(p%(BAMBOO_SMEM_SIZE)));
1618 // check if the top core finishes
// gccorestatus is written from message handlers, so it is only read
// with interrupts disabled (runtime mode).
1619 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1620 if(gccorestatus[gctopcore] != 0) {
1622 BAMBOO_DEBUGPRINT(0xd101);
1623 BAMBOO_DEBUGPRINT_REG(gctopcore);
1625 // let the top core finishes its own work first
1626 compact2Heaptophelper_I(gctopcore, &p, &numblocks, &remain);
1627 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1630 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1633 BAMBOO_DEBUGPRINT(0xd102);
1634 BAMBOO_DEBUGPRINT_REG(gctopcore);
1635 BAMBOO_DEBUGPRINT_REG(p);
1636 BAMBOO_DEBUGPRINT_REG(b);
1637 BAMBOO_DEBUGPRINT_REG(remain);
// Serve each still-working core that is blocked on a memory request.
1639 for(int i = 0; i < NUMCORES4GC; i++) {
1640 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1641 if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0)) {
1643 BAMBOO_DEBUGPRINT(0xd103);
1645 compact2Heaptophelper_I(i, &p, &numblocks, &remain);
1646 if(gccorestatus[gctopcore] != 0) {
1648 BAMBOO_DEBUGPRINT(0xd101);
1649 BAMBOO_DEBUGPRINT_REG(gctopcore);
1651 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1652 // the top core is not free now
// Stop early: no point granting more space until the (new) top
// core finishes its own compaction.
1655 } // if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0))
1656 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1657 } // for(i = 0; i < NUMCORES4GC; i++)
1659 BAMBOO_DEBUGPRINT(0xd106);
1661 } // void compact2Heaptop()
// Coordinator-side pairing of memory supply and demand during
// compaction: scan all GC cores to find one finished core that still
// has spare block space (sourcecore) and one busy core blocked on a
// pending move request (dstcore), then assign the spare space via
// assignSpareMem_I() and issue a GCMOVESTART.  If no core is running
// and none is blocked, advance the phase to SUBTLECOMPACTPHASE.
1663 inline void resolvePendingMoveRequest() {
1665 BAMBOO_DEBUGPRINT(0xeb01);
1668 BAMBOO_DEBUGPRINT(0xeeee);
1669 for(int k = 0; k < NUMCORES4GC; k++) {
1670 BAMBOO_DEBUGPRINT(0xf000+k);
1671 BAMBOO_DEBUGPRINT_REG(gccorestatus[k]);
1672 BAMBOO_DEBUGPRINT_REG(gcloads[k]);
1673 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[k]);
1674 BAMBOO_DEBUGPRINT_REG(gcstopblock[k]);
1676 BAMBOO_DEBUGPRINT(0xffff);
// Scan state: i walks candidate suppliers, j walks candidate
// requesters; the two walks advance independently.
1680 bool nosparemem = true;
1681 bool haspending = false;
1682 bool hasrunning = false;
1683 bool noblock = false;
1684 int dstcore = 0; // the core who need spare mem
1685 int sourcecore = 0; // the core who has spare mem
1686 for(i = j = 0; (i < NUMCORES4GC) && (j < NUMCORES4GC); ) {
1688 // check if there are cores with spare mem
1689 if(gccorestatus[i] == 0) {
1690 // finished working, check if it still have spare mem
1691 if(gcfilledblocks[i] < gcstopblock[i]) {
1692 // still have spare mem
1695 } // if(gcfilledblocks[i] < gcstopblock[i]) else ...
1700 if(gccorestatus[j] != 0) {
1701 // not finished, check if it has pending move requests
// A core is "blocked" when it filled all its assigned blocks but
// still needs memory for unmoved live objects.
1702 if((gcfilledblocks[j]==gcstopblock[j])&&(gcrequiredmems[j]>0)) {
1707 } // if((gcfilledblocks[i] == gcstopblock[i])...) else ...
1708 } // if(gccorestatus[i] == 0) else ...
1710 } // if(!haspending)
1711 if(!nosparemem && haspending) {
// Found a supplier/requester pair: carve the spare space out under
// interrupts-disabled and tell dstcore where to move.
1715 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
1716 gcrequiredmems[dstcore] = assignSpareMem_I(sourcecore,
1717 gcrequiredmems[dstcore],
1720 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
1722 BAMBOO_DEBUGPRINT(0xeb02);
1723 BAMBOO_DEBUGPRINT_REG(sourcecore);
1724 BAMBOO_DEBUGPRINT_REG(dstcore);
1725 BAMBOO_DEBUGPRINT_REG(startaddr);
1726 BAMBOO_DEBUGPRINT_REG(tomove);
1728 if(STARTUPCORE == dstcore) {
1730 BAMBOO_DEBUGPRINT(0xeb03);
// The coordinator records its own move order in globals instead of
// sending itself a message.
1732 gcdstcore = sourcecore;
1734 gcmovestartaddr = startaddr;
1735 gcblock2fill = tomove;
1738 BAMBOO_DEBUGPRINT(0xeb04);
1740 send_msg_4(dstcore, GCMOVESTART, sourcecore,
1741 startaddr, tomove, false);
1748 } // for(i = 0; i < NUMCORES4GC; i++)
1750 BAMBOO_DEBUGPRINT(0xcccc);
1751 BAMBOO_DEBUGPRINT_REG(hasrunning);
1752 BAMBOO_DEBUGPRINT_REG(haspending);
1753 BAMBOO_DEBUGPRINT_REG(noblock);
// All cores idle and none blocked: the main compaction round is done.
1756 if(!hasrunning && !noblock) {
1757 gcphase = SUBTLECOMPACTPHASE;
1761 } // void resovePendingMoveRequest()
1764 int numblocks; // block num for heap
1765 INTPTR base; // base virtual address of current heap block
1766 INTPTR ptr; // virtual address of current heap top
1767 int offset; // offset in current heap block
1768 int blockbase; // virtual address of current small block to check
1769 int blockbound; // bound virtual address of current small blcok
1770 int sblockindex; // index of the small blocks
1771 int top; // real size of current heap block to check
1772 int bound; // bound size of current heap block to check
1773 }; // struct moveHelper
1775 // If out of boundary of valid shared memory, return false, else return true
// Advance the compaction scan cursor `orig` to the next small block
// (sblock) that contains data, crossing big-block boundaries when the
// current one is exhausted.  Returns false once the scan runs past the
// end of this core's share of the shared memory; true otherwise.
// Uses goto labels (innernextSBlock/outernextSBlock, elsewhere in the
// original file) to retry after each kind of skip.
1776 inline bool nextSBlock(struct moveHelper * orig) {
1777 orig->blockbase = orig->blockbound;
1778 bool sbchanged = false;
1780 BAMBOO_DEBUGPRINT(0xecc0);
1781 BAMBOO_DEBUGPRINT_REG(orig->blockbase);
1782 BAMBOO_DEBUGPRINT_REG(orig->blockbound);
1783 BAMBOO_DEBUGPRINT_REG(orig->bound);
1784 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1787 // check if across a big block
1788 // TODO now do not zero out the whole memory, maybe the last two conditions
// A zero word at the cursor/blockbase means unused (never-allocated)
// space, since headers and objects always start with non-zero words.
1790 if((orig->blockbase >= orig->bound) || (orig->ptr >= orig->bound)
1791 || ((orig->ptr != NULL) && (*((int*)orig->ptr))==0)
1792 || ((*((int*)orig->blockbase))==0)) {
1794 // end of current heap block, jump to next one
1797 BAMBOO_DEBUGPRINT(0xecc1);
1798 BAMBOO_DEBUGPRINT_REG(orig->numblocks);
// BASEPTR maps (core, block ordinal) to the block's base address.
1800 BASEPTR(BAMBOO_NUM_OF_CORE, orig->numblocks, &(orig->base));
1802 BAMBOO_DEBUGPRINT(orig->base);
1804 if(orig->base >= gcbaseva + BAMBOO_SHARED_MEM_SIZE) {
// Ran off the end of shared memory: nothing left to scan.
1806 orig->ptr = orig->base; // set current ptr to out of boundary too
1809 //orig->bound = orig->base + BAMBOO_SMEM_SIZE;
1810 orig->blockbase = orig->base;
1811 orig->sblockindex = (orig->blockbase-gcbaseva)/BAMBOO_SMEM_SIZE;
1814 BLOCKINDEX(orig->base, &blocknum);
// bamboo_smemtbl[blocknum] is the used size of that block; zero
// means the whole block is empty and can be skipped.
1815 if(bamboo_smemtbl[blocknum] == 0) {
1817 goto innernextSBlock;
1819 // check the bamboo_smemtbl to decide the real bound
1820 orig->bound = orig->base + bamboo_smemtbl[blocknum];
1821 } else if(0 == (orig->blockbase%BAMBOO_SMEM_SIZE)) {
// Crossed into the next sblock within the same big block.
1822 orig->sblockindex += 1;
1824 } // if((orig->blockbase >= orig->bound) || (orig->ptr >= orig->bound)...
1826 // check if this sblock should be skipped or have special start point
// gcsbstarttbl[]: -1 = sblock fully occupied by a large object
// (skip it); non-zero = scanning must start at that address instead
// of the sblock base; 0 = normal sblock.
1827 if(gcsbstarttbl[orig->sblockindex] == -1) {
1830 BAMBOO_DEBUGPRINT(0xecc2);
1832 orig->sblockindex += 1;
1833 orig->blockbase += BAMBOO_SMEM_SIZE;
1834 goto outernextSBlock;
1835 } else if((gcsbstarttbl[orig->sblockindex] != 0)
1837 // the first time to access this SBlock
1839 BAMBOO_DEBUGPRINT(0xecc3);
1841 // not start from the very beginning
1842 orig->blockbase = gcsbstarttbl[orig->sblockindex];
1843 } // if(gcsbstarttbl[orig->sblockindex] == -1) else ...
1845 // setup information for this sblock
// First word of an sblock is its fill size; data begins after the
// cache-line-sized header.
1846 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
1847 orig->offset = BAMBOO_CACHE_LINE_SIZE;
1848 orig->ptr = orig->blockbase + orig->offset;
1850 BAMBOO_DEBUGPRINT(0xecc4);
1851 BAMBOO_DEBUGPRINT_REG(orig->base);
1852 BAMBOO_DEBUGPRINT_REG(orig->bound);
1853 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1854 BAMBOO_DEBUGPRINT_REG(orig->blockbound);
1855 BAMBOO_DEBUGPRINT_REG(orig->blockbase);
1856 BAMBOO_DEBUGPRINT_REG(orig->offset);
1858 if(orig->ptr >= orig->bound) {
1859 // met a lobj, move to next block
1860 goto innernextSBlock;
1864 } // bool nextSBlock(struct moveHelper * orig)
1866 // return false if there are no available data to compact
// Initialize the two compaction cursors for this core: `to` (the
// destination, starting at the core's first block just past the
// cache-line header) and `orig` (the source scan, starting at the same
// base).  Handles the sblock start table the same way nextSBlock()
// does.  Returns false if there is no data to compact on this core.
1867 inline bool initOrig_Dst(struct moveHelper * orig,
1868 struct moveHelper * to) {
// Destination starts in the core's first (large) block; `top` and
// `offset` begin past the reserved block header.
1871 to->top = to->offset = BAMBOO_CACHE_LINE_SIZE;
1872 to->bound = BAMBOO_SMEM_SIZE_L;
1873 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
1876 BAMBOO_DEBUGPRINT(0xef01);
1877 BAMBOO_DEBUGPRINT_REG(to->base);
1879 to->ptr = to->base + to->offset;
1881 // init the orig ptr
1882 orig->numblocks = 0;
1883 orig->base = to->base;
1885 BLOCKINDEX(orig->base, &blocknum);
1886 // check the bamboo_smemtbl to decide the real bound
1887 orig->bound = orig->base + bamboo_smemtbl[blocknum];
1888 orig->blockbase = orig->base;
1889 orig->sblockindex = (orig->base - gcbaseva) / BAMBOO_SMEM_SIZE;
1891 BAMBOO_DEBUGPRINT(0xef02);
1892 BAMBOO_DEBUGPRINT_REG(orig->base);
1893 BAMBOO_DEBUGPRINT_REG(orig->sblockindex);
1894 BAMBOO_DEBUGPRINT_REG(gcsbstarttbl);
1895 BAMBOO_DEBUGPRINT_REG(gcsbstarttbl[orig->sblockindex]);
// gcsbstarttbl[]: -1 = sblock held by a large object, skip it via
// nextSBlock(); non-zero = scan starts at that recorded address.
1898 if(gcsbstarttbl[orig->sblockindex] == -1) {
1900 BAMBOO_DEBUGPRINT(0xef03);
1904 gcbaseva+BAMBOO_SMEM_SIZE*(orig->sblockindex+1);
1905 return nextSBlock(orig);
1906 } else if(gcsbstarttbl[orig->sblockindex] != 0) {
1908 BAMBOO_DEBUGPRINT(0xef04);
1910 orig->blockbase = gcsbstarttbl[orig->sblockindex];
1913 BAMBOO_DEBUGPRINT(0xef05);
// First word of the sblock holds its fill size; data follows the
// cache-line header.
1915 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
1916 orig->offset = BAMBOO_CACHE_LINE_SIZE;
1917 orig->ptr = orig->blockbase + orig->offset;
1919 BAMBOO_DEBUGPRINT(0xef06);
1920 BAMBOO_DEBUGPRINT_REG(orig->base);
1923 } // bool initOrig_Dst(struct moveHelper * orig, struct moveHelper * to)
// Advance the destination cursor `to` to the start of its next heap
// block: extend the bound by one small-block size, recompute the base
// from (core, block ordinal), and reset ptr/offset just past the
// reserved cache-line header.
1925 inline void nextBlock(struct moveHelper * to) {
1926 to->top = to->bound + BAMBOO_CACHE_LINE_SIZE; // header!
1927 to->bound += BAMBOO_SMEM_SIZE;
1929 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
1930 to->offset = BAMBOO_CACHE_LINE_SIZE;
1931 to->ptr = to->base + to->offset;
1932 } // void nextBlock(struct moveHelper * to)
1934 // endaddr does not contain spaces for headers
// Move one object during compaction: advance `orig` past padding to
// the next object, compute its size from its type header, and if it is
// MARKED, slide it down to `to`, record the old->new address mapping,
// and tag the old header COMPACTED so the flush phase can redirect
// references.  Returns (via the elided paths) whether the caller
// should stop, e.g. because `stopblock` blocks have been filled or the
// scan ran out of data.
1935 inline bool moveobj(struct moveHelper * orig,
1936 struct moveHelper * to,
1938 if(stopblock == 0) {
1943 BAMBOO_DEBUGPRINT(0xe201);
1944 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1945 BAMBOO_DEBUGPRINT_REG(to->ptr);
// Skip alignment padding: compaction fills slack with -2 bytes, so a
// word whose low byte is -2 is padding, not an object header.
1953 while((char)(*((int*)(orig->ptr))) == (char)(-2)) {
1954 orig->ptr = (int*)(orig->ptr) + 1;
1956 if((orig->ptr >= orig->bound) || (orig->ptr == orig->blockbound)) {
1957 if(!nextSBlock(orig)) {
1958 // finished, no more data
1964 BAMBOO_DEBUGPRINT(0xe202);
1965 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1966 BAMBOO_DEBUGPRINT(((int *)(orig->ptr))[0]);
1968 // check the obj's type, size and mark flag
// Header word 0 is the type id; word 6 holds the GC mark flags.
1969 type = ((int *)(orig->ptr))[0];
1972 // end of this block, go to next one
1973 if(!nextSBlock(orig)) {
1974 // finished, no more data
1978 } else if(type < NUMCLASSES) {
// Plain object: size comes straight from the class table.
1980 size = classsize[type];
// Array: header plus length * element size.
1983 struct ArrayObject *ao=(struct ArrayObject *)(orig->ptr);
1984 int elementsize=classsize[type];
1985 int length=ao->___length___;
1986 size=sizeof(struct ArrayObject)+length*elementsize;
1988 mark = ((int *)(orig->ptr))[6];
1989 bool isremote = ((((int *)(orig->ptr))[6] & REMOTEM) != 0);
1991 BAMBOO_DEBUGPRINT(0xe203);
1992 BAMBOO_DEBUGPRINT_REG(orig->ptr);
1993 BAMBOO_DEBUGPRINT_REG(size);
1995 ALIGNSIZE(size, &isize); // no matter is the obj marked or not
1996 // should be able to across it
1997 if((mark & MARKED) != 0) {
1999 BAMBOO_DEBUGPRINT(0xe204);
2001 #ifdef GC_PROFILE//_S
2004 // marked obj, copy it to current heap top
2005 // check to see if remaining space is enough
2006 if(to->top + isize > to->bound) {
2007 // fill 0 indicating the end of this block
2008 BAMBOO_MEMSET_WH(to->ptr, '\0', to->bound - to->top);
2009 // fill the header of this block and then go to next block
2010 to->offset += to->bound - to->top;
2011 BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
// The block header's first word records how much of the block is
// occupied (used by nextSBlock()'s blockbound computation).
2012 (*((int*)(to->base))) = to->offset;
2014 if(stopblock == to->numblocks) {
2015 // already fulfilled the block
2017 } // if(stopblock == to->numblocks)
2018 } // if(to->top + isize > to->bound)
2019 // set the mark field to 2, indicating that this obj has been moved
2020 // and need to be flushed
2021 ((int *)(orig->ptr))[6] = COMPACTED;
2022 if(to->ptr != orig->ptr) {
// Regions may overlap when sliding downward less than `size`
// bytes; use memmove then, memcpy otherwise.
2023 if((int)(orig->ptr) < (int)(to->ptr)+size) {
2024 memmove(to->ptr, orig->ptr, size);
2026 //BAMBOO_WRITE_HINT_CACHE(to->ptr, size);
2027 memcpy(to->ptr, orig->ptr, size);
2029 // fill the remaining space with -2
// -2 padding is what the scan loop above skips over.
2030 BAMBOO_MEMSET_WH(to->ptr+size, -2, isize-size);
2032 // store mapping info
// Old->new address mapping is consumed by flushObj() and by remote
// cores querying this core's shared pointer table.
2033 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2034 #ifdef LOCALHASHTBL_TEST
2035 RuntimeHashadd_I(gcpointertbl, orig->ptr, to->ptr);
2037 mgchashInsert_I(gcpointertbl, orig->ptr, to->ptr);
2039 //MGCHashadd_I(gcpointertbl, orig->ptr, to->ptr);
2042 //unsigned long long ttimet = BAMBOO_GET_EXE_TIME();
2044 // add to the sharedptbl
2045 if(gcsharedptbl != NULL) {
2046 //GCSharedHashadd_I(gcsharedptbl, orig->ptr, to->ptr);
2047 mgcsharedhashInsert_I(gcsharedptbl, orig->ptr, to->ptr);
2048 //num_mapinforequest++; // TODO
2051 //flushstalltime_i += BAMBOO_GET_EXE_TIME()-ttimet;
2054 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2057 BAMBOO_DEBUGPRINT(0xcdce);
2058 BAMBOO_DEBUGPRINT_REG(orig->ptr);
2059 BAMBOO_DEBUGPRINT_REG(to->ptr);
// One live object accounted for; gccurr_heaptop counts what is
// still left to move.
2061 gccurr_heaptop -= isize;
2063 to->offset += isize;
2065 if(to->top == to->bound) {
2066 // fill the header of this block and then go to next block
2067 BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
2068 (*((int*)(to->base))) = to->offset;
2073 BAMBOO_DEBUGPRINT(0xe205);
2079 BAMBOO_DEBUGPRINT_REG(isize);
2080 BAMBOO_DEBUGPRINT_REG(size);
2081 BAMBOO_DEBUGPRINT_REG(orig->ptr);
2082 BAMBOO_DEBUGPRINT_REG(orig->bound);
2084 if((orig->ptr > orig->bound) || (orig->ptr == orig->blockbound)) {
2086 BAMBOO_DEBUGPRINT(0xe206);
2088 if(!nextSBlock(orig)) {
2089 // finished, no more data
2094 BAMBOO_DEBUGPRINT(0xe207);
2095 BAMBOO_DEBUGPRINT_REG(orig->ptr);
2098 } //bool moveobj(struct moveHelper* orig,struct moveHelper* to,int* endaddr)
2100 // should be invoked with interrupts disabled
// Carve spare block space out of a finished core (`sourcecore`) for a
// requester needing `requiredmem` bytes.  Outputs the start address
// and the block-fill target for the mover, bumps sourcecore's load/
// filled-block accounting, and returns how much of the request (if
// any) is still unsatisfied after this grant.
// _I suffix: must be called with interrupts disabled.
2101 inline int assignSpareMem_I(int sourcecore,
// Room left in sourcecore's current block; the first NUMCORES4GC
// blocks are the larger SMEM_SIZE_L blocks.
2106 BLOCKINDEX(gcloads[sourcecore], &b);
2107 int boundptr = (b<NUMCORES4GC) ? ((b+1)*BAMBOO_SMEM_SIZE_L)
2108 : (BAMBOO_LARGE_SMEM_BOUND+(b-NUMCORES4GC+1)*BAMBOO_SMEM_SIZE);
2109 int remain = boundptr - gcloads[sourcecore];
2110 int memneed = requiredmem + BAMBOO_CACHE_LINE_SIZE;
2111 *startaddr = gcloads[sourcecore];
2112 *tomove = gcfilledblocks[sourcecore] + 1;
2113 if(memneed < remain) {
// Request fits in the current block; consume it.
2114 gcloads[sourcecore] += memneed;
2117 // next available block
// Block exhausted: close it and report the unsatisfied remainder.
2118 gcfilledblocks[sourcecore] += 1;
2120 BASEPTR(sourcecore, gcfilledblocks[sourcecore], &newbase);
2121 gcloads[sourcecore] = newbase;
2122 return requiredmem-remain;
2124 } // int assignSpareMem_I(int ,int * , int * , int * )
2126 // should be invoked with interrupts disabled
// Search all GC cores for one that has finished compacting and still
// has unfilled blocks; if found, grant its space to `requiredcore` via
// assignSpareMem_I().  Otherwise park the request in gcrequiredmems[]
// so resolvePendingMoveRequest()/compact2Heaptop() can satisfy it
// later.  _I suffix: must be called with interrupts disabled.
2127 inline bool gcfindSpareMem_I(int * startaddr,
2132 for(int k = 0; k < NUMCORES4GC; k++) {
2133 if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
2134 // check if this stopped core has enough mem
2135 assignSpareMem_I(k, requiredmem, tomove, startaddr);
2140 // if can not find spare mem right now, hold the request
2141 gcrequiredmems[requiredcore] = requiredmem;
2144 } //bool gcfindSpareMem_I(int* startaddr,int* tomove,int mem,int core)
// Drive one round of compaction on this core: call moveobj() until
// either all marked objects (up to gcmarkedptrbound) are moved or the
// assigned gcblock2fill blocks are full, seal the last block header,
// report the result to STARTUPCORE (or ask it for more space), and —
// when a new GCMOVESTART arrives — retarget `to` at the granted
// region, setting *localcompact to whether the destination is local.
2146 inline bool compacthelper(struct moveHelper * orig,
2147 struct moveHelper * to,
2150 bool * localcompact) {
2151 // scan over all objs in this block, compact the marked objs
2152 // loop stop when finishing either scanning all active objs or
2153 // fulfilled the gcstopblock
2155 BAMBOO_DEBUGPRINT(0xe101);
2156 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
2157 BAMBOO_DEBUGPRINT_REG(gcmarkedptrbound);
2160 while(orig->ptr < gcmarkedptrbound) {
2161 bool stop = moveobj(orig, to, gcblock2fill);
2166 // if no objs have been compact, do nothing,
2167 // otherwise, fill the header of this block
2168 if(to->offset > BAMBOO_CACHE_LINE_SIZE) {
// Record the block's fill size in its header word.
2169 BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
2170 (*((int*)(to->base))) = to->offset;
// Nothing was written: give back the reserved header space.
2174 to->top -= BAMBOO_CACHE_LINE_SIZE;
2175 } // if(to->offset > BAMBOO_CACHE_LINE_SIZE) else ...
2177 *heaptopptr = to->ptr;
2178 *filledblocks = to->numblocks;
2181 BAMBOO_DEBUGPRINT(0xe102);
2182 BAMBOO_DEBUGPRINT_REG(orig->ptr);
2183 BAMBOO_DEBUGPRINT_REG(gcmarkedptrbound);
2184 BAMBOO_DEBUGPRINT_REG(*heaptopptr);
2185 BAMBOO_DEBUGPRINT_REG(*filledblocks);
2186 BAMBOO_DEBUGPRINT_REG(gccurr_heaptop);
2189 // send msgs to core coordinator indicating that the compact is finishing
2190 // send compact finish message to core coordinator
2191 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
// Coordinator path: record its own results directly and, if live
// data remains, look for spare memory without messaging.
2192 gcfilledblocks[BAMBOO_NUM_OF_CORE] = *filledblocks;
2193 gcloads[BAMBOO_NUM_OF_CORE] = *heaptopptr;
2194 if(orig->ptr < gcmarkedptrbound) {
2196 BAMBOO_DEBUGPRINT(0xe103);
2200 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2201 if(gcfindSpareMem_I(&gcmovestartaddr, &gcblock2fill, &gcdstcore,
2202 gccurr_heaptop, BAMBOO_NUM_OF_CORE)) {
2204 BAMBOO_DEBUGPRINT(0xe104);
2208 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2210 BAMBOO_DEBUGPRINT(0xe105);
2214 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2217 BAMBOO_DEBUGPRINT(0xe106);
2219 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
// Non-coordinator path: report via GCFINISHCOMPACT; the last
// argument carries the remaining live size (0 when fully done).
2224 if(orig->ptr < gcmarkedptrbound) {
2226 BAMBOO_DEBUGPRINT(0xe107);
2230 send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
2231 *filledblocks, *heaptopptr, gccurr_heaptop, false);
2234 BAMBOO_DEBUGPRINT(0xe108);
2235 BAMBOO_DEBUGPRINT_REG(*heaptopptr);
2237 // finish compacting
2238 send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
2239 *filledblocks, *heaptopptr, 0, false);
2241 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
2243 if(orig->ptr < gcmarkedptrbound) {
2245 BAMBOO_DEBUGPRINT(0xe109);
2247 // still have unpacked obj
2256 BAMBOO_DEBUGPRINT(0xe10a);
// Retarget the destination cursor at the granted region
// (gcmovestartaddr / gcblock2fill set by the GCMOVESTART handler).
2259 to->ptr = gcmovestartaddr;
2260 to->numblocks = gcblock2fill - 1;
2261 to->bound = (to->numblocks==0) ?
2262 BAMBOO_SMEM_SIZE_L :
2263 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
2264 BASEPTR(gcdstcore, to->numblocks, &(to->base));
2265 to->offset = to->ptr - to->base;
2266 to->top = (to->numblocks==0) ?
2267 (to->offset) : (to->bound-BAMBOO_SMEM_SIZE+to->offset);
2269 to->offset = BAMBOO_CACHE_LINE_SIZE;
2270 to->ptr += to->offset; // for header
2271 to->top += to->offset;
2272 if(gcdstcore == BAMBOO_NUM_OF_CORE) {
2273 *localcompact = true;
2275 *localcompact = false;
2280 BAMBOO_DEBUGPRINT(0xe10b);
2283 } // void compacthelper()
// Entry point of the per-core compact phase: build the source and
// destination cursors, bail out (reporting an empty result to the
// coordinator) if this core has nothing to compact, otherwise run
// compacthelper().  Fatal if invoked outside COMPACTPHASE.
2285 inline void compact() {
2286 if(COMPACTPHASE != gcphase) {
2287 BAMBOO_EXIT(0xb102);
2290 // initialize pointers for comapcting
// NOTE(review): these RUNMALLOC'd helpers are not visibly freed in
// this chunk — confirm they are released after compaction elsewhere.
2291 struct moveHelper * orig =
2292 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2293 struct moveHelper * to =
2294 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
2296 if(!initOrig_Dst(orig, to)) {
2297 // no available data to compact
2298 // send compact finish msg to STARTUP core
2300 BAMBOO_DEBUGPRINT(0xe001);
2301 BAMBOO_DEBUGPRINT_REG(to->base);
2303 send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
2304 0, to->base, 0, false);
2310 int filledblocks = 0;
2311 INTPTR heaptopptr = 0;
2312 bool localcompact = true;
2313 compacthelper(orig, to, &filledblocks, &heaptopptr, &localcompact);
2319 // if this returns NULL, it means
2320 // 1. objptr is NULL, or
2321 // 2. objptr is not a shared obj
2322 // in these cases, keeping the original value is OK
// Translate a pre-compaction object pointer to its post-compaction
// address.  Lookup order: local gcpointertbl, then the owning core's
// shared pointer table, and finally a GCMAPREQUEST message to the host
// core.  Returns NULL when objptr is NULL or not a shared object — in
// those cases the caller keeps the original value.
2323 inline void * flushObj(void * objptr) {
2325 BAMBOO_DEBUGPRINT(0xe401);
2327 if(objptr == NULL) {
2330 void * dstptr = NULL;
2331 if(ISSHAREDOBJ(objptr)) {
2333 BAMBOO_DEBUGPRINT(0xe402);
2334 BAMBOO_DEBUGPRINT_REG(objptr);
2336 // a shared obj ptr, change to new address
// Table lookups happen in runtime mode (interrupts off) because
// message handlers also mutate these tables.
2337 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2339 //unsigned long long ttime = BAMBOO_GET_EXE_TIME();
2341 #ifdef LOCALHASHTBL_TEST
2342 RuntimeHashget(gcpointertbl, objptr, &dstptr);
2344 dstptr = mgchashSearch(gcpointertbl, objptr);
2346 //MGCHashget(gcpointertbl, objptr, &dstptr);
2348 //flushstalltime += BAMBOO_GET_EXE_TIME()-ttime;
2350 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2352 BAMBOO_DEBUGPRINT_REG(dstptr);
2355 if(NULL == dstptr) {
2358 BAMBOO_DEBUGPRINT(0xe403);
2359 BAMBOO_DEBUGPRINT_REG(objptr);
2360 BAMBOO_DEBUGPRINT_REG(hostcore(objptr));
2362 if(hostcore(objptr) == BAMBOO_NUM_OF_CORE) {
2363 // error! the obj is right on this core, but cannot find it
// A local shared object must have a mapping entry by now; a miss
// means the compact phase lost it — fatal.
2364 BAMBOO_DEBUGPRINT_REG(objptr);
2365 BAMBOO_EXIT(0xb103);
2366 // assume that the obj has not been moved, use the original address
2369 int hostc = hostcore(objptr);
2371 //unsigned long long ttimet = BAMBOO_GET_EXE_TIME();
2373 // check the corresponding sharedptbl
// Second-chance lookup in the host core's broadcast shared table;
// cache any hit into the local table to avoid re-querying.
2374 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2375 //struct GCSharedHash * sptbl = gcrpointertbls[hostcore(objptr)];
2376 mgcsharedhashtbl_t * sptbl = gcrpointertbls[hostc];
2378 //GCSharedHashget(sptbl, (int)objptr, &dstptr);
2379 dstptr = mgcsharedhashSearch(sptbl, (int)objptr);
2380 if(dstptr != NULL) {
2381 #ifdef LOCALHASHTBL_TEST
2382 RuntimeHashadd_I(gcpointertbl, (int)objptr, (int)dstptr);
2384 mgchashInsert_I(gcpointertbl, (int)objptr, (int)dstptr);
2388 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2390 //flushstalltime_i += BAMBOO_GET_EXE_TIME()-ttimet;
2393 if(dstptr == NULL) {
2394 // still can not get the mapping info,
2395 // send msg to host core for the mapping info
// gcobj2map/GCMAPREQUEST protocol: the reply handler (elided
// here) installs the mapping into gcpointertbl, which is then
// re-read below.
2396 gcobj2map = (int)objptr;
2401 //num_mapinforequest++;
2402 //unsigned long long ttime = BAMBOO_GET_EXE_TIME();
2405 //unsigned long long ttimet = BAMBOO_GET_EXE_TIME();
2407 // the first time require the mapping, send msg to the hostcore
2408 // for the mapping info
2409 send_msg_3(hostc, GCMAPREQUEST, (int)objptr,
2410 BAMBOO_NUM_OF_CORE, false);
2417 //flushstalltime_i += BAMBOO_GET_EXE_TIME()-ttimet;
2421 //flushstalltime += BAMBOO_GET_EXE_TIME() - ttime;
2423 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2424 #ifdef LOCALHASHTBL_TEST
2425 RuntimeHashget(gcpointertbl, objptr, &dstptr);
2427 dstptr = mgchashSearch(gcpointertbl, objptr);
2429 //MGCHashget(gcpointertbl, objptr, &dstptr);
2430 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2431 } // if(dstptr == NULL)
2432 } // if(hostcore(objptr) == BAMBOO_NUM_OF_CORE) else ...
2434 BAMBOO_DEBUGPRINT_REG(dstptr);
2436 } // if(NULL == dstptr)
2437 } // if(ISSHAREDOBJ(objptr))
2438 // if not a shared obj, return NULL to indicate no need to flush
2440 BAMBOO_DEBUGPRINT(0xe404);
2443 } // void flushObj(void * objptr)
// Flush-phase counterpart of tomark(): rewrite every root location on
// this core to the post-compaction address returned by flushObj().
// Covers the mutator stack, per-class object queues (rehashed after
// key updates), the current task descriptor, active tasks, cached
// inbound/outbound transfer objects, and lock bookkeeping entries.
// flushObj() returning NULL means "no move"; elided guards keep the
// original value in that case.
2445 inline void flushRuntimeObj(struct garbagelist * stackptr) {
2447 // flush current stack
2448 while(stackptr!=NULL) {
2449 for(i=0; i<stackptr->size; i++) {
2450 if(stackptr->array[i] != NULL) {
2451 void * dst = flushObj(stackptr->array[i]);
2453 stackptr->array[i] = dst;
2457 stackptr=stackptr->next;
2461 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
2462 for(i=0; i<NUMCLASSES; i++) {
2463 struct parameterwrapper ** queues =
2464 objectqueues[BAMBOO_NUM_OF_CORE][i];
2465 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
2466 for(j = 0; j < length; ++j) {
2467 struct parameterwrapper * parameter = queues[j];
2468 struct ObjectHash * set=parameter->objectset;
2469 struct ObjectNode * ptr=set->listhead;
2471 void * dst = flushObj((void *)ptr->key);
// Keys changed in place, so the hash set must be rebuilt.
2477 ObjectHashrehash(set);
2482 // flush current task descriptor
2483 if(currtpd != NULL) {
2484 for(i=0; i<currtpd->numParameters; i++) {
2485 void * dst = flushObj(currtpd->parameterArray[i]);
2487 currtpd->parameterArray[i] = dst;
2492 // flush active tasks
2493 if(activetasks != NULL) {
2494 struct genpointerlist * ptr=activetasks->list;
2496 struct taskparamdescriptor *tpd=ptr->src;
2498 for(i=0; i<tpd->numParameters; i++) {
2499 void * dst = flushObj(tpd->parameterArray[i]);
2501 tpd->parameterArray[i] = dst;
// Task descriptors hash over their parameters; rebuild the table.
2506 genrehash(activetasks);
2509 // flush cached transferred obj
2510 struct QueueItem * tmpobjptr = getHead(&objqueue);
2511 while(tmpobjptr != NULL) {
2512 struct transObjInfo * objInfo =
2513 (struct transObjInfo *)(tmpobjptr->objectptr);
2514 void * dst = flushObj(objInfo->objptr);
2516 objInfo->objptr = dst;
2518 tmpobjptr = getNextQueueItem(tmpobjptr);
2521 // flush cached objs to be transferred
2522 struct QueueItem * item = getHead(totransobjqueue);
2523 while(item != NULL) {
2524 struct transObjInfo * totransobj =
2525 (struct transObjInfo *)(item->objectptr);
2526 void * dst = flushObj(totransobj->objptr);
2528 totransobj->objptr = dst;
2530 item = getNextQueueItem(item);
2531 } // while(item != NULL)
2533 // enqueue lock related info
2534 for(i = 0; i < runtime_locklen; ++i) {
2535 void * dst = flushObj(runtime_locks[i].redirectlock);
2537 runtime_locks[i].redirectlock = (int)dst;
2539 if(runtime_locks[i].value != NULL) {
2540 void * dst=flushObj(runtime_locks[i].value);
2542 runtime_locks[i].value = (int)dst;
2547 } // void flushRuntimeObj(struct garbagelist * stackptr)
// Broadcast this core's shared pointer-mapping table handle to every
// other active core (used for cross-core flushObj lookups), then tell
// the coordinator this core has finished the map-info exchange.
2549 inline void transmappinginfo() {
2550 // broadcast the sharedptbl pointer
2551 for(int i = 0; i < NUMCORESACTIVE; i++) {
2552 if(i != BAMBOO_NUM_OF_CORE) {
2553 send_msg_3(i, GCMAPTBL, gcsharedptbl, BAMBOO_NUM_OF_CORE, false);
2557 if(STARTUPCORE != BAMBOO_NUM_OF_CORE) {
2558 send_msg_2(STARTUPCORE, GCFINISHMAPINFO, BAMBOO_NUM_OF_CORE, false);
// flush(): per-core FLUSH-phase worker.  After compaction every live
// object may have moved, so each interior pointer must be rewritten to
// the object's new address as returned by flushObj().  Three sources of
// pointers are processed in order:
//   1. runtime-held pointers (transObj queues, runtime locks) via
//      flushRuntimeObj(stackptr);
//   2. the regular GC work queue (gc_dequeue_I), objects whose mark word
//      (slot [6]) is COMPACTED, scanned field-by-field using
//      pointerarray[type] — 0 = primitive array (nothing to do),
//      1 = array of pointers, otherwise a table of field offsets;
//   3. the large-object queue (gc_lobjdequeue_I), scanned the same way.
// After scanning, the mark word is reset to INIT.  Finally the core
// reports GCFINISHFLUSH to the coordinator (the startup core instead
// clears its own entry in gccorestatus[]).
// NOTE(review): listing is elided — guard conditions before the
// BAMBOO_EXIT() error paths, several closing braces, and the
// #ifdef/#endif pairs around debug prints are on lines not shown.
2562 inline void flush(struct garbagelist * stackptr) {
2564 /* TODO if(BAMBOO_NUM_OF_CORE == 0) {
2565 BAMBOO_DEBUGPRINT(0xcccc);
2566 BAMBOO_DEBUGPRINT_REG(BAMBOO_GET_EXE_TIME());
// Pass 1: fix up pointers cached in runtime data structures.
2570 flushRuntimeObj(stackptr);
2572 // TODO if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT_REG(BAMBOO_GET_EXE_TIME());
// Pass 2: drain the regular mark queue.  Queue operations suffixed _I
// must run in runtime (interrupt-protected) mode, hence the mode
// enter/exit bracketing around each call.
2576 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2577 bool hasItems = gc_moreItems_I();
2578 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2584 BAMBOO_DEBUGPRINT(0xe301);
2586 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2587 void * ptr = gc_dequeue_I();
2588 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2589 if(ISSHAREDOBJ(ptr)) {
2590 // should be a local shared obj and should have mapping info
2591 ptr = flushObj(ptr);
2593 BAMBOO_DEBUGPRINT(0xe302);
2594 BAMBOO_DEBUGPRINT_REG(ptr);
2595 BAMBOO_DEBUGPRINT_REG(tptr);
2596 BAMBOO_DEBUGPRINT_REG(((int *)(tptr))[0]);
// Fatal: shared object without mapping info (guard condition elided).
2599 BAMBOO_EXIT(0xb105);
2601 } // if(ISSHAREDOBJ(ptr))
2602 if((!ISSHAREDOBJ(ptr)) || (((int *)(ptr))[6] == COMPACTED)) {
// Word 0 of the object header is its type id; word 6 is the mark state.
2603 int type = ((int *)(ptr))[0];
2604 // scan all pointers in ptr
2605 unsigned INTPTR * pointer;
2606 pointer=pointerarray[type];
2608 BAMBOO_DEBUGPRINT(0xe303);
2609 BAMBOO_DEBUGPRINT_REG(pointer);
2612 /* Array of primitives */
2614 } else if (((INTPTR)pointer)==1) {
2616 BAMBOO_DEBUGPRINT(0xe304);
2618 /* Array of pointers */
2619 struct ArrayObject *ao=(struct ArrayObject *) ptr;
2620 int length=ao->___length___;
2622 for(j=0; j<length; j++) {
2624 BAMBOO_DEBUGPRINT(0xe305);
// Element storage begins one int past the ___length___ field.
2627 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
2629 BAMBOO_DEBUGPRINT_REG(objptr);
2631 if(objptr != NULL) {
2632 void * dst = flushObj(objptr);
2634 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] = dst;
2640 BAMBOO_DEBUGPRINT(0xe306);
// Ordinary object: pointer[0] is the field count, pointer[1..size]
// hold byte offsets of the reference fields.
2642 INTPTR size=pointer[0];
2644 for(i=1; i<=size; i++) {
2646 BAMBOO_DEBUGPRINT(0xe307);
2648 unsigned int offset=pointer[i];
2649 void * objptr=*((void **)(((char *)ptr)+offset));
2651 BAMBOO_DEBUGPRINT_REG(objptr);
2653 if(objptr != NULL) {
2654 void * dst = flushObj(objptr);
2656 *((void **)(((char *)ptr)+offset)) = dst;
2659 } // for(i=1; i<=size; i++)
2660 } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
2661 // restore the mark field, indicating that this obj has been flushed
2662 if(ISSHAREDOBJ(ptr)) {
2663 ((int *)(ptr))[6] = INIT;
2665 } // if((!ISSHAREDOBJ(ptr)) || (((int *)(ptr))[6] == COMPACTED))
2666 } // while(gc_moreItems())
2668 BAMBOO_DEBUGPRINT(0xe308);
2671 // TODO if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT_REG(BAMBOO_GET_EXE_TIME());
// Pass 3: flush the large objects.  Same scan logic as pass 2.
2674 // TODO bug here: the startup core contains all lobjs' info, thus all the
2675 // lobjs are flushed in sequence.
2677 while(gc_lobjmoreItems_I()) {
2679 BAMBOO_DEBUGPRINT(0xe309);
2681 void * ptr = gc_lobjdequeue_I(NULL, NULL);
2682 ptr = flushObj(ptr);
2684 BAMBOO_DEBUGPRINT(0xe30a);
2685 BAMBOO_DEBUGPRINT_REG(ptr);
2686 BAMBOO_DEBUGPRINT_REG(tptr);
2687 BAMBOO_DEBUGPRINT_REG(((int *)(tptr))[0]);
// Fatal: large object without mapping info (guard condition elided).
2690 BAMBOO_EXIT(0xb106);
2692 if(((int *)(ptr))[6] == COMPACTED) {
2693 int type = ((int *)(ptr))[0];
2694 // scan all pointers in ptr
2695 unsigned INTPTR * pointer;
2696 pointer=pointerarray[type];
2698 BAMBOO_DEBUGPRINT(0xe30b);
2699 BAMBOO_DEBUGPRINT_REG(pointer);
2702 /* Array of primitives */
2704 } else if (((INTPTR)pointer)==1) {
2706 BAMBOO_DEBUGPRINT(0xe30c);
2708 /* Array of pointers */
2709 struct ArrayObject *ao=(struct ArrayObject *) ptr;
2710 int length=ao->___length___;
2712 for(j=0; j<length; j++) {
2714 BAMBOO_DEBUGPRINT(0xe30d);
2717 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
2719 BAMBOO_DEBUGPRINT_REG(objptr);
2721 if(objptr != NULL) {
2722 void * dst = flushObj(objptr);
2724 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] = dst;
2730 BAMBOO_DEBUGPRINT(0xe30e);
2732 INTPTR size=pointer[0];
2734 for(i=1; i<=size; i++) {
2736 BAMBOO_DEBUGPRINT(0xe30f);
2738 unsigned int offset=pointer[i];
2739 void * objptr=*((void **)(((char *)ptr)+offset));
2742 BAMBOO_DEBUGPRINT_REG(objptr);
2744 if(objptr != NULL) {
2745 void * dst = flushObj(objptr);
2747 *((void **)(((char *)ptr)+offset)) = dst;
2750 } // for(i=1; i<=size; i++)
2751 } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
2752 // restore the mark field, indicating that this obj has been flushed
2753 ((int *)(ptr))[6] = INIT;
2754 } // if(((int *)(ptr))[6] == COMPACTED)
2755 } // while(gc_lobjmoreItems())
2757 BAMBOO_DEBUGPRINT(0xe310);
2760 // TODO if(BAMBOO_NUM_OF_CORE == 0) BAMBOO_DEBUGPRINT_REG(BAMBOO_GET_EXE_TIME());
2763 // send flush finish message to core coordinator
2764 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
2765 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
2767 send_msg_2(STARTUPCORE, GCFINISHFLUSH, BAMBOO_NUM_OF_CORE, false);
2771 //if(BAMBOO_NUM_OF_CORE == 0) {
2772 //BAMBOO_DEBUGPRINT(0xffff);
2773 //BAMBOO_DEBUGPRINT_REG(num_mapinforequest);
2774 //BAMBOO_DEBUGPRINT_REG(flushstalltime);
2775 //BAMBOO_DEBUGPRINT_REG(num_mapinforequest_i);
2776 //BAMBOO_DEBUGPRINT_REG(flushstalltime_i);
2778 //BAMBOO_DEBUGPRINT_REG(flushstalltime);
2781 BAMBOO_DEBUGPRINT(0xe311);
// gc_collect(): GC routine for a worker (non-coordinator) core that
// participates in collection (BAMBOO_NUM_OF_CORE < NUMCORES4GC).  It
// walks the global gcphase state machine: INIT -> MARK (+compact) ->
// MAP -> FLUSH -> FINISH, acknowledging each phase to the startup core
// (GCFINISHINIT, etc.).  Under GC_PROFILE, object counts are reported to
// the startup core during the flush phase.
// NOTE(review): listing is elided — the wait-loops between phases, the
// initGC/compact/transmappinginfo/flush calls, and the matching #endif
// lines are on lines not shown; only the visible skeleton is annotated.
2785 inline void gc_collect(struct garbagelist * stackptr) {
2786 // core collector routine
2788 if(INITPHASE == gcphase) {
2792 #ifdef RAWPATH // TODO GC_DEBUG
2793 printf("(%X,%X) Do initGC\n", udn_tile_coord_x(), udn_tile_coord_y());
2796 //send init finish msg to core coordinator
2797 send_msg_2(STARTUPCORE, GCFINISHINIT, BAMBOO_NUM_OF_CORE, false);
2799 if(MARKPHASE == gcphase) {
2803 #ifdef RAWPATH // TODO GC_DEBUG
2804 printf("(%x,%x) Start mark phase\n", udn_tile_coord_x(),
2805 udn_tile_coord_y());
// isfirst=true: this is the first mark invocation on this core.
2807 mark(true, stackptr);
2808 #ifdef RAWPATH // TODO GC_DEBUG
2809 printf("(%x,%x) Finish mark phase, start compact phase\n",
2810 udn_tile_coord_x(), udn_tile_coord_y());
2813 #ifdef RAWPATH // TODO GC_DEBUG
2814 printf("(%x,%x) Finish compact phase\n", udn_tile_coord_x(),
2815 udn_tile_coord_y());
2818 if(MAPPHASE == gcphase) {
2822 #ifdef RAWPATH // TODO GC_DEBUG
2823 printf("(%x,%x) Start map phase\n", udn_tile_coord_x(),
2824 udn_tile_coord_y());
2827 #ifdef RAWPATH // TODO GC_DEBUG
2828 printf("(%x,%x) Finish map phase\n", udn_tile_coord_x(),
2829 udn_tile_coord_y());
2832 if(FLUSHPHASE == gcphase) {
2836 #ifdef RAWPATH // TODO GC_DEBUG
2837 printf("(%x,%x) Start flush phase\n", udn_tile_coord_x(),
2838 udn_tile_coord_y());
2840 #ifdef GC_PROFILE//_S
2841 /*BAMBOO_DEBUGPRINT(0xaaaa);
2842 BAMBOO_DEBUGPRINT_REG(gc_num_obj);
2843 BAMBOO_DEBUGPRINT_REG(gc_num_liveobj);
2844 BAMBOO_DEBUGPRINT_REG(gc_num_forwardobj);
2845 BAMBOO_DEBUGPRINT(0xaaab);*/
2846 // send the num of obj/liveobj/forwardobj to the startupcore
2847 if(STARTUPCORE != BAMBOO_NUM_OF_CORE) {
2848 send_msg_4(STARTUPCORE, GCPROFILES, gc_num_obj,
2849 gc_num_liveobj, gc_num_forwardobj, false);
2852 #endif // GC_PROFLIE_S
2854 #ifdef RAWPATH // TODO GC_DEBUG
2855 printf("(%x,%x) Finish flush phase\n", udn_tile_coord_x(),
2856 udn_tile_coord_y());
2860 if(FINISHPHASE == gcphase) {
2864 #ifdef RAWPATH // TODO GC_DEBUG
2865 printf("(%x,%x) Finish gc!\n", udn_tile_coord_x(), udn_tile_coord_y());
2867 } // void gc_collect(struct garbagelist * stackptr)
// gc_nocollect(): GC routine for a core that does NOT host collectable
// heap blocks (BAMBOO_NUM_OF_CORE >= NUMCORES4GC).  Such cores still
// take part in INIT, MARK and FLUSH (their roots/caches reference shared
// objects) but skip the compact and map phases, then wait for FINISH.
// Mirrors the phase skeleton of gc_collect() above.
// NOTE(review): listing is elided — the inter-phase wait loops, initGC
// and flush calls, and matching #endif lines are on lines not shown.
2869 inline void gc_nocollect(struct garbagelist * stackptr) {
2871 if(INITPHASE == gcphase) {
2875 #ifdef RAWPATH // TODO GC_DEBUG
2876 printf("(%x,%x) Do initGC\n", udn_tile_coord_x(), udn_tile_coord_y());
2879 //send init finish msg to core coordinator
2880 send_msg_2(STARTUPCORE, GCFINISHINIT, BAMBOO_NUM_OF_CORE, false);
2882 if(MARKPHASE == gcphase) {
2886 #ifdef RAWPATH // TODO GC_DEBUG
2887 printf("(%x,%x) Start mark phase\n", udn_tile_coord_x(),
2888 udn_tile_coord_y());
2890 mark(true, stackptr);
2891 #ifdef RAWPATH // TODO GC_DEBUG
2892 printf("(%x,%x) Finish mark phase, wait for flush\n",
2893 udn_tile_coord_x(), udn_tile_coord_y());
2895 // non-gc core collector routine
2897 if(FLUSHPHASE == gcphase) {
2901 #ifdef RAWPATH // TODO GC_DEBUG
2902 printf("(%x,%x) Start flush phase\n", udn_tile_coord_x(),
2903 udn_tile_coord_y());
2905 #ifdef GC_PROFILE//_S
2906 /*BAMBOO_DEBUGPRINT(0xaaaa);
2907 BAMBOO_DEBUGPRINT_REG(gc_num_obj);
2908 BAMBOO_DEBUGPRINT_REG(gc_num_liveobj);
2909 BAMBOO_DEBUGPRINT_REG(gc_num_forwardobj);
2910 BAMBOO_DEBUGPRINT(0xaaab);*/
2911 if(STARTUPCORE != BAMBOO_NUM_OF_CORE) {
2912 send_msg_4(STARTUPCORE, GCPROFILES, gc_num_obj,
2913 gc_num_liveobj, gc_num_forwardobj, false);
2916 #endif // GC_PROFLIE_S
2918 #ifdef RAWPATH // TODO GC_DEBUG
2919 printf("(%x,%x) Finish flush phase\n", udn_tile_coord_x(),
2920 udn_tile_coord_y());
2924 if(FINISHPHASE == gcphase) {
2928 #ifdef RAWPATH // TODO GC_DEBUG
2929 printf("(%x,%x) Finish gc!\n", udn_tile_coord_x(), udn_tile_coord_y());
2931 } // void gc_collect(struct garbagelist * stackptr)
// gc(): top-level entry of a collection cycle, role-dispatched on core id:
//   - core 0 (startup core) acts as COORDINATOR: drives every phase,
//     broadcasts phase-start messages, and waits on gccorestatus[] until
//     all cores acknowledge each phase;
//   - cores 1..NUMCORES4GC-1 run gc_collect() (full worker);
//   - all other active cores run gc_nocollect() (mark/flush only).
// Coordinator phase order: INIT -> MARK -> (large-obj cache + load
// balance) -> COMPACT/SUBTLECOMPACT -> MAP -> FLUSH -> FINISH.
// NOTE(review): listing is elided — wait loops, several closing braces,
// #else branches and #endif lines fall on lines not shown; comments
// below only annotate what is visible.
2933 inline void gc(struct garbagelist * stackptr) {
2936 gcprocessing = false;
2940 // core coordinator routine
2941 if(0 == BAMBOO_NUM_OF_CORE) {
2943 printf("(%x,%X) Check if can do gc or not\n", udn_tile_coord_x(),
2944 udn_tile_coord_y());
2947 // not ready to do gc
2956 #ifdef RAWPATH // TODO GC_DEBUG
2957 printf("(%x,%x) start gc! \n", udn_tile_coord_x(), udn_tile_coord_y());
// ---- INIT phase: tell every active core (gc and non-gc) to init ----
2960 gcprocessing = true;
2961 gcphase = INITPHASE;
2963 waitconfirm = false;
2967 // Note: all cores need to init gc including non-gc cores
2968 for(i = 1; i < NUMCORESACTIVE /*NUMCORES4GC*/; i++) {
2969 // send GC init messages to all cores
2970 send_msg_1(i, GCSTARTINIT, false);
2972 bool isfirst = true;
2973 bool allStall = false;
2975 #ifdef RAWPATH // TODO GC_DEBUG
2976 printf("(%x,%x) Check core status \n", udn_tile_coord_x(),
2977 udn_tile_coord_y());
// Wait until every core has acknowledged INIT (status check must run
// in runtime/interrupt-protected mode, hence the _I suffix).
2980 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
2982 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
2983 if(gc_checkAllCoreStatus_I()) {
2984 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2987 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
2992 #ifdef RAWPATH // TODO GC_DEBUG
2993 printf("(%x,%x) Start mark phase \n", udn_tile_coord_x(),
2994 udn_tile_coord_y());
// ---- MARK phase: broadcast GCSTART and mark locally until done ----
2996 // all cores have finished compacting
2997 // restore the gcstatus of all cores
2998 // Note: all cores have to do mark including non-gc cores
2999 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
3000 for(i = 1; i < NUMCORESACTIVE /*NUMCORES4GC*/; ++i) {
3001 gccorestatus[i] = 1;
3002 // send GC start messages to all cores
3003 send_msg_1(i, GCSTART, false);
3006 gcphase = MARKPHASE;
3008 while(MARKPHASE == gcphase) {
3009 mark(isfirst, stackptr);
3016 } // while(MARKPHASE == gcphase)
3017 // send msgs to all cores requiring large objs info
3018 // Note: only need to ask gc cores, non-gc cores do not host any objs
3019 numconfirm = NUMCORES4GC - 1;
3020 for(i = 1; i < NUMCORES4GC; ++i) {
3021 send_msg_1(i, GCLOBJREQUEST, false);
3023 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
3028 } // wait for responses
3029 // check the heaptop
3030 if(gcheaptop < gcmarkedptrbound) {
3031 gcheaptop = gcmarkedptrbound;
3036 /*if(BAMBOO_NUM_OF_CORE == 0) {
3037 BAMBOO_DEBUGPRINT(0xeeee);
3038 BAMBOO_DEBUGPRINT_REG(num_markrequest);
3039 BAMBOO_DEBUGPRINT_REG(marktime);
3042 #ifdef RAWPATH // TODO GC_DEBUG
3043 printf("(%x,%x) prepare to cache large objs \n", udn_tile_coord_x(),
3044 udn_tile_coord_y());
3047 // cache all large objs
// Fatal if large objects cannot be cached (guard condition elided).
3049 // no enough space to cache large objs
3050 BAMBOO_EXIT(0xb107);
// ---- COMPACT phase: load-balance, then tell each core how many
// blocks (numpbc) it should fill ----
3052 // predict number of blocks to fill for each core
3054 int numpbc = loadbalance(&tmpheaptop);
3056 numpbc = (BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_SMEM_SIZE);
3057 #ifdef RAWPATH // TODO GC_DEBUG
3058 printf("(%x,%x) mark phase finished \n", udn_tile_coord_x(),
3059 udn_tile_coord_y());
3062 //int tmptopptr = 0;
3063 //BASEPTR(gctopcore, 0, &tmptopptr);
3065 //tmptopptr = gcbaseva + (BAMBOO_SHARED_MEM_SIZE);
3066 tmpheaptop = gcbaseva + (BAMBOO_SHARED_MEM_SIZE);
3068 BAMBOO_DEBUGPRINT(0xabab);
3069 BAMBOO_DEBUGPRINT_REG(tmptopptr);
3071 for(i = 0; i < NUMCORES4GC; ++i) {
3073 BASEPTR(i, numpbc, &tmpcoreptr);
3074 //send start compact messages to all cores
3075 //TODO bug here, do not know if the direction is positive or negtive?
3076 if (tmpcoreptr < tmpheaptop /*tmptopptr*/) {
// Core's base for block numpbc is still below the heap top: give it
// one extra block to fill.
3077 gcstopblock[i] = numpbc + 1;
3078 if(i != STARTUPCORE) {
3079 send_msg_2(i, GCSTARTCOMPACT, numpbc+1, false);
3081 gcblock2fill = numpbc+1;
3082 } // if(i != STARTUPCORE)
3084 gcstopblock[i] = numpbc;
3085 if(i != STARTUPCORE) {
3086 send_msg_2(i, GCSTARTCOMPACT, numpbc, false);
3088 gcblock2fill = numpbc;
3089 } // if(i != STARTUPCORE)
3092 BAMBOO_DEBUGPRINT(0xf000+i);
3093 BAMBOO_DEBUGPRINT_REG(tmpcoreptr);
3094 BAMBOO_DEBUGPRINT_REG(gcstopblock[i]);
3096 // init some data strutures for compact phase
3098 gcfilledblocks[i] = 0;
3099 gcrequiredmems[i] = 0;
3107 bool finalcompact = false;
3108 // initialize pointers for comapcting
3109 struct moveHelper * orig =
3110 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
3111 struct moveHelper * to =
3112 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
3113 initOrig_Dst(orig, to);
3114 int filledblocks = 0;
3115 INTPTR heaptopptr = 0;
3116 bool finishcompact = false;
3117 bool iscontinue = true;
3118 bool localcompact = true;
// Coordinator also compacts its own blocks while arbitrating the
// other cores' pending move requests.
3119 while((COMPACTPHASE == gcphase) || (SUBTLECOMPACTPHASE == gcphase)) {
3120 if((!finishcompact) && iscontinue) {
3122 BAMBOO_DEBUGPRINT(0xe001);
3123 BAMBOO_DEBUGPRINT_REG(numpbc);
3124 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
3126 finishcompact = compacthelper(orig, to, &filledblocks,
3127 &heaptopptr, &localcompact);
3129 BAMBOO_DEBUGPRINT(0xe002);
3130 BAMBOO_DEBUGPRINT_REG(finishcompact);
3131 BAMBOO_DEBUGPRINT_REG(gctomove);
3132 BAMBOO_DEBUGPRINT_REG(gcrequiredmems[0]);
3133 BAMBOO_DEBUGPRINT_REG(gcfilledblocks[0]);
3134 BAMBOO_DEBUGPRINT_REG(gcstopblock[0]);
3138 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
3139 if(gc_checkCoreStatus_I()) {
3140 // all cores have finished compacting
3141 // restore the gcstatus of all cores
3142 for(i = 0; i < NUMCORES4GC; ++i) {
3143 gccorestatus[i] = 1;
3145 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3148 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3149 // check if there are spare mem for pending move requires
3150 if(COMPACTPHASE == gcphase) {
3152 BAMBOO_DEBUGPRINT(0xe003);
3154 resolvePendingMoveRequest();
3156 BAMBOO_DEBUGPRINT_REG(gctomove);
3160 BAMBOO_DEBUGPRINT(0xe004);
3164 } // if(gc_checkCoreStatus_I()) else ...
3168 BAMBOO_DEBUGPRINT(0xe005);
3169 BAMBOO_DEBUGPRINT_REG(gcmovestartaddr);
3170 BAMBOO_DEBUGPRINT_REG(gcblock2fill);
3171 BAMBOO_DEBUGPRINT_REG(gctomove);
// A move destination was granted: re-aim the 'to' helper at the
// granted address and recompute its block bounds/top.
3173 to->ptr = gcmovestartaddr;
3174 to->numblocks = gcblock2fill - 1;
3175 to->bound = (to->numblocks==0) ?
3176 BAMBOO_SMEM_SIZE_L :
3177 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
3178 BASEPTR(gcdstcore, to->numblocks, &(to->base));
3179 to->offset = to->ptr - to->base;
3180 to->top = (to->numblocks==0) ?
3181 (to->offset) : (to->bound-BAMBOO_SMEM_SIZE+to->offset);
3183 to->offset = BAMBOO_CACHE_LINE_SIZE;
3184 to->ptr += to->offset; // for header
3185 to->top += to->offset;
3186 if(gcdstcore == BAMBOO_NUM_OF_CORE) {
3187 localcompact = true;
3189 localcompact = false;
3193 } else if(!finishcompact) {
3197 } // while(COMPACTPHASE == gcphase)
3201 #ifdef RAWPATH // TODO GC_DEBUG
3202 printf("(%x,%x) prepare to move large objs \n", udn_tile_coord_x(),
3203 udn_tile_coord_y());
3208 #ifdef RAWPATH // TODO GC_DEBUG
3209 printf("(%x,%x) compact phase finished \n", udn_tile_coord_x(),
3210 udn_tile_coord_y());
// ---- MAP phase: gc cores exchange old->new address mapping info ----
3218 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
3219 // Note: all cores should flush their runtime data including non-gc
3221 for(i = 1; i < NUMCORES4GC; ++i) {
3222 // send start flush messages to all cores
3223 gccorestatus[i] = 1;
3224 send_msg_1(i, GCSTARTMAPINFO, false);
3229 #ifdef RAWPATH // TODO GC_DEBUG
3230 printf("(%x,%x) Start map phase \n", udn_tile_coord_x(),
3231 udn_tile_coord_y());
3235 #ifdef RAWPATH // TODO GC_DEBUG
3236 printf("(%x,%x) Finish map phase \n", udn_tile_coord_x(),
3237 udn_tile_coord_y());
3239 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
3240 while(MAPPHASE == gcphase) {
3241 // check the status of all cores
3242 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
3243 if(gc_checkCoreStatus_I()) {
3244 // all cores have finished sending mapping info
3245 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3248 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3249 } // while(MAPPHASE == gcphase)
// ---- FLUSH phase: every active core rewrites stale pointers ----
3251 gcphase = FLUSHPHASE;
3252 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
3253 // Note: all cores should flush their runtime data including non-gc
3255 for(i = 1; i < NUMCORESACTIVE /*NUMCORES4GC*/; ++i) {
3256 // send start flush messages to all cores
3257 gccorestatus[i] = 1;
3258 send_msg_1(i, GCSTARTFLUSH, false);
3263 #ifdef RAWPATH // TODO GC_DEBUG
3264 printf("(%x,%x) Start flush phase \n", udn_tile_coord_x(),
3265 udn_tile_coord_y());
3269 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
3270 while(FLUSHPHASE == gcphase) {
3271 // check the status of all cores
3272 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
3273 if(gc_checkAllCoreStatus_I()) {
3274 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3277 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
3278 } // while(FLUSHPHASE == gcphase)
// ---- FINISH phase ----
3279 gcphase = FINISHPHASE;
3281 // invalidate all shared mem pointers
3282 // put it here as it takes time to inform all the other cores to
3283 // finish gc and it might cause problem when some core resumes
3284 // mutator earlier than the other cores
3285 bamboo_cur_msp = NULL;
3286 bamboo_smem_size = 0;
3287 bamboo_smem_zero_top = NULL;
3289 gcprocessing = false;
3294 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
3295 for(i = 1; i < NUMCORESACTIVE /*NUMCORES4GC*/; ++i) {
3296 // send gc finish messages to all cores
3297 send_msg_1(i, GCFINISH, false);
3298 gccorestatus[i] = 1;
3300 #ifdef RAWPATH // TODO GC_DEBUG
3301 printf("(%x,%x) gc finished \n", udn_tile_coord_x(),
3302 udn_tile_coord_y());
3305 //BAMBOO_DEBUGPRINT(0x1111); // TODO
3306 /*#ifdef GC_PROFILE_S
3307 BAMBOO_DEBUGPRINT(0xaaaa);
3308 BAMBOO_DEBUGPRINT_REG(gc_num_obj);
3309 BAMBOO_DEBUGPRINT_REG(gc_num_liveobj);
3310 BAMBOO_DEBUGPRINT_REG(gc_num_forwardobj);
3311 BAMBOO_DEBUGPRINT_REG(gc_num_profiles);
3312 BAMBOO_DEBUGPRINT(0xaaab);
3313 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
3314 BAMBOO_DEBUGPRINT(0xaaac);
3315 BAMBOO_DEBUGPRINT_REG(gc_num_livespace);
3316 BAMBOO_DEBUGPRINT_REG(gc_num_freespace);
3317 BAMBOO_DEBUGPRINT(0xaaad);
3319 gc_num_obj = gc_num_liveobj;
3321 gc_num_forwardobj = 0;
3322 #endif // GC_PROFLIE_S*/
// ---- worker / non-gc core paths ----
3323 } else if(BAMBOO_NUM_OF_CORE < NUMCORES4GC) {
3324 gcprocessing = true;
3325 gc_collect(stackptr);
3327 // invalidate all shared mem pointers
3328 bamboo_cur_msp = NULL;
3329 bamboo_smem_size = 0;
3330 bamboo_smem_zero_top = NULL;
3332 gcprocessing = false;
3334 // not a gc core, should wait for gcfinish msg
3335 gcprocessing = true;
3336 gc_nocollect(stackptr);
3338 // invalidate all shared mem pointers
3339 bamboo_cur_msp = NULL;
3340 bamboo_smem_size = 0;
3341 bamboo_smem_zero_top = NULL;
3343 gcprocessing = false;
3345 } // void gc(struct garbagelist * stackptr)
// gc_profileStart(): open a new GC profiling record.  Allocates a GCInfo
// slot in gc_infoArray[gc_infoIndex] and stamps time[0] with the cycle
// start time.  No-op once gc_infoOverflow is set.
// NOTE(review): gcInfo->index initialization and the closing braces fall
// on elided lines.
3348 inline void gc_profileStart(void) {
3349 if(!gc_infoOverflow) {
3350 GCInfo* gcInfo = RUNMALLOC(sizeof(struct gc_info));
3351 gc_infoArray[gc_infoIndex] = gcInfo;
3353 gcInfo->time[0] = BAMBOO_GET_EXE_TIME();
// gc_profileItem(): append one timestamp to the current GC profiling
// record (phase-boundary marker).  No-op once gc_infoOverflow is set.
3357 inline void gc_profileItem(void) {
3358 if(!gc_infoOverflow) {
3359 GCInfo* gcInfo = gc_infoArray[gc_infoIndex];
3360 gcInfo->time[gcInfo->index++] = BAMBOO_GET_EXE_TIME();
// gc_profileEnd(): close the current GC profiling record.  Appends the
// end timestamp followed by seven counters (livespace, freespace, lobj
// count, lobj space, obj count, live obj count, forwarded obj count) —
// the consumers below (gc_outputProfileData) read these back as the last
// 7 entries of time[].  Advances gc_infoIndex and latches gc_infoOverflow
// when GCINFOLENGTH records have been filled.
3364 inline void gc_profileEnd(void) {
3365 if(!gc_infoOverflow) {
3366 GCInfo* gcInfo = gc_infoArray[gc_infoIndex];
3367 gcInfo->time[gcInfo->index++] = BAMBOO_GET_EXE_TIME();
3368 gcInfo->time[gcInfo->index++] = gc_num_livespace;
3369 gcInfo->time[gcInfo->index++] = gc_num_freespace;
3370 gcInfo->time[gcInfo->index++] = gc_num_lobj;
3371 gcInfo->time[gcInfo->index++] = gc_num_lobjspace;
3372 gcInfo->time[gcInfo->index++] = gc_num_obj;
3373 gcInfo->time[gcInfo->index++] = gc_num_liveobj;
3374 gcInfo->time[gcInfo->index++] = gc_num_forwardobj;
3376 if(gc_infoIndex == GCINFOLENGTH) {
3377 gc_infoOverflow = true;
3378 //taskInfoIndex = 0;
// gc_outputProfileData(): dump all recorded GC profiling records.
// NOTE(review): two variants appear below — a printf-based one and a
// BAMBOO_DEBUGPRINT-based one further down; the preprocessor lines that
// select between them (likely an I/O-availability #ifdef) are elided in
// this listing.  In the second variant the last 7 time[] entries are the
// counters written by gc_profileEnd(), printed separately from the
// timestamps.
3383 // output the profiling data
3384 void gc_outputProfileData() {
3387 unsigned long long totalgc = 0;
3389 //printf("Start Time, End Time, Duration\n");
3390 // output task related info
3391 for(i = 0; i < gc_infoIndex; i++) {
3392 GCInfo * gcInfo = gc_infoArray[i];
3393 unsigned long long tmp = 0;
// Print each timestamp together with its delta from the previous one.
3394 for(j = 0; j < gcInfo->index; j++) {
3395 printf("%lld(%lld), ", gcInfo->time[j], (gcInfo->time[j]-tmp));
3396 tmp = gcInfo->time[j];
3398 tmp = (tmp-gcInfo->time[0]);
3399 printf(" ++ %lld \n", tmp);
3403 if(gc_infoOverflow) {
3404 printf("Caution: gc info overflow!\n");
3407 printf("\n\n total gc time: %lld \n", totalgc);
3411 unsigned long long totalgc = 0;
3413 #ifndef BAMBOO_MEMPROF
3414 BAMBOO_DEBUGPRINT(0xdddd);
3416 // output task related info
3417 for(i= 0; i < gc_infoIndex; i++) {
3418 GCInfo * gcInfo = gc_infoArray[i];
3419 #ifdef BAMBOO_MEMPROF
// Under BAMBOO_MEMPROF only the cycle duration is kept:
// end-timestamp (index-8) minus start-timestamp (time[0]).
3420 unsigned long long tmp=gcInfo->time[gcInfo->index-8]-gcInfo->time[0]; //0;
3422 unsigned long long tmp = 0;
3423 BAMBOO_DEBUGPRINT(0xddda);
// index-7 .. index-1 are counters, not timestamps — stop before them.
3424 for(j = 0; j < gcInfo->index - 7; j++) {
3425 BAMBOO_DEBUGPRINT(gcInfo->time[j]);
3426 BAMBOO_DEBUGPRINT(gcInfo->time[j]-tmp);
3427 BAMBOO_DEBUGPRINT(0xdddb);
3428 tmp = gcInfo->time[j];
3430 tmp = (tmp-gcInfo->time[0]);
3431 BAMBOO_DEBUGPRINT_REG(tmp);
3432 BAMBOO_DEBUGPRINT(0xdddc);
3433 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 7]);
3434 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 6]);
3435 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 5]);
3436 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 4]);
3437 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 3]);
3438 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 2]);
3439 BAMBOO_DEBUGPRINT(gcInfo->time[gcInfo->index - 1]);
3440 BAMBOO_DEBUGPRINT(0xddde);
3444 #ifndef BAMBOO_MEMPROF
3445 BAMBOO_DEBUGPRINT(0xdddf);
3447 BAMBOO_DEBUGPRINT_REG(totalgc);
3449 if(gc_infoOverflow) {
3450 BAMBOO_DEBUGPRINT(0xefee);
3453 #ifndef BAMBOO_MEMPROF
3454 BAMBOO_DEBUGPRINT(0xeeee);
3458 #endif // #ifdef GC_PROFILE