2 #include "multicoregarbage.h"
3 #include "multicoreruntime.h"
4 #include "runtime_arch.h"
5 #include "SimpleHash.h"
6 #include "GenericHashtable.h"
8 extern struct genhashtable * activetasks;
9 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
10 extern struct taskparamdescriptor *currtpdo;
// NOTE(review): lines are elided here -- head/tail appear to be the
// members of 'struct largeObjList' (its opening line is not visible);
// confirm against the full source.
13 struct largeObjItem * head;
14 struct largeObjItem * tail;
// Core-local list of large objects discovered during the mark phase.
17 struct largeObjList lObjList;
// Append ptr to the global GC mark queue (gchead/gcheadindex).  When
// the current pointerblock is full, a fresh block is chained on.
21 void gc_enqueue(void *ptr) {
22 if (gcheadindex==NUMPTRS) {
23 struct pointerblock * tmp;
// NOTE(review): malloc result is not checked -- presumably OOM aborts
// elsewhere; confirm the allocation-failure policy.
28 tmp=malloc(sizeof(struct pointerblock))
// (elided lines link tmp into the queue and reset gcheadindex)
33 gchead->ptrs[gcheadindex++]=ptr;
36 // dequeue and destroy the queue
// Pop the next pointer from the mark queue via the destructive cursor
// (gctail/gctailindex), freeing exhausted pointerblocks.
38 if (gctailindex==NUMPTRS) {
// NOTE(review): 'tail' here is the global 'struct largeObjItem *',
// not a pointerblock -- this looks like it should be 'gctail'; verify.
39 struct pointerblock *tmp=tail;
47 return gctail->ptrs[gctailindex++];
50 // dequeue and do not destroy the queue
// Second cursor over the same queue (gctail2/gctailindex2): reads
// entries without freeing the underlying pointerblocks, so the queue
// can be traversed again later.
51 void * gc_dequeue2() {
52 if (gctailindex2==NUMPTRS) {
// NOTE(review): 'tail' is the largeObjItem global and should probably
// be 'gctail2'; 'tmp' is also unused in the visible lines -- verify.
53 struct pointerblock *tmp=tail;
54 gctail2=gctail2->next;
57 return gctail2->ptrs[gctailindex2++];
// gc_moreItems: the queue is empty iff the destructive cursor has
// caught up with the head (same block, same index).
61 if ((gchead==gctail)&&(gctailindex==gcheadindex))
// gc_moreItems2: same emptiness test for the non-destructive cursor.
67 if ((gchead==gctail2)&&(gctailindex2==gcheadindex))
// Running top/bound of this core's shared-heap region, maintained
// during marking to predict post-compaction heap usage.
72 INTPTR curr_heaptop = 0;
73 INTPTR curr_heapbound = 0;
// Decide whether the object at ptr is "large": compute its size from
// the type header and report whether its end would fall outside this
// core's local shared-memory region.  (Remaining parameters elided;
// they appear to return type/size to the caller -- confirm.)
75 bool isLarge(void * ptr,
78 // check if a pointer is referring to a large object
// First word of every object is its type id.
79 int type = ((int *)ptr)[0];
81 if(type < NUMCLASSES) {
83 size = classsize[type];
// Array case: array header plus length * per-element size.
86 struct ArrayObject *ao=(struct ArrayObject *)ptr;
87 int elementsize=classsize[type];
88 int length=ao->___length___;
89 size=sizeof(struct ArrayObject)+length*elementsize;
// NOTE(review): arithmetic on void* is a GCC extension; "large" means
// the object's end address is not hosted by this core.
93 return(!isLocal(ptr + size));
// Map a shared-heap address to the id of the core that hosts it.
96 int hostcore(void * ptr) {
97 // check the host core of ptr
// Resolve the address to tile coordinates, then flatten to a core id.
101 RESIDECORE(ptr, &x, &y);
// NOTE(review): when x==0 the first arm reduces to just y; the -2 in
// the other arm presumably skips two reserved tiles in column 0 --
// confirm against the platform's core layout.
102 host = (x==0)?(x*bamboo_height+y):(x*bamboo_height+y-2);
// True when the shared-heap address ptr is hosted by this core.
106 bool isLocal(void * ptr) {
107 // check if a pointer is in shared heap on this core
108 return hostcore(ptr) == BAMBOO_NUM_OF_CORE;
// Report this core's mark results (heap-top estimate and, eventually,
// large-object info) to STARTUPCORE over the Tilera UDN.
111 void transferMarkResults() {
112 // TODO, need distinguish between send and cache
113 // invoked inside interrupt handler
117 // TODO check large objs here
120 DynamicHeader msgHdr = tmc_udn_header_from_cpu(STARTUPCORE);
123 __tmc_udn_send_header_with_size_and_tag(msgHdr, msgsize,
126 BAMBOO_DEBUGPRINT(0xbbbb);
127 BAMBOO_DEBUGPRINT(0xb000 + STARTUPCORE); // targetcore
// Payload: message type, sender core id, current heap top.
129 udn_send(GCLOBJINFO);
131 BAMBOO_DEBUGPRINT(GCLOBJINFO);
135 BAMBOO_DEBUGPRINT_REG(msgsize);
137 udn_send(BAMBOO_NUM_OF_CORE);
139 BAMBOO_DEBUGPRINT_REG(BAMBOO_NUM_OF_CORE);
141 udn_send(curr_heaptop);
143 BAMBOO_DEBUGPRINT_REG(curr_heaptop);
145 // TODO large objs here
148 BAMBOO_DEBUGPRINT(0xffff);
151 // end of sending this msg, set send msg flag false
152 isMsgSending = false;
// Send a GCSTARTCOMPACT message to 'core' describing its compaction
// work: its target load and any object movements to/from its left and
// right neighbours implied by the load-balance deltas.
156 void transferCompactStart(int core) {
157 // send start compact messages to all cores
158 // TODO no large obj info
164 // both lcore and rcore have the same action: either
165 // move objs or have incoming objs
// Positive delta: a neighbour pushes objects to 'core'; negative:
// 'core' pushes objects out.
166 if(gcdeltal[core] > 0) {
167 ismove = 0; // have incoming objs
169 } else if(gcdeltal[core] < 0) {
170 ismove = 1; // have objs to move
173 if(gcdeltar[core] > 0) {
174 ismove = 0; // have incoming objs
176 } else if(gcdeltar[core] < 0) {
177 ismove = 1; // have objs to move
// Two extra words (ismove, movenum) plus two words per move entry.
180 msgsize += (movenum == 0) ? 0 : 2 + movenum * 2;
183 DynamicHeader msgHdr = tmc_udn_header_from_cpu(core);
186 __tmc_udn_send_header_with_size_and_tag(msgHdr, msgsize,
189 BAMBOO_DEBUGPRINT(0xbbbb);
190 BAMBOO_DEBUGPRINT(0xb000 + core); // targetcore
192 udn_send(GCSTARTCOMPACT);
194 BAMBOO_DEBUGPRINT(GCSTARTCOMPACT);
198 BAMBOO_DEBUGPRINT_REG(msgsize);
// Target load for the receiving core after rebalancing.
200 udn_send(gcreloads[core]);
202 BAMBOO_DEBUGPRINT_REG(gcreloads[core]);
207 BAMBOO_DEBUGPRINT_REG(movenum);
211 BAMBOO_DEBUGPRINT_REG(ismove);
// One (amount, destination) pair per affected neighbour.
214 if(gcdeltal[core] != 0) {
215 LEFTNEIGHBOUR(core, &dst);
216 udn_send(abs(gcdeltal[core]));
218 BAMBOO_DEBUGPRINT_REG(abs(gcdeltal[core]));
222 BAMBOO_DEBUGPRINT_REG(dst);
225 if(gcdeltar[core] != 0) {
226 RIGHTNEIGHBOUR(core, &dst);
227 udn_send(abs(gcdeltar[core]));
229 BAMBOO_DEBUGPRINT_REG(abs(gcdeltar[core]));
233 BAMBOO_DEBUGPRINT_REG(dst);
238 BAMBOO_DEBUGPRINT(0xffff);
241 // end of sending this msg, set send msg flag false
242 isMsgSending = false;
// Coordinator-side check whether all cores have finished marking and
// no GCMARKEDOBJ messages remain in flight; if so, advance the global
// phase to COMPACTPHASE.
// NOTE(review): the name is presumably a typo for checkMarkStatus;
// renaming would break callers, so it is left as-is.
246 void checkMarkStatue() {
// NOTE(review): mixes 'gcwaitconfirm' and 'waitconfirm' -- one of the
// two is probably wrong for this GC phase; verify which flag applies.
247 if((!gcwaitconfirm) ||
248 (waitconfirm && (numconfirm == 0))) {
249 BAMBOO_START_CRITICAL_SECTION_STATUS();
250 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
251 gcnumsendobjs[BAMBOO_NUM_OF_CORE] = gcself_numsendobjs;
252 gcnumreceiveobjs[BAMBOO_NUM_OF_CORE] = gcself_numreceiveobjs;
253 // check the status of all cores
254 bool allStall = true;
255 for(i = 0; i < NUMCORES; ++i) {
256 if(gccorestatus[i] != 0) {
262 // check if the sum of send objs and receive obj are the same
263 // yes->check if the info is the latest; no->go on executing
// Messages are in flight iff total sent != total received.
265 for(i = 0; i < NUMCORES; ++i) {
266 sumsendobj += gcnumsendobjs[i];
268 for(i = 0; i < NUMCORES; ++i) {
269 sumsendobj -= gcnumreceiveobjs[i];
271 if(0 == sumsendobj) {
273 // the first time found all cores stall
274 // send out status confirm msg to all other cores
275 // reset the corestatus array too
276 gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
278 numconfirm = NUMCORES - 1;
279 for(i = 1; i < NUMCORES; ++i) {
281 // send mark phase finish confirm request msg to core i
282 send_msg_1(i, GCMARKCONFIRM);
285 // all the core status info are the latest
// Everyone has confirmed: move the whole GC to compaction.
287 gcphase = COMPACTPHASE;
288 // restore the gcstatus for all cores
289 for(i = 0; i < NUMCORES; ++i) {
292 } // if(!gcwautconfirm) else()
293 } // if(0 == sumsendobj)
295 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
296 } // if((!gcwaitconfirm)...
300 // preparation for gc
301 // make sure to clear all incoming msgs especially transfer obj msgs
// (Enclosing function header not visible here.)  Confirms with all
// cores that no transfer-object messages are still in flight before
// GC is allowed to start.
304 (waitconfirm && (numconfirm == 0))) {
305 // send out status confirm msgs to all cores to check if there are
306 // transfer obj msgs on-the-fly
308 numconfirm = NUMCORES - 1;
309 for(i = 1; i < NUMCORES; ++i) {
311 // send status confirm msg to core i
312 send_msg_1(i, STATUSCONFIRM);
// Busy-wait; numconfirm is decremented by the message handler.
315 while(numconfirm != 0) {} // wait for confirmations
316 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
317 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
// GC can start only if every sent object has been received.
319 for(i = 0; i < NUMCORES; ++i) {
320 sumsendobj += numsendobjs[i];
322 for(i = 0; i < NUMCORES; ++i) {
323 sumsendobj -= numreceiveobjs[i];
325 if(0 == sumsendobj) {
328 // still have some transfer obj msgs on-the-fly, can not start gc
332 // previously asked for status confirmation and do not have all the
333 // confirmations yet, can not start gc
338 // compute load balance for all cores
// (Enclosing function header not visible here.)  Iteratively diffuses
// heap load between neighbouring cores until the deltas converge,
// then derives per-core move amounts from the final target loads.
340 // compute load balance
341 // initialize the deltas
// NOTE(review): '1 << 32 -1' parses as '1 << (32-1)' == 1<<31, which
// overflows signed int (undefined behavior) and yields INT_MIN on
// typical targets -- not the intended "max int" sentinel.  Should
// presumably be INT_MAX (0x7fffffff); verify before changing.
343 int delta = 1 << 32 -1;
344 int deltanew = 1 << 32 - 1;
348 for(i = 0; i < NUMCORES; i++) {
349 gcdeltal[i] = gcdeltar[i] = 0;
350 gcreloads[i] = gcloads[i];
353 // iteratively balance the loads
357 // compute load balance
358 for(i = 0; i < NUMCORES; i++) {
359 if(gcreloads[i] > BAMBOO_SMEM_SIZE_L) {
360 // too much load, try to redirect some of it to its neighbours
361 LEFTNEIGHBOUR(i, &lcore);
362 RIGHTNEIGHBOUR(i, &rcore);
// Push half of the imbalance toward the lighter neighbour.
364 int tmp = (gcreloads[lcore] - gcreloads[i]) / 2;
366 gcdeltar[lcore] = 0-tmp;
367 deltanew += abs(gcreloads[lcore] - gcreloads[i]);
370 int tmp = (gcreloads[rcore] - gcreloads[i]) / 2;
372 gcdeltal[rcore] = 0-tmp;
373 deltanew += abs(gcreloads[rcore] - gcreloads[i]);
// Converged when nothing changed since the previous iteration.
378 if((deltanew == 0) || (delta == deltanew)) {
381 // flush for new loads
382 for(i = 0; i < NUMCORES; i++) {
383 if((gcdeltal[i] != 0) || (gcdeltar[i] != 0)) {
385 gcreloads[i] += gcdeltal[i] + gcdeltar[i];
386 gcdeltal[i] = gcdeltar[i] = 0;
391 // decide how to do load balance
392 for(i = 0; i < NUMCORES; i++) {
393 gcdeltal[i] = gcdeltar[i] = 0;
395 for(i = 0; i < NUMCORES; i++) {
// Surplus on core i relative to its rebalanced target load.
396 int tomove = (gcloads[i] - gcreloads[i]);
398 LEFTNEIGHBOUR(i, &lcore);
399 RIGHTNEIGHBOUR(i, &rcore);
// How much each neighbour still needs to receive.
403 lmove = (gcreloads[lcore] - gcloads[lcore] - gcdeltal[lcore]);
409 rmove = (gcreloads[rcore] - gcloads[rcore] - gcdeltar[rcore]);
414 // the one with bigger gap has higher priority
416 int ltomove = (lmove > tomove)? tomove:lmove;
417 gcdeltar[lcore] = ltomove;
418 gcdeltal[i] = 0-ltomove;
419 gcdeltal[rcore] = tomove - ltomove;
420 gcdeltar[i] = ltomove - tomove;
422 int rtomove = (rmove > tomove)? tomove:rmove;
423 gcdeltal[rcore] = rtomove;
424 gcdeltar[i] = 0-rtomove;
425 gcdeltar[lcore] = tomove - rtomove;
426 gcdeltal[i] = rtomove - tomove;
// Entry point for one GC round.  Core 0 acts as coordinator: it
// drives the mark/compact/flush phases and broadcasts the phase
// transitions; every other core runs gc_collect() instead.
432 void gc(struct garbagelist * stackptr) {
438 // core coordinator routine
439 if(0 == BAMBOO_NUM_OF_CORE) {
441 // not ready to do gc
448 gcwaitconfirm = false;
// NOTE(review): upper bound is NUMCORES-1 here but NUMCORES in every
// other broadcast loop below, so the last core may never receive
// GCSTART -- verify.
451 for(i = 1; i < NUMCORES - 1; i++) {
452 // send GC start messages to all cores
453 send_msg_1(i, GCSTART);
456 bool allStall = false;
// The coordinator marks its own roots while waiting for the others.
459 while(MARKPHASE == gcphase) {
460 mark(isfirst, stackptr);
467 } // while(MARKPHASE == gcphase)
468 // send msgs to all cores requiring large objs info
469 numconfirm = NUMCORES - 1;
470 for(i = 1; i < NUMCORES; ++i) {
471 send_msg_1(i, GCLOBJREQUEST);
// Busy-wait; numconfirm is decremented from the message handler.
473 while(numconfirm != 0) {} // wait for responses
475 // TODO need to decide where to put large objects
476 // TODO cache all large objects
478 for(i = 1; i < NUMCORES; ++i) {
479 //send start compact messages to all cores
480 transferCompactStart(i);
485 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
486 while(COMPACTPHASE == gcphase) {
487 // check the status of all cores
489 for(i = 0; i < NUMCORES; ++i) {
490 if(gccorestatus[i] != 0) {
496 // restore the gcstatus of all cores
497 for(i = 0; i < NUMCORES; ++i) {
502 } // while(COMPACTPHASE == gcphase)
504 gcphase = FLUSHPHASE;
505 for(i = 1; i < NUMCORES; ++i) {
506 // send start flush messages to all cores
507 send_msg_1(i, GCSTARTFLUSH);
512 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
513 while(FLUSHPHASE == gcphase) {
514 // check the status of all cores
516 for(i = 0; i < NUMCORES; ++i) {
517 if(gccorestatus[i] != 0) {
525 } // while(FLUSHPHASE == gcphase)
526 gcphase = FINISHPHASE;
527 for(i = 1; i < NUMCORES; ++i) {
528 // send gc finish messages to all cores
529 send_msg_1(i, GCFINISH);
532 // need to create free memory list and invalidate all
533 // shared mem pointers TODO
536 gcprocessing = false;
// Non-coordinator path: participate as a collector core.
540 gc_collect(stackptr);
542 // invalidate all shared mem pointers
543 bamboo_cur_msp = NULL;
544 bamboo_smem_size = 0;
546 gcprocessing = false;
// Seed the mark queue with every GC root on this core: the runtime
// stack, per-class object queues, the current task descriptor, the
// active task list, and cached transferred objects.
551 void tomark(struct garbagelist * stackptr) {
552 if(MARKPHASE != gcphase) {
// Fresh queue: head and both tail cursors share one block.
// NOTE(review): malloc result unchecked -- confirm OOM policy.
561 gchead=gctail=gctail2=malloc(sizeof(struct pointerblock));
564 // enqueue current stack
565 while(stackptr!=NULL) {
566 for(i=0; i<stackptr->size; i++) {
567 gc_enqueue(stackptr->array[i]);
569 stackptr=stackptr->next;
571 // enqueue objectsets
572 for(i=0; i<NUMCLASSES; i++) {
573 struct parameterwrapper ** queues =
574 objectqueues[BAMBOO_NUM_OF_CORE][i];
575 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
576 for(j = 0; j < length; ++j) {
577 struct parameterwrapper * parameter = queues[j];
578 struct ObjectHash * set=parameter->objectset;
579 struct ObjectNode * ptr=set->listhead;
581 gc_enqueue((void *)ptr->key);
586 // enqueue current task descriptor
587 for(i=0; i<currtpd->numParameters; i++) {
588 gc_enqueue(currtpd->parameterArray[i]);
590 // enqueue active tasks
591 struct genpointerlist * ptr=activetasks->list;
593 struct taskparamdescriptor *tpd=ptr->src;
595 for(i=0; i<tpd->numParameters; i++) {
596 gc_enqueue(tpd->parameterArray[i]);
600 // enqueue cached transferred obj
601 struct QueueItem * tmpobjptr = getHead(&objqueue);
602 while(tmpobjptr != NULL) {
603 struct transObjInfo * objInfo =
604 (struct transObjInfo *)(tmpobjptr->objectptr);
605 gc_enqueue(objInfo->objptr);
// NOTE(review): the return value of getNextQueueItem is discarded, so
// tmpobjptr never advances and this loop appears infinite -- likely
// should be 'tmpobjptr = getNextQueueItem(tmpobjptr);'.  Verify the
// function's signature before fixing.
606 getNextQueueItem(tmpobjptr);
// Per-core mark phase: repeatedly drain the mark queue, size each
// live object, and either record it as a large object, account for it
// in this core's compacted-heap estimate, or notify its host core,
// then scan its reference fields.
610 void mark(bool isfirst,
611 struct garbagelist * stackptr) {
// Heap accounting starts just past the first block's header.
615 curr_heaptop = BAMBOO_CACHE_LINE_SIZE;
616 curr_heapbound = BAMBOO_SMEM_SIZE_L;
622 while(MARKPHASE == gcphase) {
623 while(gc_moreItems2()) {
// FIX: was 'voit * ptr' -- typo for 'void *', a compile error.
624 void * ptr = gc_dequeue2();
627 if(isLarge(ptr, &type, &size)) {
628 // ptr is a large object
629 struct largeObjItem * loi =
630 (struct largeObjItem*)RUNMALLOC(sizeof(struct largeObjItem));
631 loi->orig = (INTPTR)ptr;
632 loi->dst = (INTPTR)0;
// Append to the core-local large-object list.
634 if(lObjList.head == NULL) {
635 lObjList.head = lObjList.tail = loi;
637 lObjList.tail->next = loi;
640 } else if (isLocal(ptr)) {
641 // ptr is an active object on this core
// Accumulate the aligned size; spill into the next block when the
// current one cannot hold this object.
645 ALIGNSIZE(size, &isize);
646 curr_heaptop += isize;
647 if(curr_heaptop > curr_heapbound) {
648 // change to another block
649 curr_heaptop = curr_heapbound+BAMBOO_CACHE_LINE_SIZE+isize;
650 curr_heapbound += BAMBOO_SMEM_SIZE;
// Track the highest marked address: the compaction scan bound.
654 if(ptr > markedptrbound) {
655 markedptrbound = ptr;
658 // scan all pointers in ptr
659 unsigned INTPTR * pointer;
660 pointer=pointerarray[type];
662 /* Array of primitives */
664 } else if (((INTPTR)pointer)==1) {
665 /* Array of pointers */
666 struct ArrayObject *ao=(struct ArrayObject *) ptr;
667 int length=ao->___length___;
669 for(j=0; j<length; j++) {
671 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
672 int host = hostcore(objptr);
673 if(BAMBOO_NUM_OF_CORE == host) {
677 // send a msg to host informing that objptr is active
678 send_msg_2(host, GCMARKEDOBJ, objptr);
679 gcself_numsendobjs++;
// Regular object: pointer[0] holds the reference-field count,
// followed by the byte offset of each reference field.
683 INTPTR size=pointer[0];
685 for(i=1; i<=size; i++) {
686 unsigned int offset=pointer[i];
687 void * objptr=*((void **)(((char *)ptr)+offset));
688 int host = hostcore(objptr);
689 if(BAMBOO_NUM_OF_CORE == host) {
693 // send a msg to host informing that objptr is active
694 send_msg_2(host, GCMARKEDOBJ, objptr);
695 gcself_numsendobjs++;
699 } // while(!isEmpty(gctomark))
700 gcbusystatus = false;
701 // send mark finish msg to core coordinator
702 send_msg_4(STARTUPCORE, GCFINISHMARK, BAMBOO_NUM_OF_CORE,
703 gcself_numsendobjs, gcself_numreceiveobjs);
705 if(BAMBOO_NUM_OF_CORE == 0) {
708 } // while(MARKPHASE == gcphase)
// Cursor state used by the compaction routines below.  NOTE(review):
// these appear to be the members of 'struct moveHelper' (its opening
// line is not visible here); confirm against the full source.
712 int numblocks; // block num for heap
713 INTPTR base; // base virtual address of current heap block
714 INTPTR ptr; // virtual address of current heap top
715 int offset; // offset in current heap block
716 int blockbase; // virtual address of current small block to check
717 int blockbound; // bound virtual address of current small block
718 int top; // real size of current heap block to check
719 int bound; // bound size of current heap block to check
// Advance orig to the next small block to scan; when the current heap
// block is exhausted, step to this core's next heap block.
722 void nextSBlock(struct moveHelper * orig) {
723 orig->blockbase = orig->blockbound;
724 if(orig->blockbase == orig->bound) {
725 // end of current heap block, jump to next one
727 BASEPTR(BAMBOO_NUM_OF_CORE, orig->numblocks, &(orig->base));
728 orig->bound = orig->base + BAMBOO_SMEM_SIZE;
729 orig->blockbase = orig->base;
// The first word of a small block stores its used size (its header).
731 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
732 orig->offset = BAMBOO_CACHE_LINE_SIZE;
733 orig->ptr = orig->blockbase + orig->offset;
// Advance the destination cursor to the next heap block of 'to',
// reserving BAMBOO_CACHE_LINE_SIZE at the front for the block header.
736 void nextBlock(struct moveHelper * to) {
737 to->top = to->bound + BAMBOO_CACHE_LINE_SIZE; // header!
738 to->bound += BAMBOO_SMEM_SIZE;
740 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
741 to->offset = BAMBOO_CACHE_LINE_SIZE;
742 to->ptr = to->base + to->offset;
745 // endaddr does not contain spaces for headers
// Move one object from orig to to (if it is marked), recording the
// old->new address mapping in pointertbl.  Returns true when the copy
// cursor has reached *endaddr.  Words holding -2 are filler.
746 bool moveobj(struct moveHelper * orig,
747 struct moveHelper * to,
// Skip filler words (-2) up to the current small-block bound.
754 while((*((int*)(orig->ptr))) == -2) {
756 if(orig->ptr == orig->blockbound) {
761 // check the obj's type, size and mark flag
762 type = ((int *)(orig->ptr))[0];
765 // end of this block, go to next one
768 } else if(type < NUMCLASSES) {
770 size = classsize[type];
773 struct ArrayObject *ao=(struct ArrayObject *)(orig->ptr);
774 int elementsize=classsize[type];
775 int length=ao->___length___;
776 size=sizeof(struct ArrayObject)+length*elementsize;
// NOTE(review): magic header index 6 for the mark flag -- confirm
// against the runtime's object layout.
778 mark = ((int *)(orig->ptr))[6];
780 // marked obj, copy it to current heap top
781 // check to see if remaining space is enough
782 ALIGNSIZE(size, &isize);
783 if((endaddr != NULL) && (to->top + isize > *endaddr)) {
784 // reached the endaddr
785 // fill offset to the endaddr for later configuration of header
786 to->offset += *endaddr - to->top;
787 to->top += *endaddr - to->top;
790 if(to->top + isize > to->bound) {
791 // fill the header of this block and then go to next block
792 to->offset += to->bound - to->top;
793 (*((int*)(to->base))) = to->offset;
794 if(endaddr != NULL) {
795 *endaddr = *endaddr + BAMBOO_CACHE_LINE_SIZE;
// NOTE(review): to->ptr/orig->ptr are INTPTR fields; passing them to
// memcpy relies on implicit integer/pointer conversion -- verify.
799 memcpy(to->ptr, orig->ptr, size);
800 // store mapping info
801 RuntimeHashadd(pointertbl, orig->ptr, to->ptr);
808 if(orig->ptr == orig->blockbound) {
// Push objects scheduled for other cores: for each pending move
// instruction, set up a destination cursor over the assigned start
// address and copy objects until the per-destination end address is
// reached.
814 void migrateobjs(struct moveHelper * orig) {
815 int num_dsts = cinstruction->movenum;
816 while(num_dsts > 0) {
818 // start moving objects to other cores
// NOTE(review): 'into' is RUNMALLOC'd but never freed in the visible
// lines (possible leak); it is allocated once and re-initialized per
// destination inside the j-loop.
820 struct moveHelper * into =
821 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
822 for(int j = 0; j < cinstruction->movenum; j++) {
823 if(cinstruction->moveflag[j] == 1) {
824 // can start moving to corresponding core
825 int dst = cinstruction->dsts[j];
827 into->ptr = cinstruction->startaddrs[j];
828 BLOCKINDEX(into->ptr, &(into->numblocks));
829 into->bound = (into->numblocks==0)?
831 BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*into->numblocks;
832 BASEPTR(BAMBOO_NUM_OF_CORE, into->numblocks, &(into->base));
833 into->offset = into->ptr - into->base;
834 into->top = (into->numblocks==0)?
835 (into->offset):(into->bound-BAMBOO_SMEM_SIZE+into->offset);
// Re-anchor base at the move start and reserve the block header.
836 into->base = into->ptr;
837 into->offset = BAMBOO_CACHE_LINE_SIZE;
838 into->ptr += into->offset; // for header
839 into->top += into->offset;
840 int endaddr = into->top + cinstruction->endaddrs[j];
842 bool stop = moveobj(orig, into, &endaddr);
844 // all objs before endaddr have been moved
848 } while(orig->ptr < markedptrbound + 1);
849 // set the flag indicating move finished
850 cinstruction->moveflag[j] = 2;
851 // fill the header of this block
852 (*((int*)(into->base))) = into->offset;
853 } // if(cinstruction->moveflag[j] == 1)
854 } // for(int j = 0; j < cinstruction->movenum; j++)
856 } // while(num_dsts > 0)
// Compaction phase (enclosing function header not visible here):
// slide marked objects within this core's blocks, then either push
// leftover objects to other cores or compute landing addresses for
// incoming objects, and finally relocate large objects.
860 if(COMPACTPHASE != gcphase) {
864 INTPTR heaptopptr = 0;
866 // initialize pointers for compacting
867 struct moveHelper * orig =
868 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
869 struct moveHelper * to =
870 (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
// Destination starts right after the first block's header.
872 to->top = to->offset = BAMBOO_CACHE_LINE_SIZE;
873 to->bound = BAMBOO_SMEM_SIZE_L;
874 BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
875 to->ptr = to->base + to->offset;
// Source scan starts at the same block.
878 orig->base = to->base;
879 orig->bound = to->bound;
880 orig->blockbase = to->base;
881 orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
883 // scan over all objs in this block, compact those scheduled to
884 // reside on this core
885 // loop stop when finishing either scanning all active objs or moving
886 // all objs to reside on this core
887 int endaddr = cinstruction->loads;
889 bool stop = moveobj(orig, to, &endaddr);
890 curr_heaptop = to->top;
891 curr_heapbound = to->bound;
892 if(stop && (cinstruction->movenum != 0)) {
893 // all objs to reside on this core have been moved
894 // the remaining objs should be moved to other cores
898 } while(orig->ptr < markedptrbound + 1);
899 // fill the header of this block
900 (*((int*)(to->base))) = to->offset;
901 heaptopptr = to->ptr;
904 if(cinstruction->movenum != 0) {
905 if(cinstruction->ismove) {
906 // have objs to move to other cores
909 // might still have objs left, compact them to this core
910 // leave space for header
911 if(orig->ptr < markedptrbound + 1) {
912 if(to->top + BAMBOO_CACHE_LINE_SIZE > to->bound) {
913 // fill the left part of current block
// NOTE(review): memset's first argument is to->top, an integer field
// -- it should presumably be an address (e.g. to->ptr); verify.
914 memset(to->top, -2, to->bound - to->top);
918 to->top += BAMBOO_CACHE_LINE_SIZE; // for header
919 to->offset = BAMBOO_CACHE_LINE_SIZE;
921 to->ptr += BAMBOO_CACHE_LINE_SIZE;
923 while(orig->ptr < markedptrbound + 1) {
924 moveobj(orig, to, NULL);
925 curr_heaptop = to->top;
926 curr_heapbound = to->bound;
928 // fill the header of this block
929 (*((int*)(to->base))) = to->offset;
931 heaptopptr = to->ptr;
933 // have incoming objs, send messages to corresponding cores
935 INTPTR startaddr = 0;
// NOTE(review): 'curr_heapptr' is not declared in the visible code
// (only curr_heaptop/curr_heapbound are); confirm it exists elsewhere.
937 int heapptr = curr_heapptr;
938 int top = curr_heaptop;
939 int bound = curr_heapbound;
940 for(int j = 0; j < cinstruction->movenum; j++) {
942 top = top+cinstruction->size2move[j]+BAMBOO_CACHE_LINE_SIZE;
944 // will cross block boundary
945 int numb = (top - bound) / BAMBOO_SMEM_SIZE + 1;
946 top += numb * BAMBOO_CACHE_LINE_SIZE;
947 BASEPTR(BAMBOO_NUM_OF_CORE, numblocks + numb, &endaddr);
949 (top-bound)%BAMBOO_SMEM_SIZE+BAMBOO_CACHE_LINE_SIZE;
951 bound += BAMBOO_SMEM_SIZE * numb;
954 heapptr+cinstruction->size2move[j]+BAMBOO_CACHE_LINE_SIZE;
// Tell each sender where its objects will land on this core.
957 send_msg_4(cinstruction->dsts[j], GCMOVESTART,
958 BAMBOO_NUM_OF_CORE, startaddr,
959 cinstruction->size2move[j]);
961 heaptopptr = heapptr;
962 } // if(cinstruction->ismove)
963 } // if(cinstruction->movenum != 0)
// Relocate large objects per the coordinator's instructions.
967 if((cinstruction != NULL) && (cinstruction->largeobjs != NULL)) {
968 // move all large objects
970 // dequeue the first large obj
971 struct largeObjItem * loi = cinstruction->largeobjs;
972 cinstruction->largeobjs = loi->next;
973 // move this large obj
// NOTE(review): loi->dst/orig are INTPTR passed where pointers are
// expected, and loi is never freed in the visible lines -- verify.
974 memcpy(loi->dst, loi->orig, loi->length);
975 RuntimeHashadd(pointertbl, loi->orig, loi->dst);
977 }while(cinstruction->largeobjs != NULL);
979 // send compact finish message to core coordinator
980 send_msg_3(STARTUPCORE, GCFINISHCOMPACT,
981 BAMBOO_NUM_OF_CORE, to->ptr);
// Flush phase (enclosing function header not visible here): rewrite
// every reference field of every live object to its post-compaction
// address, querying the object's host core for mappings not yet in
// the local pointertbl.
988 while(gc_moreItems()) {
// FIX: was 'voit * ptr' -- typo for 'void *', a compile error.
989 void * ptr = gc_dequeue();
990 int type = ((int *)(ptr))[0];
991 // scan all pointers in ptr
992 unsigned INTPTR * pointer;
993 pointer=pointerarray[type];
995 /* Array of primitives */
997 } else if (((INTPTR)pointer)==1) {
998 /* Array of pointers */
999 struct ArrayObject *ao=(struct ArrayObject *) ptr;
1000 int length=ao->___length___;
1002 for(j=0; j<length; j++) {
1004 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
1005 // change to new address
1006 void *dstptr = NULL;
1007 RuntimeHashget(pointertbl, objptr, &dstptr);
1008 if(NULL == dstptr) {
1009 // send msg to host core for the mapping info
1010 obj2map = (int)objptr;
1013 send_msg_3(hostcore(objptr), GCMAPREQUEST, (int)objptr,
1014 BAMBOO_NUM_OF_CORE);
// Retry after the reply has populated the mapping table.
1016 RuntimeHashget(pointertbl, objptr, &dstptr);
1018 ((void **)(((char *)&ao->___length___)+sizeof(int)))[j]=dstptr;
// Regular object: walk the reference-field offset table.
1021 INTPTR size=pointer[0];
1023 for(i=1; i<=size; i++) {
1024 unsigned int offset=pointer[i];
1025 void * objptr=*((void **)(((char *)ptr)+offset));
1026 // change to new address
1027 void *dstptr = NULL;
1028 RuntimeHashget(pointertbl, objptr, &dstptr);
1029 if(NULL == dstptr) {
1030 // send msg to host core for the mapping info
1031 obj2map = (int)objptr;
1034 send_msg_3(hostcore(objptr), GCMAPREQUEST, (int)objptr,
1035 BAMBOO_NUM_OF_CORE);
1037 RuntimeHashget(pointertbl, objptr, &dstptr);
1039 *((void **)(((char *)ptr)+offset)) = dstptr;
1042 } // while(moi != NULL)
1043 // send flush finish message to core coordinator
1044 send_msg_2(STARTUPCORE, GCFINISHFLUSH, BAMBOO_NUM_OF_CORE);
// Collector routine for non-coordinator cores: run the mark phase,
// then spin until the coordinator advances the global phase (the
// compact and flush work in between is triggered by messages).
1047 void gc_collect(struct garbagelist * stackptr) {
1048 // core collector routine
1049 mark(true, stackptr);
1051 while(FLUSHPHASE != gcphase) {}
1054 while(FINISHPHASE != gcphase) {}