2 #include "multicoregccompact.h"
3 #include "runtime_arch.h"
4 #include "multicoreruntime.h"
5 #include "multicoregarbage.h"
// Poll the per-core GC status array: a nonzero gccorestatus[i] means core i
// has not yet reported completion of the current GC phase.
// NOTE(review): the body is truncated in this view -- the action taken when a
// nonzero status is found, and the function's return statements, are not
// visible; confirm against the full source before relying on this summary.
8 bool gc_checkCoreStatus() {
9 for(int i = 0; i < NUMCORES4GC; ++i) {
10 if(gccorestatus[i] != 0) {
// Reset the per-core GC status for all NUMCORES4GC cores ahead of the next
// phase.  NOTE(review): the loop body is elided in this view (presumably it
// re-arms gccorestatus[i]); verify against the full source.
17 void gc_resetCoreStatus() {
18 for(int i = 0; i < NUMCORES4GC; ++i) {
// Initialize the two cursors used by compaction: 'orig' walks the objects to
// be moved and 'to' tracks where they are copied.  Both start at this core's
// block 0: BASEPTR computes the block's base address for BAMBOO_NUM_OF_CORE
// and BLOCKSIZE gives that local block's extent.
// NOTE(review): some lines are elided in this view (e.g. to->ptr setup).
23 void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
25 to->localblocknum = 0;
26 BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
28 to->bound=to->base+BLOCKSIZE(to->localblocknum);
// The scan cursor starts at the same address the copy cursor writes to,
// so compaction slides objects toward the start of the local region.
31 orig->localblocknum = 0;
32 orig->ptr=orig->base = to->base;
33 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Advance 'to' into this core's next local block: recompute base via BASEPTR
// and bound from that block's BLOCKSIZE.
// NOTE(review): the line that increments to->localblocknum (and any to->ptr
// update) is elided in this view; confirm against the full source.
36 void getSpaceLocally(struct moveHelper *to) {
37 //we have space on our core...just keep going
39 BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
41 to->bound = to->base + BLOCKSIZE(to->localblocknum);
// Request compaction space on another core.  Sends a GCFINISHCOMPACT message
// (with the minimum byte count needed) to the startup core, then -- after a
// wait that is elided in this view -- resumes at the address the startup core
// handed back in gcmovestartaddr.
44 void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
45 //need to get another block from elsewhere
46 //set flag to wait for memory
48 //send request for memory
49 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes);
50 //wait for flag to be set that we received message
// NOTE(review): the wait loop between the send and the line below is elided
// in this view; gcmovestartaddr is presumably set by the reply handler.
54 to->ptr = gcmovestartaddr;
56 //set localblock number to high number to indicate this block isn't local
57 to->localblocknum = MAXBLOCK;
// Recompute base/bound from the global block index of the granted address,
// since the granted block lives in another core's region of the shared heap.
58 unsigned int globalblocknum;
59 BLOCKINDEX(globalblocknum, to->ptr);
60 to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
61 to->bound = gcbaseva + BOUNDPTR(globalblocknum);
// Obtain more space to compact into: while this core still has blocks below
// its fill quota (gcblock2fill) take the local path, otherwise ask the
// startup core for remote space.  NOTE(review): the then-branch (presumably
// a getSpaceLocally call) is elided in this view.
64 void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
65 //need more space to compact into
66 if (to->localblocknum < gcblock2fill) {
69 getSpaceRemotely(to, minimumbytes);
// Drive compaction for this core: repeatedly call compactblocks() to slide
// marked objects from 'orig' into 'to', advancing 'orig' block by block and
// requesting more destination space when needed.  Finishes by telling the
// startup core this core is done (GCFINISHCOMPACT with 0 bytes required).
// NOTE(review): the enclosing loop header and several branch bodies are
// elided in this view; confirm control flow against the full source.
73 void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
74 bool senttopmessage=false;
// When the remaining free heap (gcheaptop) fits inside the current
// destination block, this is the last block this core will fill: report the
// final top address to the startup core exactly once.
76 if ((gcheaptop < ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
77 //This block is the last for this core...let the startup know
78 send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gcheaptop);
79 //Only send the message once
83 unsigned int minimumbytes=compactblocks(orig, to);
84 if (orig->ptr==orig->bound) {
85 //need more data to compact
// Advance the scan cursor to this core's next local block.
87 orig->localblocknum++;
88 BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
90 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Stop scanning once past the end of the shared heap.
// NOTE(review): the statement controlled by this if is elided in this view.
91 if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
// A nonzero return from compactblocks() means the destination block ran out
// of room mid-object; acquire at least that many bytes before continuing.
94 if (minimumbytes!=0) {
95 getSpace(to, minimumbytes);
99 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0);
102 /* Should be invoked with interrupt turned off. */
// Grant 'requiredmem' bytes of spare space from core 'sourcecore' to the
// requester.  NOTE(review): the entire body is elided in this view; only the
// signature is visible.
104 void * assignSpareMem_I(unsigned int sourcecore, unsigned int requiredmem) {
// Interrupt-safe wrapper around assignSpareMem_I(): enters runtime mode
// (interrupts off) around the call and restores client mode afterwards.
// NOTE(review): the 'return retval;' line is elided in this view.
108 void * assignSpareMem(unsigned int sourcecore,unsigned int requiredmem) {
109 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
110 void * retval=assignSpareMem_I(sourcecore, requiredmem);
111 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
115 /* should be invoked with interrupt turned off */
// Search the cores for one that has already stopped (gccorestatus == 0) and
// still has unfilled blocks (gcfilledblocks < gcstopblock); if found, assign
// its spare space to the requester.  If none qualifies, record the pending
// request in gcrequiredmems[requiredcore] to be satisfied later.
// NOTE(review): the returns and the success branch body are elided in this
// view; confirm against the full source.
117 void * gcfindSpareMem_I(unsigned int requiredmem,unsigned int requiredcore) {
119 for(int k = 0; k < NUMCORES4GC; k++) {
120 if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
121 // check if this stopped core has enough mem
122 startaddr=assignSpareMem_I(k, requiredmem);
126 // If we cannot find spare mem right now, hold the request
127 gcrequiredmems[requiredcore] = requiredmem;
// Interrupt-safe wrapper around gcfindSpareMem_I(): enters runtime mode
// around the call and restores client mode afterwards.
// NOTE(review): 'return retval;' is elided in this view.  Also note the
// declared return types differ (bool here vs void* in the _I variant) --
// worth confirming in the full source.
132 bool gcfindSpareMem(unsigned int requiredmem,unsigned int requiredcore) {
133 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
134 bool retval=gcfindSpareMem_I(requiredmem, requiredcore);
135 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
139 /* This function is performance critical... spend more time optimizing it */
// Slide marked objects from the 'orig' region into the 'to' region.  Walks
// origptr over [orig->ptr, orig->bound), using the gcmarktbl bitmap/table to
// skip unmarked stretches quickly, then copies marked objects and records
// their new addresses in gcmappingtbl.  Returns the minimum byte count still
// needed when the destination block overflows (per the caller in
// compacthelper), 0 otherwise.
// NOTE(review): many lines are elided in this view -- notably the
// declaration/initialization of 'toptr', the copy itself, cursor writeback,
// and the return statements.  Comments below describe only what is visible.
141 unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
// Cache the struct fields in locals for the hot loop.
142 void *toptrinit=to->ptr;
144 void *tobound=to->bound;
145 void *origptr=orig->ptr;
146 void *origbound=orig->bound;
// Precompute the mark-table index of the scan bound so "end of block" can be
// detected while skipping unmarked entries.
147 unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
148 unsigned int objlength;
150 while(origptr<origbound) {
151 //Try to skip over stuff fast first
152 unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
153 unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
154 if (!gcmarktbl[arrayoffset]) {
// Scan forward through consecutive zero mark-table entries.
// NOTE(review): the loop header advancing arrayoffset is elided in this view.
157 if (arrayoffset<origendoffset) {
158 //finished with block...
// Account for the bytes consumed in the destination so far.
// NOTE(review): the remainder of this statement/branch is elided.
162 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
165 } while(!gcmarktbl[arrayoffset]);
// Convert the first nonzero table index back into a heap address.
166 origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
169 //Scan more carefully next
170 objlength=getMarkedLength(origptr);
172 if (objlength!=NOTMARKED) {
173 unsigned int length=ALIGNSIZETOBYTES(objlength);
174 void *endtoptr=toptr+length;
// Destination block cannot hold this object: fall into the overflow path.
// NOTE(review): the overflow handling after the gcheaptop adjustment is
// elided in this view.
175 if (endtoptr>tobound) {
176 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
181 //good to move objects and update pointers
// Record the forwarding address for this object so pointers can be updated.
182 gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;
// Unmarked slot: step the scan cursor by one alignment unit.
186 origptr+=ALIGNMENTSIZE;
// NOTE(review): the enclosing function's signature is elided in this view;
// from context this appears to be the per-core compact() entry point run
// during COMPACTPHASE -- confirm against the full source.
191 BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);
194 // initialize structs for compacting
// Zero-initialize both cursors, then point them at this core's first local
// block before running the compaction loop.
195 struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
196 struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
197 initOrig_Dst(&orig, &to);
199 CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
201 compacthelper(&orig, &to);
// Startup-core coordinator for the compact phase: load-balances the heap
// across cores, initializes the per-core bookkeeping, broadcasts
// GCSTARTCOMPACT, and waits until every core reports completion.
// NOTE(review): several lines are elided in this view (e.g. the startup
// core's own compact call and the wait-loop body); confirm against the full
// source.
204 void master_compact() {
205 // predict number of blocks to fill for each core
206 void * tmpheaptop = 0;
207 int numblockspercore = loadbalance(&tmpheaptop);
209 GC_PRINTF("mark phase finished \n");
211 gc_resetCoreStatus();
212 tmpheaptop = gcbaseva + BAMBOO_SHARED_MEM_SIZE;
213 for(int i = 0; i < NUMCORES4GC; i++) {
214 // init some data strutures for compact phase
215 gcfilledblocks[i] = 0;
216 gcrequiredmems[i] = 0;
218 //send start compact messages to all cores
219 gcstopblock[i] = numblockspercore;
220 if(i != STARTUPCORE) {
221 send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
// The startup core records its own quota instead of messaging itself.
223 gcblock2fill = numblockspercore;
230 /* wait for all cores to finish compacting */
232 while(gc_checkCoreStatus())
237 GC_PRINTF("compact phase finished \n");
240 #endif // MULTICORE_GC