2 #include "multicoregccompact.h"
3 #include "runtime_arch.h"
4 #include "multicoreruntime.h"
5 #include "multicoregarbage.h"
// Poll whether any GC core is still busy: scans gccorestatus[0..NUMCORES4GC-1]
// and reacts to the first non-zero entry (a core that has not yet reported done).
// NOTE(review): interior lines of this function are elided in this view (the
// early-return and final return are not visible) — presumably returns true
// while some core is busy; confirm against the full file.
8 bool gc_checkCoreStatus() {
9 for(int i = 0; i < NUMCORES4GC; ++i) {
10 if(gccorestatus[i] != 0) {
// Reset the per-core GC status flags for all NUMCORES4GC cores, so a new
// round of gc_checkCoreStatus() polling starts from a clean "busy" baseline.
// NOTE(review): the loop body is elided in this view — presumably sets each
// gccorestatus[i] to its "not finished" value; confirm against the full file.
17 void gc_resetCoreStatus() {
18 for(int i = 0; i < NUMCORES4GC; ++i) {
// Initialize the two compaction cursors for this core:
//   to   - destination window: starts at local block 0 of this core's region
//   orig - source window: starts at the same base address as 'to'
// Both cursors get [base, bound) set from BASEPTR/BLOCKSIZE of local block 0.
23 void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
25 to->localblocknum = 0;
// BASEPTR computes this core's base address for the given local block number.
26 BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
28 to->bound=to->base+BLOCKSIZE(to->localblocknum);
31 orig->localblocknum = 0;
// Source scan begins exactly where the destination does (compacting in place).
32 orig->ptr=orig->base = to->base;
33 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Advance the destination cursor to this core's next local block.
// NOTE(review): the line incrementing to->localblocknum (and any to->ptr
// update) is elided in this view; only the base/bound recomputation is visible.
36 void getSpaceLocally(struct moveHelper *to) {
37 //we have space on our core...just keep going
39 BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
41 to->bound = to->base + BLOCKSIZE(to->localblocknum);
// Request a destination block from another core's region via the startup core:
// sends GCFINISHCOMPACT with the number of bytes still needed, waits for the
// reply (wait loop elided in this view), then retargets 'to' at the granted
// address gcmovestartaddr, recomputing base/bound from its global block index.
// NOTE(review): the wait-flag setup and wait loop are elided here — presumably
// spins until the reply message sets gcmovestartaddr; confirm in the full file.
44 void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
45 //need to get another block from elsewhere
46 //set flag to wait for memory
48 //send request for memory
49 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes);
50 //wait for flag to be set that we received message
54 to->ptr = gcmovestartaddr;
56 //set localblock number to high number to indicate this block isn't local
57 to->localblocknum = MAXBLOCK;
58 unsigned int globalblocknum;
// Derive the global block index of the granted address, then its [base, bound).
59 BLOCKINDEX(globalblocknum, to->ptr);
60 to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
61 to->bound = gcbaseva + BOUNDPTR(globalblocknum);
// Obtain the next destination block for compaction: while this core still has
// local blocks assigned to fill (localblocknum < gcblock2fill) the local path
// is taken; otherwise ask the startup core for remote space.
// NOTE(review): the getSpaceLocally() call and the if/else braces are elided
// in this view — presumably the visible branch calls getSpaceLocally(to).
64 void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
65 //need more space to compact into
66 if (to->localblocknum < gcblock2fill) {
69 getSpaceRemotely(to, minimumbytes);
// Drive compaction for this core: repeatedly call compactblocks() to move
// marked objects from 'orig' into 'to', advancing 'orig' to the next local
// block when it is exhausted and requesting more destination space when
// compactblocks() reports a shortfall. Notifies the startup core once
// (GCRETURNMEM) when the remaining heap data fits in the current destination
// block, and sends GCFINISHCOMPACT with 0 when this core is done.
// NOTE(review): the enclosing loop construct and several closing braces are
// elided in this view; the control-flow description above is inferred from the
// visible statements — confirm against the full file.
73 void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
74 bool senttopmessage=false;
// If everything left to compact (gcheaptop bytes) fits in the space remaining
// in the current destination block, tell the startup core where our region ends.
76 if ((gcheaptop < ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
77 //This block is the last for this core...let the startup know
78 send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gcheaptop);
79 //Only send the message once
// compactblocks returns 0 on success, or the minimum number of bytes of
// destination space still needed (see compactblocks below).
83 unsigned int minimumbytes=compactblocks(orig, to);
84 if (orig->ptr==orig->bound) {
85 //need more data to compact
// Source block exhausted: step to this core's next local block.
87 orig->localblocknum++;
88 BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
90 orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
// Stop once the source cursor walks past the end of the shared heap.
91 if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
94 if (minimumbytes!=0) {
95 getSpace(to, minimumbytes);
// Done compacting on this core: report completion (0 = no more bytes needed).
99 send_msg_3(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0);
102 /* Should be invoked with interrupt turned off. */
// Grant spare memory from 'sourcecore' to satisfy a request of 'requiredmem'
// bytes. The _I suffix follows this file's convention for interrupt-off
// variants (see assignSpareMem/gcfindSpareMem wrappers below).
// NOTE(review): the entire body is elided in this view — no claims possible
// about the returned pointer beyond its role as the wrapper's retval.
104 void * assignSpareMem_I(unsigned int sourcecore, unsigned int requiredmem) {
// Interrupt-safe wrapper around assignSpareMem_I: enters runtime mode (which,
// per the comment on the _I variant, disables interrupts), delegates, then
// restores client mode.
// NOTE(review): the `return retval;` line is elided in this view but is
// implied by the non-void signature and the saved retval.
108 void * assignSpareMem(unsigned int sourcecore,unsigned int requiredmem) {
109 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
110 void * retval=assignSpareMem_I(sourcecore, requiredmem);
111 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
115 /* should be invoked with interrupt turned off */
// Search all GC cores for spare memory to satisfy 'requiredmem' bytes on
// behalf of 'requiredcore'. If no core can supply it now, record the request
// in gcrequiredmems[requiredcore] so it can be serviced later.
// NOTE(review): the per-core test inside the loop and the return statements
// are elided in this view; the wrapper below treats the result as a bool.
117 void * gcfindSpareMem_I(unsigned int requiredmem,unsigned int requiredcore) {
119 for(int k = 0; k < NUMCORES4GC; k++) {
122 // If we cannot find spare mem right now, hold the request
123 gcrequiredmems[requiredcore] = requiredmem;
// Interrupt-safe wrapper around gcfindSpareMem_I: enters runtime mode,
// delegates, restores client mode. Result indicates whether spare memory was
// found immediately (otherwise the request is held in gcrequiredmems).
// NOTE(review): gcfindSpareMem_I is declared `void *` above but its result is
// stored in a bool here — an implicit pointer-to-bool conversion; the elided
// lines may explain the mismatch. Worth confirming in the full file.
128 bool gcfindSpareMem(unsigned int requiredmem,unsigned int requiredcore) {
129 BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
130 bool retval=gcfindSpareMem_I(requiredmem, requiredcore);
131 BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
135 /* This function is performance critical... spend more time optimizing it */
// Copy marked objects from [orig->ptr, orig->bound) into the destination
// window at to->ptr, updating gcmappingtbl (old address -> new address) as it
// goes. Unmarked runs are skipped quickly by scanning the gcmarktbl bitmap at
// ALIGNTOTABLEINDEX granularity. Returns 0 when the source range is fully
// consumed, or a non-zero minimum byte count when the destination window ran
// out of room (caller then calls getSpace and retries).
// NOTE(review): many interior lines are elided in this view (the do-loop head,
// block-finish bookkeeping, the actual memcpy of object bytes, and the cursor
// writeback into orig/to). Comments below cover only the visible statements.
137 unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
// Cache cursors in locals; toptrinit remembers where this call started writing
// so gcheaptop can be decremented by the number of bytes actually compacted.
138 void *toptrinit=to->ptr;
140 void *tobound=to->bound;
141 void *origptr=orig->ptr;
142 void *origbound=orig->bound;
143 unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
144 unsigned int objlength;
146 while(origptr<origbound) {
147 //Try to skip over stuff fast first
148 unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
149 unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
// Fast path: nothing marked at this mark-table slot, so skip slot by slot.
150 if (!gcmarktbl[arrayoffset]) {
153 if (arrayoffset<origendoffset) {
154 //finished with block...
// Account for the bytes compacted so far before returning for this block.
158 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
161 } while(!gcmarktbl[arrayoffset]);
// Found a marked slot: resume the careful scan at its address.
162 origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
165 //Scan more carefully next
// getMarkedLength yields the object's marked length, or NOTMARKED.
166 objlength=getMarkedLength(origptr);
168 if (objlength!=NOTMARKED) {
169 unsigned int length=ALIGNSIZETOBYTES(objlength);
170 void *endtoptr=toptr+length;
// Destination window too small for this object: record progress and bail out
// (the elided lines presumably write back cursors and return 'length').
171 if (endtoptr>tobound) {
172 gcheaptop-=(unsigned INTPTR)(toptr-toptrinit)
177 //good to move objects and update pointers
// Record the forwarding address before/while moving the object.
178 gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;
// Unmarked slot on the careful path: step one alignment unit.
182 origptr+=ALIGNMENTSIZE;
// NOTE(review): the signature of the enclosing function is elided in this
// view — from context (asserts COMPACTPHASE, builds both cursors, runs
// compacthelper) this is presumably the per-core compact() entry point;
// confirm against the full file.
187 BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);
190 // initialize structs for compacting
// Zero-initialize both moveHelper cursors, then point them at local block 0.
191 struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
192 struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
193 initOrig_Dst(&orig, &to);
// Cache-adaptivity sampling hook (no-op unless the feature is compiled in).
195 CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
197 compacthelper(&orig, &to);
// Startup-core driver for the compact phase: load-balance block assignments,
// initialize the shared block allocation table (first numblockspercore *
// NUMCORES4GC blocks as BS_INIT, the rest as BS_FREE), broadcast
// GCSTARTCOMPACT to the other cores, run compaction locally, and wait for all
// cores to report completion.
// NOTE(review): several interior lines are elided in this view (core-status
// initialization, the local compact call, and the wait-loop body); comments
// below cover only the visible statements.
200 void master_compact() {
201 // predict number of blocks to fill for each core
202 void * tmpheaptop = 0;
// loadbalance decides how many blocks each core must fill this round.
203 numblockspercore = loadbalance(&tmpheaptop);
205 GC_PRINTF("mark phase finished \n");
// Reset per-core done flags before broadcasting the start message, so the
// wait loop below sees every core as busy until it reports back.
207 gc_resetCoreStatus();
208 //initialize local data structures first....we don't want remote requests messing data up
209 unsigned int initblocks=numblockspercore*NUMCORES4GC;
210 allocationinfo.lowestfreeblock=NOFREEBLOCKS;
// Blocks assigned to cores this round are marked as being initialized/filled.
213 for(int i=0;i<initblocks;i++) {
214 allocationinfo.blocktable[i].status=BS_INIT;
// Remaining blocks are free and empty, available for remote space requests.
218 for(int i=initblocks;i<GCNUMBLOCK;i++) {
219 allocationinfo.blocktable[i].status=BS_FREE;
220 allocationinfo.blocktable[i].usedspace=0;
223 //start all of the cores
224 for(int i = 0; i < NUMCORES4GC; i++) {
225 // init some data strutures for compact phase
// Clear any held spare-memory request from a previous round (see
// gcfindSpareMem_I, which parks unsatisfied requests here).
226 gcrequiredmems[i] = 0;
228 //send start compact messages to all cores
229 if(i != STARTUPCORE) {
230 send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
// Startup core takes its own assignment directly instead of messaging itself.
232 gcblock2fill = numblockspercore;
239 /* wait for all cores to finish compacting */
// Spin until every core has cleared its gccorestatus entry.
241 while(gc_checkCoreStatus())
246 GC_PRINTF("compact phase finished \n");
249 #endif // MULTICORE_GC