/* IRC.git: Robust/src/Runtime/bamboo/multicoregccompact.c */
#ifdef MULTICORE_GC
#include "structdefs.h"
#include "multicoregccompact.h"
#include "runtime_arch.h"
#include "multicoreruntime.h"
#include "multicoregarbage.h"
#include "markbit.h"
#include "multicoremem_helper.h"
#include "gcqueue.h"

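// Count the cores that are still compacting, i.e. that have not yet
// returned their last memory block to the master (returnedmem[i] != 0).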
int gc_countRunningCores() {
  int count=0;
  for(int i = 0; i < NUMCORES4GC; i++) {
    if(returnedmem[i]) {
      count++;
    }
  }
  return count;
}

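// Point both move helpers at the first local block of this core: orig walks
// the objects to be moved and to tracks where they are compacted to.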
void initOrig_Dst(struct moveHelper * orig,struct moveHelper * to) {
  // init the dst ptr
  to->localblocknum = 0;
  BASEPTR(to->base, BAMBOO_NUM_OF_CORE, to->localblocknum);
  to->ptr = to->base;
  to->bound=to->base+BLOCKSIZE(to->localblocknum);

  // init the orig ptr
  orig->localblocknum = 0;
  orig->ptr=orig->base = to->base;
  orig->bound=orig->base+BLOCKSIZE(orig->localblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=to->base+BAMBOO_PAGE_SIZE;
  orig->pagebound=orig->base+BAMBOO_PAGE_SIZE;
#endif
}

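// Advance the destination helper to the next block assigned to this core.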
void getSpaceLocally(struct moveHelper *to) {
  //we have space on our core...just keep going
  to->localblocknum++;
  BASEPTR(to->base,BAMBOO_NUM_OF_CORE, to->localblocknum);
  to->ptr=to->base;
  to->bound=to->base+BLOCKSIZE(to->localblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=to->base+BAMBOO_PAGE_SIZE;
#endif
}

//This function is called on the master core only...and typically by
//the message interrupt handler

void handleReturnMem_I(unsigned int cnum, void *heaptop) {
  unsigned int blockindex;
  BLOCKINDEX(blockindex, heaptop);
  unsigned INTPTR localblocknum=GLOBALBLOCK2LOCAL(blockindex);
  //this core is done as far as memory usage is concerned
  returnedmem[cnum]=0;

  struct blockrecord * blockrecord=&allocationinfo.blocktable[blockindex];

  blockrecord->status=BS_FREE;
  blockrecord->usedspace=(unsigned INTPTR)(heaptop-OFFSET2BASEVA(blockindex)-gcbaseva);
  blockrecord->freespace=BLOCKSIZE(localblocknum)-blockrecord->usedspace;
  /* Update the lowest free block */
  if (blockindex < allocationinfo.lowestfreeblock) {
    allocationinfo.lowestfreeblock=blockindex;
  }

  /* This is our own block...means we should mark other blocks above us as free*/

  if (cnum==blockrecord->corenum) {
    unsigned INTPTR nextlocalblocknum=localblocknum+1;
    for(;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
      unsigned INTPTR blocknum=BLOCKINDEX2(cnum, nextlocalblocknum);
      struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
      nextblockrecord->status=BS_FREE;
      nextblockrecord->usedspace=0;
      //this is true because this cannot be the lowest block
      nextblockrecord->freespace=BLOCKSIZE(1);
    }
  }

  //this could be the last one....
  int count=gc_countRunningCores();
  if (gcmovepending==count) {
    // All cores have stopped...hand out memory as necessary to handle all requests
    handleMemoryRequests_I();
  } else {
    //see if returned memory blocks let us resolve requests
    useReturnedMem(cnum, allocationinfo.lowestfreeblock);
  }
}

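// Try to satisfy pending allocation requests from the free space that core
// retcorenum just returned, scanning its blocks starting at localblockindex.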
void useReturnedMem(unsigned int retcorenum, block_t localblockindex) {
  for(int i=0;i<NUMCORES4GC;i++) {
    unsigned INTPTR requiredmem=gcrequiredmems[i];
    if (requiredmem) {
      unsigned INTPTR desiredmem=maxusefulmems[i];
      unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
      unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

      for(block_t nextlocalblocknum=localblockindex;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
        unsigned INTPTR blocknum=BLOCKINDEX2(retcorenum, nextlocalblocknum);
        struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
        if (nextblockrecord->status==BS_FREE) {
          unsigned INTPTR freespace=nextblockrecord->freespace&~BAMBOO_CACHE_LINE_MASK;
          if (freespace>=memcheck) {
            nextblockrecord->status=BS_USED;
            void *blockptr=OFFSET2BASEVA(blocknum)+gcbaseva;
            unsigned INTPTR usedspace=((nextblockrecord->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
            //taken care of one block
            gcmovepending--;
            void *startaddr=blockptr+usedspace;
            gcrequiredmems[i]=0;
            maxusefulmems[i]=0;
            if (i==STARTUPCORE) {
              gctomove = true;
              gcmovestartaddr = startaddr;
            } else if(BAMBOO_CHECK_SEND_MODE()) {
              cache_msg_2_I(i,GCMOVESTART,startaddr);
            } else {
              send_msg_2_I(i,GCMOVESTART,startaddr);
            }
            //this request is satisfied...stop searching blocks for core i
            break;
          }
        }
      }
    }
  }
}

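// Non-interrupt-context wrapper: enter runtime mode around the _I call so it
// runs with message interrupts disabled.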
void handleReturnMem(unsigned int cnum, void *heaptop) {
  BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
  handleReturnMem_I(cnum, heaptop);
  BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
}

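// Ask the master core for a block somewhere else in the shared heap, spin
// until a start address arrives, then retarget the destination helper at it.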
void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
  //need to get another block from elsewhere
  //set flag to wait for memory

  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    gctomove=false;
    BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
    void *startaddr=handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
    BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();

    if (startaddr) {
      gcmovestartaddr=startaddr;
    } else {
      while(!gctomove) ;
    }
  } else {
    gctomove=false;
    //send request for memory
    send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
    //wait for flag to be set that we received message
    while(!gctomove)
      ;
  }

  //store pointer
  to->ptr = gcmovestartaddr;

  //set localblock number to high number to indicate this block isn't local
  to->localblocknum = MAXBLOCK;
  unsigned int globalblocknum;
  BLOCKINDEX(globalblocknum, to->ptr);
  to->base = gcbaseva + OFFSET2BASEVA(globalblocknum);
  to->bound=gcbaseva+BOUNDPTR(globalblocknum);
#ifdef GC_CACHE_ADAPT
  to->pagebound=(void *)((int)((int)(to->ptr)&(~(BAMBOO_PAGE_SIZE-1)))+BAMBOO_PAGE_SIZE);
#endif
}

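// Grab more space to compact into: the next local block if one is still
// assigned to this core, otherwise request a remote block from the master.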
void getSpace(struct moveHelper *to, unsigned int minimumbytes) {
  //need more space to compact into
  if ((to->localblocknum+1) < gcblock2fill) {
    getSpaceLocally(to);
  } else {
    getSpaceRemotely(to, minimumbytes);
  }
}

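// Per-core compaction loop: repeatedly compact units of the local heap,
// report the final heap top to the master once the remaining live data fits
// in the current destination block, and request more space when needed.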
void compacthelper(struct moveHelper * orig,struct moveHelper * to) {
  bool senttopmessage=false;
  while(true) {
    if ((gccurr_heaptop <= ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
      //This block is the last for this core...let the startup know
      if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
        handleReturnMem(BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
      } else {
        send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
      }
      //Only send the message once
      senttopmessage=true;
    }
    unsigned int minimumbytes=COMPACTUNITS(orig, to);
    if (orig->ptr==orig->bound) {
      //need more data to compact
      //move on to this core's next block
      orig->localblocknum++;
      BASEPTR(orig->base,BAMBOO_NUM_OF_CORE, orig->localblocknum);
      orig->ptr=orig->base;
      orig->bound=orig->base+BLOCKSIZE(orig->localblocknum);
#ifdef GC_CACHE_ADAPT
      orig->pagebound=orig->base+BAMBOO_PAGE_SIZE;
#endif
      if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
        break;
    }
    if (minimumbytes!=0) {
      getSpace(to, minimumbytes);
    }
  }
  if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
    BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
    handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, 0, 0);
    BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
  } else {
    send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0, 0);
  }
}

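// Search the neighbor cores listed in core2test[ncorenum], level by level,
// for a free block with at least memcheck bytes; mark it used and return a
// pointer just past its used space, or NULL if no neighbor block fits.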
void * checkNeighbors_I(int ncorenum, unsigned INTPTR requiredmem, unsigned INTPTR desiredmem) {
  int minblockindex=allocationinfo.lowestfreeblock/NUMCORES4GC;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t lblock=minblockindex;lblock<numblockspercore;lblock++) {
    for(int i=0;i<NUM_CORES2TEST;i++) {
      int neighborcore=core2test[ncorenum][i];
      if (neighborcore!=-1) {
        block_t globalblockindex=BLOCKINDEX2(neighborcore, lblock);
        struct blockrecord * block=&allocationinfo.blocktable[globalblockindex];
        if (block->status==BS_FREE) {
          unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
          if (memcheck<=freespace) {
            //we have a block
            //mark block as used
            block->status=BS_USED;
            void *blockptr=OFFSET2BASEVA(globalblockindex)+gcbaseva;
            unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
            return blockptr+usedspace;
          }
        }
      }
    }
  }
  return NULL;
}

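// Fall-back search over every block below topblock; also refreshes
// allocationinfo.lowestfreeblock with the first free block encountered.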
void * globalSearch_I(unsigned int topblock, unsigned INTPTR requiredmem, unsigned INTPTR desiredmem) {
  unsigned int firstfree=NOFREEBLOCK;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t i=allocationinfo.lowestfreeblock;i<topblock;i++) {
    struct blockrecord * block=&allocationinfo.blocktable[i];
    if (block->status==BS_FREE) {
      if(firstfree==NOFREEBLOCK)
        firstfree=i;
      unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
      if (memcheck<=freespace) {
        //we have a block
        //mark block as used
        block->status=BS_USED;
        void *blockptr=OFFSET2BASEVA(i)+gcbaseva;
        unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
        allocationinfo.lowestfreeblock=firstfree;
        return blockptr+usedspace;
      }
    }
  }
  allocationinfo.lowestfreeblock=firstfree;
  return NULL;
}

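// Satisfy a single outstanding request: scan the block table from lowestblock
// and hand the requesting core the first free block that fits, or exit if no
// block large enough remains.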
void handleOneMemoryRequest(int core, unsigned int lowestblock) {
  unsigned INTPTR requiredmem=gcrequiredmems[core];
  unsigned INTPTR desiredmem=maxusefulmems[core];
  block_t firstfree=NOFREEBLOCK;
  unsigned INTPTR threshold=(desiredmem<MINMEMORYCHUNKSIZE)? desiredmem: MINMEMORYCHUNKSIZE;
  unsigned INTPTR memcheck=requiredmem>threshold?requiredmem:threshold;

  for(block_t searchblock=lowestblock;searchblock<GCNUMBLOCK;searchblock++) {
    struct blockrecord * block=&allocationinfo.blocktable[searchblock];
    if (block->status==BS_FREE) {
      if(firstfree==NOFREEBLOCK)
        firstfree=searchblock;
      //don't take a block from another core that hasn't returned its memory yet
      if (block->corenum!=core&&returnedmem[block->corenum])
        continue;

      unsigned INTPTR freespace=block->freespace&~BAMBOO_CACHE_LINE_MASK;
      if (freespace>=memcheck) {
        //TODO: should check memory block at same level on our own core...if that works, use it to preserve locality

        //we have a block
        //mark block as used
        block->status=BS_USED;
        void *blockptr=OFFSET2BASEVA(searchblock)+gcbaseva;
        unsigned INTPTR usedspace=((block->usedspace-1)&~BAMBOO_CACHE_LINE_MASK)+BAMBOO_CACHE_LINE_SIZE;
        allocationinfo.lowestfreeblock=firstfree;
        //taken care of one block
        gcmovepending--;
        void *startaddr=blockptr+usedspace;
        if(BAMBOO_CHECK_SEND_MODE()) {
          cache_msg_2_I(core,GCMOVESTART,startaddr);
        } else {
          send_msg_2_I(core,GCMOVESTART,startaddr);
        }
        return;
      }
    }
  }
  //this is bad...ran out of memory
  printf("Out of memory.  Was trying for %u bytes\n", threshold);
  BAMBOO_EXIT();
}

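// Walk the per-core request table and service every outstanding request;
// called once no further memory returns are expected, so the remaining
// requests must be served from the current block table.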
void handleMemoryRequests_I() {
  unsigned int lowestblock=allocationinfo.lowestfreeblock;
  if (lowestblock==NOFREEBLOCK) {
    lowestblock=numblockspercore*NUMCORES4GC;
  }

  for(int i=0;i < NUMCORES4GC; i++) {
    if (gcrequiredmems[i]) {
      handleOneMemoryRequest(i, lowestblock);
      lowestblock=allocationinfo.lowestfreeblock;
    }
  }
}

/* should be invoked with interrupt turned off */

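// Try to find spare memory for requiredcore: check its neighbors first, then
// do a global search. If nothing fits, record the request, bump gcmovepending,
// and return NULL so the requester waits for a later GCMOVESTART message.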
void * gcfindSpareMem_I(unsigned INTPTR requiredmem, unsigned INTPTR desiredmem,unsigned int requiredcore) {
  if (allocationinfo.lowestfreeblock!=NOFREEBLOCK) {
    //There are spare blocks
    unsigned int topblock=numblockspercore*NUMCORES4GC;
    void *memblock;

    if ((memblock=checkNeighbors_I(requiredcore, requiredmem, desiredmem))!=NULL) {
      return memblock;
    } else if ((memblock=globalSearch_I(topblock, requiredmem, desiredmem))!=NULL) {
      return memblock;
    }
  }

  // If we cannot find spare mem right now, hold the request
  gcrequiredmems[requiredcore] = requiredmem;
  maxusefulmems[requiredcore]=desiredmem;
  gcmovepending++;

  int count=gc_countRunningCores();
  if (gcmovepending==count) {
    // All cores have stopped...hand out memory as necessary to handle all requests
    handleMemoryRequests_I();
  }

  return NULL;
}

#ifdef GC_CACHE_ADAPT
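// Cache-adapt variant of the compaction unit: compact a page at a time so the
// sampling data can be finalized per page via the CACHEADAPT_* hooks.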
unsigned int compactblockshelper(struct moveHelper * orig, struct moveHelper * to) {
  unsigned int minimumbytes=0;
  void *origptr=orig->ptr;
  void *origbound=orig->bound;
  void * tmporig=orig->ptr;
  void * tmpto=to->ptr;

  while(true) {
    //call compactblocks using the page boundaries at the current bounds
    minimumbytes=compactblocks(orig, to);
    if(minimumbytes == 0) {
      //bump the orig page bound...
      //use old orig pointer to make sure we get correct block
      CACHEADAPT_FINISH_SRC_PAGE(tmporig, tmpto, to->ptr);
      if (orig->ptr<origbound) {
        tmporig=orig->ptr;
        tmpto=to->ptr;
        orig->pagebound=orig->pagebound+BAMBOO_PAGE_SIZE;
      } else {
        return 0;
      }
    } else {
      // require more memory
      void *endtoptr=to->ptr+minimumbytes;
      if (endtoptr>to->bound) {
        CACHEADAPT_FINISH_DST_PAGE(orig->ptr, tmpto, to->ptr, 0);
        return minimumbytes;
      } else {
        CACHEADAPT_FINISH_DST_PAGE(orig->ptr, tmpto, to->ptr, minimumbytes);
        to->pagebound=(void *)((((unsigned INTPTR)endtoptr-1)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
        //update pointers to avoid double counting the stuff we already added in
        tmporig=orig->ptr+minimumbytes;
        tmpto=to->ptr+minimumbytes;
      }
    }
  }
}
#endif

/* This function is performance critical...  spend more time optimizing it */

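// Walk the marked objects in the orig region and assign each a destination
// address in the to region, recording it in gcmappingtbl; unmarked runs are
// skipped quickly via gcmarktbl. Returns 0 when the source block (or page,
// with GC_CACHE_ADAPT) is finished, or the aligned size of the first object
// that no longer fits in the destination.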
unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
  void *toptrinit=to->ptr;
  void *toptr=toptrinit;
  void *origptr=orig->ptr;
#ifdef GC_CACHE_ADAPT
  void *origbound=orig->pagebound;
  void *tobound=to->pagebound;
#else
  void *origbound=orig->bound;
  void *tobound=to->bound;
#endif
  unsigned INTPTR origendoffset=ALIGNTOTABLEINDEX((unsigned INTPTR)(origbound-gcbaseva));
  unsigned int objlength;

  while(origptr<origbound) {
    //Try to skip over stuff fast first
    unsigned INTPTR offset=(unsigned INTPTR) (origptr-gcbaseva);
    unsigned INTPTR arrayoffset=ALIGNTOTABLEINDEX(offset);
    if (!gcmarktbl[arrayoffset]) {
      do {
        arrayoffset++;
        if (arrayoffset>=origendoffset) {
          //finished with block (a page in the CACHE_ADAPT version)...
          to->ptr=toptr;
          orig->ptr=origbound;
          gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
          return 0;
        }
      } while(!gcmarktbl[arrayoffset]);
      origptr=CONVERTTABLEINDEXTOPTR(arrayoffset);
    }

    //Scan more carefully next
    objlength=getMarkedLength(origptr);

    if (objlength!=NOTMARKED) {
      unsigned int length=ALIGNSIZETOBYTES(objlength);

      //code between this and next comment should be removed
#ifdef GC_DEBUG
      unsigned int size;
      unsigned int type;
      gettype_size(origptr, &type, &size);
      size=((size-1)&(~(ALIGNMENTSIZE-1)))+ALIGNMENTSIZE;

      if (size!=length) {
        tprintf("BAD SIZE IN BITMAP: type=%u object=%x size=%u length=%u\n", type, origptr, size, length);
        unsigned INTPTR alignsize=ALIGNOBJSIZE((unsigned INTPTR)(origptr-gcbaseva));
        unsigned INTPTR hibits=alignsize>>4;
        unsigned INTPTR lobits=(alignsize&15)<<1;
        tprintf("hibits=%x lobits=%x\n", hibits, lobits);
        tprintf("hi=%x lo=%x\n", gcmarktbl[hibits], gcmarktbl[hibits+1]);
      }
#endif
      //end of code to remove

      void *endtoptr=toptr+length;
      if (endtoptr>tobound) {
        gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
        to->ptr=toptr;
        orig->ptr=origptr;
        return length;
      }
      //good to move objects and update pointers

      gcmappingtbl[OBJMAPPINGINDEX(origptr)]=toptr;

      origptr+=length;
      toptr=endtoptr;
    } else
      origptr+=ALIGNMENTSIZE;
  }
  to->ptr=toptr;
  orig->ptr=origptr;
  gccurr_heaptop-=(unsigned INTPTR)(toptr-toptrinit);
  return 0;
}

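// Per-core entry point for the compaction phase: set up the orig/to move
// helpers for this core and run the compaction loop.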
void compact() {
  BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);

  // initialize structs for compacting
  struct moveHelper orig;
  struct moveHelper to;
  initOrig_Dst(&orig, &to);

  CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
  compacthelper(&orig, &to);
}

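// Master-core driver for the compaction phase: size each core's region via
// loadbalance(), reset the block table, broadcast GCSTARTCOMPACT, run the
// local compaction, and wait for every core to finish.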
void master_compact() {
  // predict number of blocks to fill for each core
  numblockspercore = loadbalance()+1;

  GC_PRINTF("mark phase finished \n");

  gc_resetCoreStatus();
  //initialize local data structures first....we don't want remote requests messing data up
  unsigned int initblocks=numblockspercore*NUMCORES4GC;
  allocationinfo.lowestfreeblock=NOFREEBLOCK;

  //assigned blocks
  for(int i=0;i<initblocks;i++) {
    allocationinfo.blocktable[i].status=BS_USED;
  }

  //free blocks
  for(int i=initblocks;i<GCNUMBLOCK;i++) {
    allocationinfo.blocktable[i].status=BS_FREE;
    allocationinfo.blocktable[i].usedspace=0;
    //this is true because all cores have at least one block already...
    allocationinfo.blocktable[i].freespace=BLOCKSIZE(1);
  }

  //start all of the cores
  for(int i = 0; i < NUMCORES4GC; i++) {
    // init some data structures for compact phase
    gcrequiredmems[i] = 0;
    gccorestatus[i] = 1;
    returnedmem[i] = 1;
    //send start compact messages to all cores
    if(i != STARTUPCORE) {
      send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
    } else {
      gcblock2fill = numblockspercore;
    }
  }
  GCPROFILE_ITEM();
  // compact phase
  compact();

  /* wait for all cores to finish compacting */
  while(!gc_checkCoreStatus())
    ;

#ifdef GC_DEBUG
  void *nextvalid=gcbaseva;
  for(void *tmp=gcbaseva; tmp<gcbaseva+BAMBOO_SHARED_MEM_SIZE;tmp+=ALIGNMENTSIZE) {
    unsigned int objlength=getMarkedLength(tmp);
    void *forwarding=gcmappingtbl[OBJMAPPINGINDEX(tmp)];
    if (tmp>=nextvalid&&((objlength!=0)!=(forwarding!=NULL))) {
      tprintf("Maps disagree tmp=%x olength=%u forwarding=%x\n",tmp, objlength, forwarding);
    }
    if (tmp<nextvalid&&forwarding!=NULL) {
      tprintf("Weird forwarding pointer\n");
    }
    if (tmp>=nextvalid&&(objlength!=0||forwarding!=NULL)) {
      unsigned int length=ALIGNSIZETOBYTES(objlength);
      unsigned int size;
      unsigned int type;
      nextvalid=tmp+length;
      gettype_size(tmp, &type, &size);
      size=((size-1)&(~(ALIGNMENTSIZE-1)))+ALIGNMENTSIZE;
      if (size!=length) {
        tprintf("Bad size in bitmap: tmp=%x length=%u size=%u type=%u\n", tmp, length, size, type);
      }
      block_t blockindex;
      BLOCKINDEX(blockindex, forwarding);
      struct blockrecord * block=&allocationinfo.blocktable[blockindex];
      void *blockptr=OFFSET2BASEVA(blockindex)+gcbaseva;

      if (block->status==BS_FREE) {
        if (forwarding>(blockptr+block->usedspace)) {
          tprintf("Pointer references free space forwarding=%x tmp=%x length=%u type=%u blockindex=%u, baseptr=%x, usedspace=%u, status=%u\n", forwarding, tmp, length, type,blockindex, blockptr, block->usedspace, block->status);
        }
      }
    }
  }
#endif

  GCPROFILE_ITEM();

  //just in case we didn't get blocks back...
  if (allocationinfo.lowestfreeblock==NOFREEBLOCK)
    allocationinfo.lowestfreeblock=numblockspercore*NUMCORES4GC;

  GC_PRINTF("compact phase finished \n");
}

#endif // MULTICORE_GC