3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
/* NOTE(review): the body and matching #endif of this conditional are not
   visible in this excerpt -- presumably thread/transaction-specific
   includes; confirm against the full file. */
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
/* Initial size of the from-space heap: 128 MB. */
25 #define INITIALHEAPSIZE 128*1024*1024
/* GC trigger point: collect once the heap is 95% full. */
26 #define GCPOINT(x) ((int)((x)*0.95))
27 /* This define takes in how full the heap is initially and returns a new heap size to use */
28 #define HEAPSIZE(x,y) ((int)(x+y))*2
/* Runtime globals defined elsewhere; the collector below walks these
   task queues and conversion/forwarding tables as GC roots. */
31 extern struct genhashtable * activetasks;
33 extern struct parameterwrapper * objectqueues[NUMCLASSES];
35 extern struct genhashtable * failedtasks;
36 extern struct taskparamdescriptor *currtpd;
37 extern struct ctable *forward;
38 extern struct ctable *reverse;
39 extern struct RuntimeHash *fdtoobject;
/* Per-configuration forwarding macros: ENQUEUE(orig, dst) copies a live
   object into to-space (via gc_createcopy) and rewrites 'dst' to the new
   address.  Continuation lines of these macros are missing from this
   excerpt -- do not reflow them. */
16 #if defined(THREADS) || defined(DSTM) || defined(STM)
/* Global list of threads currently parked for GC (see stopforgc). */
17 struct listitem * list=NULL;
18 //Need to check if pointers are transaction pointers
19 //this also catches the special flag value of 1 for local copies
/* NOTE(review): the "©" tokens in the macro bodies below are mojibake
   for "&copy" (address of the local 'copy' variable) -- the source was
   mis-decoded as an HTML entity; restore "&copy" when fixing encoding. */
20 #define ENQUEUE(orig, dst) \
21 if ((!(((unsigned int)orig)&0x1))) { \
22 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
23 if (gc_createcopy(orig,©)) \
24 #define ENQUEUE(orig, dst) \
25 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
26 if (gc_createcopy(orig,©)) \
27 #define SENQUEUE(orig, dst) \
28 if (gc_createcopy(orig,©)) \
29 #elif defined(FASTCHECK)
/* FASTCHECK build: value 1 is a sentinel, never a real pointer. */
30 #define ENQUEUE(orig, dst) \
31 if (((unsigned int)orig)!=1) { \
32 if (gc_createcopy(orig,©)) \
33 #define ENQUEUE(orig, dst) \
34 if (gc_createcopy(orig,©)) \
/* NOTE(review): this is the tail of 'struct pointerblock' (a fixed block
   of NUMPTRS pointers forming a linked work queue); its opening lines are
   not visible in this excerpt. */
92 struct pointerblock *next;
/* Bounds of the current (from-space) heap and of the to-space heap.
   curr_heapgcpoint is the fill level at which a collection is forced. */
95 void * curr_heapbase=0;
96 void * curr_heapptr=0;
97 void * curr_heapgcpoint=0;
98 void * curr_heaptop=0;
100 void * to_heapbase=0;
/* Work queue of pointers still to be scanned (head=producer side,
   tail=consumer side); 'spare' caches one freed block for reuse. */
105 struct pointerblock *head=NULL;
107 struct pointerblock *tail=NULL;
109 struct pointerblock *spare=NULL;
/* Push 'ptr' onto the GC scan queue, growing the queue with a fresh
   pointerblock when the head block is full.  Lines are missing from this
   body in the excerpt (e.g. linking 'tmp' in, resetting headindex) --
   confirm against the full file. */
111 void enqueue(void *ptr) {
112 if (headindex==NUMPTRS) {
113 struct pointerblock * tmp;
/* NOTE(review): malloc result is used unchecked in the full version --
   presumably OOM here is considered fatal; verify. */
118 tmp=malloc(sizeof(struct pointerblock));
123 head->ptrs[headindex++]=ptr;
/* NOTE(review): these lines belong to dequeue() -- pop the next pointer
   from the scan queue, advancing to the next pointerblock (and recycling
   the drained one, presumably into 'spare') when the tail block is
   exhausted.  The function header is not visible in this excerpt. */
127 if (tailindex==NUMPTRS) {
128 struct pointerblock *tmp=tail;
136 return tail->ptrs[tailindex++];
/* Forward every object recorded in a new-object list: each entry is
   re-enqueued via SENQUEUE, which also rewrites the slot in place to the
   object's to-space address. */
140 void fixobjlist(struct objlist * ptr) {
143 for(i=0;i<ptr->offset;i++) {
144 SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* Rebuild a transaction-cache hash table during GC: walk every chain of
   the old table, forward each cached object's value (and, for STM
   entries, the pointers inside the cached copy), then re-insert the
   entry into a freshly calloc'ed table keyed by the (possibly moved)
   key.  tc_table/tc_list/cstr are in/out pointers to the thread's table,
   iteration list, and chunk-allocated node storage; tc_size is the
   bucket count.  Many lines of this body are missing from the excerpt. */
150 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
/* Bucket mask for the (size<<4) keyspace; see index computation below. */
151 unsigned int mask=(tc_size<<4)-1;
/* NOTE(review): calloc result is not checked before use -- presumably
   OOM is fatal here; verify against project policy. */
152 chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
153 chashlistnode_t *ptr=*tc_table;
154 chashlistnode_t *curr;
158 chashlistnode_t *newlist=NULL;
159 for(i=0;i<tc_size;i++) {
162 do { //Inner loop to go through linked lists
164 chashlistnode_t *tmp,*next;
166 if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
167 break; //key = val =0 for element if not present within the hash table
/* Value lives in the GC'd heap: forward it (SENQUEUE rewrites curr->val). */
170 if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
171 SENQUEUE(curr->val, curr->val);
173 //rewrite transaction cache entry
174 void *vptr=curr->val;
/* First word of every object is its type id; pointerarray[type] is the
   per-type pointer-offset descriptor (0 = no pointers, 1 = pointer array). */
175 int type=((int *)vptr)[0];
176 unsigned INTPTR *pointer=pointerarray[type];
178 //array of primitives - do nothing
179 struct ArrayObject *ao=(struct ArrayObject *) vptr;
180 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
181 } else if (((INTPTR)pointer)==1) {
/* Array of object pointers: forward the location header and each element. */
183 struct ArrayObject *ao=(struct ArrayObject *) vptr;
184 int length=ao->___length___;
186 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* NOTE(review): this inner loop appears to reuse 'i', the outer bucket
   index -- if the full file declares no shadowing variable this clobbers
   the table walk; confirm against the complete source. */
187 for(i=0; i<length; i++) {
188 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
189 SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Normal object: descriptor word 0 is the pointer count, words 1..size
   are byte offsets of the pointer fields. */
192 INTPTR size=pointer[0];
194 for(i=1; i<=size; i++) {
195 unsigned int offset=pointer[i];
196 void * objptr=*((void **)(((char *)vptr)+offset));
197 SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Rehash the (possibly relocated) key into the new table. */
203 index = (((unsigned INTPTR)key) & mask) >>4;
207 // Insert into the new table
209 tmp->key = curr->key;
210 tmp->val = curr->val;
213 } else if (isfirst) {
/* Collision: take a node from the current cliststruct_t chunk, or
   allocate a fresh chunk when the current one is full. */
214 chashlistnode_t *newnode;
215 if ((*cstr)->num<NUMCLIST) {
216 newnode=&(*cstr)->array[(*cstr)->num];
220 cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
223 newnode=&tcl->array[0];
226 newnode->key = curr->key;
227 newnode->val = curr->val;
228 newnode->next = tmp->next;
229 newnode->lnext=newlist;
235 curr->next=tmp->next;
/* NOTE(review): this line is the scan-queue emptiness test (same block
   and same index on both ends); the function it belongs to is not
   visible in this excerpt. */
249 if ((head==tail)&&(tailindex==headindex))
/* Separate queue for tag descriptors; processed after the main trace so
   tags alone do not keep objects alive (see collect()). */
255 struct pointerblock *taghead=NULL;
/* Push a tag descriptor onto the tag queue, growing it as needed. */
258 void enqueuetag(struct ___TagDescriptor___ *ptr) {
259 if (tagindex==NUMPTRS) {
260 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
265 taghead->ptrs[tagindex++]=ptr;
/* Per-thread bump-allocation window handed out by mygcmalloc(). */
270 __thread char * memorybase=NULL;
271 __thread char * memorytop=NULL;
/* The collector proper: stop the world, trace all roots (stacks, thread
   lock lists, transaction caches, task queues, conversion tables), copy
   live objects into to-space via the ENQUEUE/SENQUEUE macros, then fix
   up tag descriptors last.  Caller (helper/mygcmalloc) holds gclock and
   performs the heap flip afterwards.  Many lines of this body are
   missing from this excerpt. */
275 void collect(struct garbagelist * stackptr) {
276 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* Wait until every other mutator thread has parked via stopforgc(). */
278 pthread_mutex_lock(&gclistlock);
280 if ((listcount+1)==threadcount) {
281 break; /* Have all other threads stopped */
283 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh (single-block) scan queue and tag queue for this collection. */
290 head=tail=malloc(sizeof(struct pointerblock));
296 taghead=malloc(sizeof(struct pointerblock));
/* Forward this thread's own transaction cache. */
303 fixtable(&c_table, &c_list, &c_structs, c_size);
309 /* Check current stack */
310 #if defined(THREADS)||defined(DSTM)||defined(STM)
312 struct listitem *listptr=list;
/* Walk the chain of garbagelist stack frames, forwarding each slot. */
316 while(stackptr!=NULL) {
318 for(i=0; i<stackptr->size; i++) {
319 void * orig=stackptr->array[i];
320 ENQUEUE(orig, stackptr->array[i]);
322 stackptr=stackptr->next;
324 #if defined(THREADS)||defined(DSTM)||defined(STM)
325 /* Go to next thread */
/* Each parked thread contributed a listitem: forward its lock list,
   transaction cache, and new-object list, and reset its TLS bump window. */
328 void * orig=listptr->locklist;
329 ENQUEUE(orig, listptr->locklist);
332 if ((*listptr->tc_table)!=NULL) {
333 fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
334 fixobjlist(listptr->objlist);
/* Invalidate the thread's bump-allocation window: the memory it pointed
   into belongs to the old heap. */
336 *(listptr->base)=NULL;
338 stackptr=listptr->stackptr;
339 listptr=listptr->next;
347 ENQUEUE(___fcrevert___, ___fcrevert___);
352 /* Update objectsets */
354 for(i=0; i<NUMCLASSES; i++) {
355 #if !defined(MULTICORE)
356 struct parameterwrapper * p=objectqueues[i];
358 struct ObjectHash * set=p->objectset;
359 struct ObjectNode * ptr=set->listhead;
/* Keys are object pointers; rewrite each key in place, then rehash. */
361 void *orig=(void *)ptr->key;
362 ENQUEUE(orig, *((void **)(&ptr->key)));
365 ObjectHashrehash(set); /* Rehash the table */
/* Forward/reverse conversion tables: keys of 'forward', values of
   'reverse' are heap objects. */
374 struct cnode * ptr=forward->listhead;
376 void * orig=(void *)ptr->key;
377 ENQUEUE(orig, *((void **)(&ptr->key)));
380 crehash(forward); /* Rehash the table */
384 struct cnode * ptr=reverse->listhead;
386 void *orig=(void *)ptr->val;
387 ENQUEUE(orig, *((void**)(&ptr->val)));
/* File-descriptor-to-object map: data fields are heap objects. */
394 struct RuntimeNode * ptr=fdtoobject->listhead;
396 void *orig=(void *)ptr->data;
397 ENQUEUE(orig, *((void**)(&ptr->data)));
403 /* Update current task descriptor */
405 for(i=0; i<currtpd->numParameters; i++) {
406 void *orig=currtpd->parameterArray[i];
407 ENQUEUE(orig, currtpd->parameterArray[i]);
412 /* Update active tasks */
414 struct genpointerlist * ptr=activetasks->list;
416 struct taskparamdescriptor *tpd=ptr->src;
418 for(i=0; i<tpd->numParameters; i++) {
419 void * orig=tpd->parameterArray[i];
420 ENQUEUE(orig, tpd->parameterArray[i]);
424 genrehash(activetasks);
427 /* Update failed tasks */
429 struct genpointerlist * ptr=failedtasks->list;
431 struct taskparamdescriptor *tpd=ptr->src;
433 for(i=0; i<tpd->numParameters; i++) {
434 void * orig=tpd->parameterArray[i];
435 ENQUEUE(orig, tpd->parameterArray[i]);
439 genrehash(failedtasks);
/* Transitive closure: drain the scan queue, forwarding every pointer
   field of each copied object.  ptr = old-space object; word [1] of it
   holds the forwarding pointer to its to-space copy. */
444 void * ptr=dequeue();
445 void *cpy=((void **)ptr)[1];
446 int type=((int *)cpy)[0];
447 unsigned INTPTR * pointer;
451 /* Nothing is inside */
456 pointer=pointerarray[type];
458 /* Array of primitives */
460 #if defined(DSTM)||defined(FASTCHECK)
/* Even primitive arrays carry object-header links in DSTM/FASTCHECK. */
461 struct ArrayObject *ao=(struct ArrayObject *) ptr;
462 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
463 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
464 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
467 struct ArrayObject *ao=(struct ArrayObject *) ptr;
468 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
469 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
471 } else if (((INTPTR)pointer)==1) {
472 /* Array of pointers */
473 struct ArrayObject *ao=(struct ArrayObject *) ptr;
474 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
475 #if (defined(DSTM)||defined(FASTCHECK))
476 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
477 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
480 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
482 int length=ao->___length___;
/* Elements start one int past ___length___; read from the old copy,
   write the forwarded pointer into the new copy. */
484 for(i=0; i<length; i++) {
485 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
486 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: walk the per-type pointer-offset descriptor. */
489 INTPTR size=pointer[0];
491 for(i=1; i<=size; i++) {
492 unsigned int offset=pointer[i];
493 void * objptr=*((void **)(((char *)ptr)+offset));
494 ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
502 #if defined(THREADS)||defined(DSTM)||defined(STM)
504 pthread_mutex_unlock(&gclistlock);
509 /* Fix up the references from tags. This can't be done earlier,
510 because we don't want tags to keep objects alive */
512 while(taghead!=NULL) {
514 struct pointerblock *tmp=taghead->next;
515 for(i=0; i<tagindex; i++) {
516 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
517 struct ___Object___ *obj=tagd->flagptr;
/* Word [1] of the old tag descriptor is its forwarding pointer. */
518 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
520 /* Zero object case */
521 } else if (obj->type==-1) {
522 /* Single object case */
523 copy->flagptr=((struct ___Object___**)obj)[1];
524 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to an array of objects: rebuild the array keeping only
   objects that survived (type==-1 marks a forwarded object). */
526 struct ArrayObject *ao=(struct ArrayObject *) obj;
530 struct ArrayObject *aonew;
532 /* Count live objects */
533 for(j=0; j<ao->___cachedCode___; j++) {
534 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
539 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
540 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
541 memcpy(aonew, ao, sizeof(struct ArrayObject));
542 aonew->type=OBJECTARRAYTYPE;
543 aonew->___length___=livecount;
/* Copy forwarded survivors, then NULL-fill the slack slots. */
545 for(j=0; j<ao->___cachedCode___; j++) {
546 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
547 if (tobj->type==-1) {
548 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
549 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
552 aonew->___cachedCode___=k;
553 for(; k<livecount; k++) {
554 ARRAYSET(aonew, struct ___Object___*, k, NULL);
557 /* No object live anymore */
/* Bump-allocate 'size' bytes from to-space during a collection.
   The advance of to_heapptr (and presumably alignment/overflow handling)
   is on lines missing from this excerpt. */
568 void * tomalloc(int size) {
569 void * ptr=to_heapptr;
576 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* GC safepoint: park this thread (publishing its roots via stopforgc),
   then block on gclock until any in-progress collection finishes.
   The matching restartaftergc(tmp) call is on lines missing here. */
577 void checkcollect(void * ptr) {
578 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
579 pthread_mutex_lock(&gclock); // Wait for GC
581 pthread_mutex_unlock(&gclock);
/* Safepoint variant that also exposes 'revertlist' as a root: builds a
   fake one-entry garbagelist {size=1, array[0]=ptr, next=revertlist} on
   the stack so the collector can forward both, then reloads the possibly
   relocated revertlist afterwards. */
585 void checkcollect2(void * ptr) {
/* NOTE(review): casting pointers through int truncates on LP64 targets
   -- this layout trick assumes a 32-bit build; confirm. */
586 int ptrarray[]={1, (int)ptr, (int) revertlist};
587 struct listitem * tmp=stopforgc((struct garbagelist *)ptrarray);
588 pthread_mutex_lock(&gclock); // Wait for GC
590 pthread_mutex_unlock(&gclock);
591 revertlist=(struct ___Object___*)ptrarray[2];
/* Register this thread as stopped for GC: publish its stack roots, lock
   list, transaction cache, new-object list, and TLS bump window in a
   listitem, link it onto the global 'list', bump listcount, and signal
   the collector that one more thread has parked.  Returns the listitem
   so restartaftergc() can unlink it.  Lines linking litem into 'list'
   and incrementing listcount are missing from this excerpt. */
595 struct listitem * stopforgc(struct garbagelist * ptr) {
/* NOTE(review): malloc result unchecked -- presumably OOM is fatal. */
596 struct listitem * litem=malloc(sizeof(struct listitem));
599 litem->locklist=pthread_getspecific(threadlocks);
602 litem->tc_size=c_size;
603 litem->tc_table=&c_table;
604 litem->tc_list=&c_list;
605 litem->tc_structs=&c_structs;
606 litem->objlist=newobjs;
/* Collector will NULL this out so the thread re-fetches memory after GC. */
607 litem->base=&memorybase;
610 pthread_mutex_lock(&gclistlock);
616 pthread_cond_signal(&gccond);
617 pthread_mutex_unlock(&gclistlock);
/* Undo stopforgc(): restore the (possibly relocated) thread-local lock
   list, unlink litem from the doubly-linked stopped-thread list, and
   (on lines missing here) decrement listcount and free litem. */
621 void restartaftergc(struct listitem * litem) {
622 pthread_mutex_lock(&gclistlock);
624 pthread_setspecific(threadlocks, litem->locklist);
626 if (litem->prev==NULL) {
629 litem->prev->next=litem->next;
631 if (litem->next!=NULL) {
632 litem->next->prev=litem->prev;
635 pthread_mutex_unlock(&gclistlock);
/* Size of the per-thread bump-allocation window fetched from the shared
   heap in one helper() call. */
641 #define MEMORYBLOCK 65536
642 void * helper(struct garbagelist *, int);
/* Fast-path allocator (thread builds): serve 'size' bytes from the
   thread-local [memorybase, memorytop) window, refilling the window from
   helper() when it is absent or too small.  The bump of memorybase and
   the return are on lines missing from this excerpt. */
643 void * mygcmalloc(struct garbagelist * stackptr, int size) {
646 if (memorybase==NULL||(memorybase+size)>memorytop) {
647 int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
648 memorybase=helper(stackptr, toallocate);
649 memorytop=memorybase+toallocate;
651 char *retvalue=memorybase;
/* Slow-path allocator (named helper() in threaded builds, mygcmalloc()
   otherwise): bump-allocate from the shared from-space heap under
   gclock; on first use allocate both semispaces; when the GC point is
   crossed, grow to-space if needed, run collect(), flip the semispaces,
   and retry if still out of room.  Many lines (including the actual
   bump/return path) are missing from this excerpt. */
656 void * helper(struct garbagelist * stackptr, int size) {
658 void * mygcmalloc(struct garbagelist * stackptr, int size) {
661 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* If another thread holds gclock it may be collecting: park ourselves
   so the collector can proceed, then block until it is done. */
662 if (pthread_mutex_trylock(&gclock)!=0) {
663 struct listitem *tmp=stopforgc(stackptr);
664 pthread_mutex_lock(&gclock);
672 if (curr_heapptr>curr_heapgcpoint) {
673 if (curr_heapbase==0) {
674 /* Need to allocate base heap */
675 curr_heapbase=malloc(INITIALHEAPSIZE);
676 if (curr_heapbase==NULL) {
677 printf("malloc failed\n");
680 bzero(curr_heapbase, INITIALHEAPSIZE);
681 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
682 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
683 curr_heapptr=curr_heapbase+size;
685 to_heapbase=malloc(INITIALHEAPSIZE);
686 if (to_heapbase==NULL) {
687 printf("malloc failed\n");
690 to_heaptop=to_heapbase+INITIALHEAPSIZE;
691 to_heapptr=to_heapbase;
693 #if defined(THREADS)||defined(DSTM)||defined(STM)
694 pthread_mutex_unlock(&gclock);
699 /* Grow the to heap if necessary */
701 int curr_heapsize=curr_heaptop-curr_heapbase;
702 int to_heapsize=to_heaptop-to_heapbase;
/* Size the new space from the last collection's survivor volume, then
   round up to an 8-byte multiple and never shrink below current size. */
705 last_heapsize=HEAPSIZE(lastgcsize, size);
706 if ((last_heapsize&7)!=0)
707 last_heapsize+=(8-(last_heapsize%8));
709 if (curr_heapsize>last_heapsize)
710 last_heapsize=curr_heapsize;
711 if (last_heapsize>to_heapsize) {
713 to_heapbase=malloc(last_heapsize);
714 if (to_heapbase==NULL) {
715 printf("Error Allocating enough memory\n");
718 to_heaptop=to_heapbase+last_heapsize;
719 to_heapptr=to_heapbase;
723 /* Do our collection */
726 /* Update stat on previous gc size */
727 lastgcsize=(to_heapptr-to_heapbase)+size;
730 printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
731 printf("New space: %u\n", to_heapptr-to_heapbase);
732 printf("Total space: %u\n", to_heaptop-to_heapbase);
734 /* Flip to/curr heaps */
736 void * tmp=to_heapbase;
737 to_heapbase=curr_heapbase;
741 to_heaptop=curr_heaptop;
/* Reserve this request at the start of the freshly-filled space. */
745 curr_heapptr=to_heapptr+size;
746 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
747 to_heapptr=to_heapbase;
749 /* Not enough room :(, redo gc */
750 if (curr_heapptr>curr_heapgcpoint) {
751 #if defined(THREADS)||defined(DSTM)||defined(STM)
752 pthread_mutex_unlock(&gclock);
754 return mygcmalloc(stackptr, size);
/* Zero the unused remainder of the new from-space before handing it out. */
757 bzero(tmp, curr_heaptop-tmp);
758 #if defined(THREADS)||defined(DSTM)||defined(STM)
759 pthread_mutex_unlock(&gclock);
764 #if defined(THREADS)||defined(DSTM)||defined(STM)
765 pthread_mutex_unlock(&gclock);
/* Copy 'orig' into to-space if not already forwarded; store the copy's
   address through copy_ptr.  A forwarded object has its type word
   rewritten (presumably to -1; the check is on a missing line) and word
   [1] pointing at the copy.  This definition continues past the end of
   this excerpt (the array tail and return statements are not visible). */
772 int gc_createcopy(void * orig, void ** copy_ptr) {
/* First word is the type id; word [1] is the forwarding pointer. */
777 int type=((int *)orig)[0];
/* Already copied: just hand back the existing forwarding pointer. */
779 *copy_ptr=((void **)orig)[1];
782 if (type<NUMCLASSES) {
783 /* We have a normal object */
/* Transactional builds carry an objheader_t immediately before the
   object; copy header+object and return the interior pointer. */
785 int size=classsize[type]+sizeof(objheader_t);
786 void *newobj=tomalloc(size);
787 memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
788 newobj=((char *)newobj)+sizeof(objheader_t);
790 int size=classsize[type];
791 void *newobj=tomalloc(size);
792 memcpy(newobj, orig, size);
/* Install the forwarding pointer in the old copy. */
795 ((void **)orig)[1]=newobj;
799 /* We have an array */
800 struct ArrayObject *ao=(struct ArrayObject *)orig;
/* classsize[type] is the element size for array types. */
801 int elementsize=classsize[type];
802 int length=ao->___length___;
804 int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
805 void *newobj=tomalloc(size);
806 memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
807 newobj=((char *)newobj)+sizeof(objheader_t);
809 int size=sizeof(struct ArrayObject)+length*elementsize;
810 void *newobj=tomalloc(size);
811 memcpy(newobj, orig, size);
815 ((void **)orig)[1]=newobj;