changes towards second parallel collector
author: bdemsky <bdemsky>
Tue, 5 Jul 2011 21:15:25 +0000 (21:15 +0000)
committer: bdemsky <bdemsky>
Tue, 5 Jul 2011 21:15:25 +0000 (21:15 +0000)
Robust/src/Runtime/bamboo/pmc_garbage.c
Robust/src/Runtime/bamboo/pmc_garbage.h
Robust/src/Runtime/bamboo/pmc_mark.c
Robust/src/Runtime/bamboo/pmc_queue.c [new file with mode: 0644]
Robust/src/Runtime/bamboo/pmc_queue.h [new file with mode: 0644]

index a9bfd4828bebd1c8866e6896f3d1417fd938a00a..e0027e573c78baeb5a8ec7f16c1ae4d97dc6ee6d 100644 (file)
@@ -1,2 +1,13 @@
 #include "pmc_garbage.h"
 
+//bump the count of active GC threads under the heap lock; paired with
+//decrementthreads to drive the work-stealing termination check in
+//pmc_trysteal (numthreads==0 means every marker is idle)
+void incrementthreads() {
+  tmc_spin_mutex_lock(&pmc_heapptr->lock);
+  pmc_heapptr->numthreads++;
+  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
+}
+
+//drop the count of active GC threads under the heap lock
+void decrementthreads() {
+  tmc_spin_mutex_lock(&pmc_heapptr->lock);
+  pmc_heapptr->numthreads--;
+  tmc_spin_mutex_unlock(&pmc_heapptr->lock);
+}
index dc3e2025859d8c3585961a305911df198347d1b0..4bf76737785e5d392d01d2ab850d4912668ec920 100644 (file)
@@ -1,22 +1,29 @@
 #ifndef PMC_GARBAGE_H
 #define PMC_GARBAGE_H
+#include <tmc/spin.h>
+
 struct pmc_unit {
-  unsigned int lock;
+  tmc_spin_mutex_t lock;
   unsigned int numbytes;
 };
 
 struct pmc_region {
   void * lastptr;
   struct ___Object___ * lastobj;
+  struct pmc_queue markqueue;
 };
 
 struct pmc_heap {
   struct pmc_region units[NUMCORES4GC*4];
   struct pmc_region regions[NUMCORES4GC];
-  unsigned int lock;
-  unsigned int numthreads;
+  tmc_spin_mutex_t lock;
+  volatile unsigned int numthreads;
 };
 
 extern struct pmc_heap * pmc_heapptr;
 
+void incrementthreads();
+void decrementthreads() {
+
+
 #endif
index f7dd4086c3c4e942687dda1d859568780fa15e64..a8dbf8d02cb3c44efe7c35d56443cd5bcc47aff7 100644 (file)
@@ -7,7 +7,7 @@
 void pmc_markObj(struct ___Object___ *ptr) {
   if (!ptr->mark) {
     ptr->mark=1;
-    pmc_enqueue(ptr);
+    //enqueue on this core's queue; pmc_enqueue now takes the target queue
+    //explicitly so other cores' queues can be stolen from during marking
+    pmc_enqueue(pmc_localqueue, ptr);
   }
 }
 
@@ -47,14 +47,38 @@ void pmc_markgarbagelist(struct garbagelist * listptr) {
 void pmc_mark(struct garbagelist *stackptr) {
   pmc_tomark(stackptr);
   while(true) {
+    //scan everything in our local queue
     pmc_marklocal();
-    
-    
+    //then try to steal from other cores; trysteal returns true only
+    //when global marking has terminated
+    if (pmc_trysteal())
+      break;
   }
 }
 
+//attempt to steal marking work from another core's queue.  Declares this
+//thread idle first, then spins while any thread is still active: if a
+//non-empty queue is found, re-registers as active, scans at most one
+//stolen object, and returns false so the caller re-drains its local
+//queue.  Returns true only when every thread is idle (numthreads==0),
+//which means no queue can be refilled -- marking has terminated.
+bool pmc_trysteal() {
+  decrementthreads();
+  while(pmc_heapptr->numthreads) {
+    for(int i=0;i<NUMCORES4GC;i++) {
+      struct pmc_queue *queue=&pmc_heapptr->regions[i].markqueue;
+      if (!pmc_isEmpty(queue)) {
+       incrementthreads();
+       //dequeue may still return NULL if another core raced us to it
+       void *objptr=pmc_dequeue(queue);
+       if (objptr!=NULL) {
+         unsigned int type=((struct ___Object___*)objptr)->type;
+         pmc_scanPtrsInObj(objptr, type);
+       }
+       return false;
+      }
+    }
+  }
+  return true;
+}
+
+//drain this core's local mark queue, scanning each dequeued object
 void pmc_marklocal() {
-  
+  void *objptr;
+  //assignment in the condition is intentional: loop until dequeue
+  //returns NULL (queue empty)
+  while(objptr=pmc_dequeue(pmc_localqueue)) {
+    unsigned int type=((struct ___Object___*)objptr)->type;
+    pmc_scanPtrsInObj(objptr, type);
+  }
 }
 
 void pmc_tomark(struct garbagelist * stackptr) {
diff --git a/Robust/src/Runtime/bamboo/pmc_queue.c b/Robust/src/Runtime/bamboo/pmc_queue.c
new file mode 100644 (file)
index 0000000..55dff87
--- /dev/null
@@ -0,0 +1,56 @@
+#include "pmc_queue.h"
+
+//initialize an empty queue: a single segment with head==tail and
+//coinciding indices (the state pmc_isEmpty reports as empty)
+void pmc_init(struct pmc_queue *queue) {
+  //RUNMALLOC takes a byte count; passing the bare type name does not
+  //compile -- NOTE(review): confirm RUNMALLOC's argument convention
+  queue->head=queue->tail=RUNMALLOC(sizeof(struct pmc_queue_segment));
+  queue->headindex=queue->tailindex=0;
+}
+
+//remove and return one object pointer, or NULL if the queue is empty.
+//The whole operation holds the queue lock so multiple stealing cores
+//can dequeue concurrently.
+void * pmc_dequeue(struct pmc_queue *queue) {
+  void *value=NULL;
+  tmc_spin_mutex_lock(&queue->lock);
+  //do-while loop allows sharing cleanup code
+  do {
+    //check for possible rollover
+    if (queue->tailindex==NUM_PMC_QUEUE_OBJECTS) {
+      if (queue->tail!=queue->head) {
+       struct pmc_queue_segment *oldtail=(struct pmc_queue_segment *)queue->tail;
+       queue->tail=oldtail->next;
+       queue->tailindex=0;
+       RUNFREE(oldtail);
+      } else break;
+    }
+    //now try to dequeue: empty only when head and tail are the SAME
+    //segment and the indices coincide; comparing indices alone falsely
+    //reported empty whenever they happened to match across segments
+    if ((queue->tail!=queue->head)||(queue->tailindex!=queue->headindex)) {
+      //was queue->tail[queue->tailindex], which indexes segments rather
+      //than the objects array inside the tail segment
+      value=(void *)queue->tail->objects[queue->tailindex];
+      queue->tailindex++;
+    }
+  } while(false);
+  tmc_spin_mutex_unlock(&queue->lock);
+  //was "return status" -- no such variable exists in this function
+  return value;
+}
+
+//append one object pointer.  The fast path is lock-free: publish the
+//slot, fence, then bump the index -- this is only safe if a single core
+//(the queue's owner) ever enqueues, which matches its use in pmc_markObj
+void pmc_enqueue(struct pmc_queue* queue, void *ptr) {
+  if (queue->headindex<NUM_PMC_QUEUE_OBJECTS) {
+    queue->head->objects[queue->headindex]=ptr;
+    //need fence to prevent reordering
+    __insn_mf();
+    queue->headindex++;
+    return;
+  } else {
+    //segment full: RUNMALLOC takes a byte count; passing the bare type
+    //name does not compile
+    struct pmc_queue_segment * seg=RUNMALLOC(sizeof(struct pmc_queue_segment));
+    //NOTE(review): explicit in case RUNMALLOC does not zero memory -- confirm
+    seg->next=NULL;
+    seg->objects[0]=ptr;
+    //simplify everything by grabbing a lock on segment change
+    tmc_spin_mutex_lock(&queue->lock);
+    queue->headindex=1;
+    queue->head->next=seg;
+    queue->head=seg;
+    tmc_spin_mutex_unlock(&queue->lock);
+  }
+}
+
+//a queue is empty exactly when head and tail are the same segment and
+//the indices coincide; taken under the lock so stealers see a
+//consistent snapshot of both fields
+bool pmc_isEmpty(struct pmc_queue *queue) {
+  tmc_spin_mutex_lock(&queue->lock);
+  bool status=(queue->head==queue->tail)&&(queue->headindex==queue->tailindex);
+  tmc_spin_mutex_unlock(&queue->lock);
+  return status;
+}
diff --git a/Robust/src/Runtime/bamboo/pmc_queue.h b/Robust/src/Runtime/bamboo/pmc_queue.h
new file mode 100644 (file)
index 0000000..69d047a
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef PMC_QUEUE_H
+#define PMC_QUEUE_H
+#include <tmc/spin.h>
+//bool is used in the prototypes below; before C23 it requires stdbool.h
+#include <stdbool.h>
+
+#define NUM_PMC_QUEUE_OBJECTS 256
+
+//singly linked segment of object pointers; a queue grows by appending
+//a fresh segment when the current head segment fills up
+struct pmc_queue_segment {
+  volatile void * objects[NUM_PMC_QUEUE_OBJECTS];
+  struct pmc_queue_segment * next;
+};
+
+//work queue for parallel marking; dequeue and segment swaps are guarded
+//by the spin lock (the unlocked enqueue fast path presumes a single
+//enqueuing core -- see pmc_enqueue)
+struct pmc_queue {
+  volatile struct pmc_queue_segment *head;  //segment being filled by enqueue
+  volatile struct pmc_queue_segment *tail;  //segment being drained by dequeue
+  volatile int headindex;                   //next free slot in head segment
+  volatile int tailindex;                   //next slot to take from tail segment
+  tmc_spin_mutex_t lock;
+};
+
+void * pmc_dequeue(struct pmc_queue *queue);
+void pmc_enqueue(struct pmc_queue* queue, void *ptr);
+bool pmc_isEmpty(struct pmc_queue *queue);
+void pmc_init(struct pmc_queue *queue);
+#endif