X-Git-Url: http://plrg.eecs.uci.edu/git/?p=model-checker-benchmarks.git;a=blobdiff_plain;f=ms-queue%2Fmy_queue.c;h=492c862c578cf21eeaf4658b7e1df95cf920d66b;hp=bb62fa781183c98f5c7727cdbb79114012749029;hb=4cd598105d4f196946bb15068ec4be2dc6c691bc;hpb=ec4f36cd03b7ba4326a99770719bff27999549aa

diff --git a/ms-queue/my_queue.c b/ms-queue/my_queue.c
index bb62fa7..492c862 100644
--- a/ms-queue/my_queue.c
+++ b/ms-queue/my_queue.c
@@ -1,49 +1,76 @@
 #include <threads.h>
 #include <stdlib.h>
 #include "librace.h"
+#include "model-assert.h"
 
 #include "my_queue.h"
 
-static unsigned int *node_nums;
+#define relaxed memory_order_relaxed
+#define release memory_order_release
+#define acquire memory_order_acquire
+
+#define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
+#define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
+
+static unsigned int (*free_lists)[MAX_FREELIST];
+
+/* Search this thread's free list for a "new" node */
 static unsigned int new_node()
 {
-	return node_nums[get_thread_num()];
+	int i;
+	int t = get_thread_num();
+	for (i = 0; i < MAX_FREELIST; i++) {
+		unsigned int node = load_32(&free_lists[t][i]);
+		if (node) {
+			store_32(&free_lists[t][i], 0);
+			return node;
+		}
+	}
+	/* free_list is empty? */
+	MODEL_ASSERT(0);
+	return 0;
 }
 
+/* Place this node index back on this thread's free list */
 static void reclaim(unsigned int node)
 {
-	node_nums[get_thread_num()] = node;
+	int i;
+	int t = get_thread_num();
+
+	/* Don't reclaim NULL node */
+	MODEL_ASSERT(node);
+
+	for (i = 0; i < MAX_FREELIST; i++) {
+		/* Should never race with our own thread here */
+		unsigned int idx = load_32(&free_lists[t][i]);
+
+		/* Found empty spot in free list */
+		if (idx == 0) {
+			store_32(&free_lists[t][i], node);
+			return;
+		}
+	}
+	/* free list is full? */
+	MODEL_ASSERT(0);
 }
 
 void init_queue(queue_t *q, int num_threads)
 {
-	unsigned int i;
-	pointer head;
-	pointer tail;
-	pointer next;
+	int i, j;
 
-	node_nums = malloc(num_threads * sizeof(*node_nums));
-	for (i = 0; i < num_threads; i++)
-		node_nums[i] = 2 + i;
-
-	/* initialize queue */
-	head = MAKE_POINTER(1, 0);
-	tail = MAKE_POINTER(1, 0);
-	next = MAKE_POINTER(0, 0); // (NULL, 0)
-
-	atomic_init(&q->head, head);
-	atomic_init(&q->tail, tail);
-	atomic_init(&q->nodes[1].next, next);
-
-	/* initialize avail list */
-	for (i = 2; i < MAX_NODES; i++) {
-		next = MAKE_POINTER(i + 1, 0);
-		atomic_init(&q->nodes[i].next, next);
+	/* Initialize each thread's free list with INITIAL_FREE NULL "pointers" */
+	free_lists = malloc(num_threads * sizeof(*free_lists));
+	for (i = 0; i < num_threads; i++) {
+		for (j = 0; j < INITIAL_FREE; j++) {
+			free_lists[i][j] = 2 + i * MAX_FREELIST + j;
+			atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(0, 0));
+		}
 	}
 
-	next = MAKE_POINTER(0, 0); // (NULL, 0)
-	atomic_init(&q->nodes[MAX_NODES].next, next);
+	/* initialize queue */
+	atomic_init(&q->head, MAKE_POINTER(1, 0));
+	atomic_init(&q->tail, MAKE_POINTER(1, 0));
+	atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
 }
 
 void enqueue(queue_t *q, unsigned int val)
@@ -56,32 +83,34 @@ void enqueue(queue_t *q, unsigned int val)
 
 	node = new_node();
 	store_32(&q->nodes[node].value, val);
-	tmp = atomic_load(&q->nodes[node].next);
+	tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
 	set_ptr(&tmp, 0); // NULL
-	atomic_store(&q->nodes[node].next, tmp);
+	atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
 
 	while (!success) {
-		tail = atomic_load(&q->tail);
-		next = atomic_load(&q->nodes[get_ptr(tail)].next);
-		if (tail == atomic_load(&q->tail)) {
+		tail = atomic_load_explicit(&q->tail, acquire);
+		next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
+		if (tail == atomic_load_explicit(&q->tail, relaxed)) {
 			if (get_ptr(next) == 0) { // == NULL
 				pointer value = MAKE_POINTER(node, get_count(next) + 1);
-				success = atomic_compare_exchange_weak(&q->nodes[get_ptr(tail)].next,
-						&next, value);
+				success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
+						&next, value, memory_order_acq_rel, memory_order_acq_rel);
 			}
 			if (!success) {
-				unsigned int ptr = get_ptr(atomic_load(&q->nodes[get_ptr(tail)].next));
+				unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, memory_order_seq_cst));
 				pointer value = MAKE_POINTER(ptr, get_count(tail) + 1);
-				atomic_compare_exchange_strong(&q->tail,
-						&tail, value);
+				atomic_compare_exchange_strong_explicit(&q->tail,
+						&tail, value,
+						memory_order_acq_rel, memory_order_acq_rel);
 				thrd_yield();
 			}
 		}
 	}
-	atomic_compare_exchange_strong(&q->tail,
+	atomic_compare_exchange_strong_explicit(&q->tail,
 			&tail,
-			MAKE_POINTER(node, get_count(tail) + 1));
+			MAKE_POINTER(node, get_count(tail) + 1),
+			memory_order_acq_rel, memory_order_acq_rel);
 }
 
 unsigned int dequeue(queue_t *q)
@@ -93,23 +122,25 @@ unsigned int dequeue(queue_t *q)
 	pointer next;
 
 	while (!success) {
-		head = atomic_load(&q->head);
-		tail = atomic_load(&q->tail);
-		next = atomic_load(&q->nodes[get_ptr(head)].next);
-		if (atomic_load(&q->head) == head) {
+		head = atomic_load_explicit(&q->head, acquire);
+		tail = atomic_load_explicit(&q->tail, acquire);
+		next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
+		if (atomic_load_explicit(&q->head, relaxed) == head) {
 			if (get_ptr(head) == get_ptr(tail)) {
 				if (get_ptr(next) == 0) { // NULL
 					return 0; // NULL
 				}
-				atomic_compare_exchange_strong(&q->tail,
+				atomic_compare_exchange_strong_explicit(&q->tail,
 						&tail,
-						MAKE_POINTER(get_ptr(next), get_count(tail) + 1));
+						MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
+						memory_order_acq_rel, memory_order_acq_rel);
 				thrd_yield();
 			} else {
 				value = load_32(&q->nodes[get_ptr(next)].value);
-				success = atomic_compare_exchange_weak(&q->head,
+				success = atomic_compare_exchange_strong_explicit(&q->head,
 						&head,
-						MAKE_POINTER(get_ptr(next), get_count(head) + 1));
+						MAKE_POINTER(get_ptr(next), get_count(head) + 1),
+						memory_order_acq_rel, memory_order_acq_rel);
 				if (!success)
 					thrd_yield();
 			}
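
For reference, below is a minimal standalone sketch (not part of the benchmark or the commit above) of the node-index layout the new per-thread free lists rely on: with MAX_FREELIST = 4 and INITIAL_FREE = 2, init_queue() hands thread i the node indexes 2 + i * MAX_FREELIST + j for j = 0..INITIAL_FREE-1 (node 1 is the initial dummy pointed to by head and tail, and index 0 stands for NULL), so no two threads start out owning the same node. The program only mirrors that indexing arithmetic; the queue type, atomics, and model-checker harness are omitted, and num_threads = 2 is an assumed example value.

#include <stdio.h>
#include <stdlib.h>

#define MAX_FREELIST 4	/* Each thread can own up to MAX_FREELIST free nodes */
#define INITIAL_FREE 2	/* Each thread starts with INITIAL_FREE free nodes */

int main(void)
{
	int num_threads = 2;	/* assumed example value */
	unsigned int (*free_lists)[MAX_FREELIST];
	int i, j;

	/* Same layout computation as init_queue(): per-thread free nodes
	 * start at index 2, strided by MAX_FREELIST per thread; unused
	 * slots stay 0 (the NULL index). */
	free_lists = calloc(num_threads, sizeof(*free_lists));
	for (i = 0; i < num_threads; i++)
		for (j = 0; j < INITIAL_FREE; j++)
			free_lists[i][j] = 2 + i * MAX_FREELIST + j;

	for (i = 0; i < num_threads; i++) {
		printf("thread %d free list:", i);
		for (j = 0; j < MAX_FREELIST; j++)
			printf(" %u", free_lists[i][j]);
		printf("\n");
	}
	/* Prints:
	 *   thread 0 free list: 2 3 0 0
	 *   thread 1 free list: 6 7 0 0
	 */
	free(free_lists);
	return 0;
}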