/* my_queue.c — lock-free queue benchmark (from the cdsspec-compiler / CDSChecker test suite) */
1 #include <threads.h>
2 #include <stdlib.h>
3 #include "librace.h"
4 #include "model-assert.h"
5
6 #include "my_queue.h"
7
8 #define relaxed memory_order_relaxed
9 #define release memory_order_release
10 #define acquire memory_order_acquire
11
12 #define MAX_FREELIST 4 /* Each thread can own up to MAX_FREELIST free nodes */
13 #define INITIAL_FREE 2 /* Each thread starts with INITIAL_FREE free nodes */
14
15 #define POISON_IDX 0x666
16
17 static unsigned int (*free_lists)[MAX_FREELIST];
18
19 /* Search this thread's free list for a "new" node */
20 static unsigned int new_node()
21 {
22         int i;
23         int t = get_thread_num();
24         for (i = 0; i < MAX_FREELIST; i++) {
25                 unsigned int node = load_32(&free_lists[t][i]);
26                 if (node) {
27                         store_32(&free_lists[t][i], 0);
28                         return node;
29                 }
30         }
31         /* free_list is empty? */
32         MODEL_ASSERT(0);
33         return 0;
34 }
35
36 /* Place this node index back on this thread's free list */
37 static void reclaim(unsigned int node)
38 {
39         int i;
40         int t = get_thread_num();
41
42         /* Don't reclaim NULL node */
43         MODEL_ASSERT(node);
44
45         for (i = 0; i < MAX_FREELIST; i++) {
46                 /* Should never race with our own thread here */
47                 unsigned int idx = load_32(&free_lists[t][i]);
48
49                 /* Found empty spot in free list */
50                 if (idx == 0) {
51                         store_32(&free_lists[t][i], node);
52                         return;
53                 }
54         }
55         /* free list is full? */
56         MODEL_ASSERT(0);
57 }
58
59 void init_queue(queue_t *q, int num_threads)
60 {
61         /**
62                 @Begin
63                 @Entry_point
64                 @End
65         */
66
67         int i, j;
68
69         /* Initialize each thread's free list with INITIAL_FREE pointers */
70         /* The actual nodes are initialized with poison indexes */
71         free_lists = malloc(num_threads * sizeof(*free_lists));
72         for (i = 0; i < num_threads; i++) {
73                 for (j = 0; j < INITIAL_FREE; j++) {
74                         free_lists[i][j] = 2 + i * MAX_FREELIST + j;
75                         atomic_init(&q->nodes[free_lists[i][j]].next, MAKE_POINTER(POISON_IDX, 0));
76                 }
77         }
78
79         /* initialize queue */
80         atomic_init(&q->head, MAKE_POINTER(1, 0));
81         atomic_init(&q->tail, MAKE_POINTER(1, 0));
82         atomic_init(&q->nodes[1].next, MAKE_POINTER(0, 0));
83 }
84
/**
	@Begin
	@Interface_define: Enqueue
	@End
*/
/*
 * Lock-free enqueue (Michael–Scott-style: dummy node, tail helping,
 * version counters in the pointer to defend against ABA).
 * Appends 'val' to the tail of the queue; retries until the link succeeds.
 */
void enqueue(queue_t *q, unsigned int val)
{
	int success = 0;
	unsigned int node;
	pointer tail;
	pointer next;
	pointer tmp;

	/* Grab a fresh node from this thread's free list and prepare it:
	 * store the value and clear its next pointer (keeping the count bits). */
	node = new_node();
	store_32(&q->nodes[node].value, val);
	tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
	set_ptr(&tmp, 0); // NULL
	atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);

	while (!success) {
		tail = atomic_load_explicit(&q->tail, acquire);
		next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire);
		/* Re-read tail: if unchanged, the (tail, next) pair is consistent */
		if (tail == atomic_load_explicit(&q->tail, relaxed)) {

			/* Check for uninitialized 'next' */
			MODEL_ASSERT(get_ptr(next) != POISON_IDX);

			if (get_ptr(next) == 0) { // == NULL
				/* tail is really last: try to link the new node after it.
				 * The count is bumped so stale CASes on this word fail. */
				pointer value = MAKE_POINTER(node, get_count(next) + 1);
				success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
						&next, value, release, release);
			}
			if (!success) {
				/* tail is lagging behind the real last node:
				 * help swing it forward to its successor, then retry */
				unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, acquire));
				pointer value = MAKE_POINTER(ptr,
						get_count(tail) + 1);
				int commit_success = 0;
				commit_success = atomic_compare_exchange_strong_explicit(&q->tail,
						&tail, value, release, release);
				/**
					@Begin
					@Commit_point_define_check: __ATOMIC_RET__ == true
					@Label: Enqueue_Success_Point
					@End
				*/
				thrd_yield();
			}
		}
	}
	/* Swing tail to the node we just linked; if this CAS fails, some
	 * other thread has already advanced tail for us — that is fine. */
	atomic_compare_exchange_strong_explicit(&q->tail,
			&tail,
			MAKE_POINTER(node, get_count(tail) + 1),
			release, release);
}
139
140
/**
	@Begin
	@Interface_define: Dequeue
	@End
*/
/*
 * Lock-free dequeue (Michael–Scott-style).
 * Returns the value behind the dummy node and recycles the old dummy
 * onto the calling thread's free list.
 * Returns 0 when the queue is empty — 0 is an in-band "empty" sentinel,
 * so presumably callers never enqueue the value 0 (TODO confirm).
 */
unsigned int dequeue(queue_t *q)
{
	unsigned int value;
	int success = 0;
	pointer head;
	pointer tail;
	pointer next;

	while (!success) {
		head = atomic_load_explicit(&q->head, acquire);
		tail = atomic_load_explicit(&q->tail, relaxed);
		next = atomic_load_explicit(&q->nodes[get_ptr(head)].next, acquire);
		/* Re-read head: if unchanged, (head, tail, next) are consistent */
		if (atomic_load_explicit(&q->head, relaxed) == head) {
			if (get_ptr(head) == get_ptr(tail)) {
				/* head == tail: either the queue is empty, or tail lags */

				/* Check for uninitialized 'next' */
				MODEL_ASSERT(get_ptr(next) != POISON_IDX);

				if (get_ptr(next) == 0) { // NULL
					return 0; // NULL
				}
				/* tail is lagging: help swing it forward, then retry */
				atomic_compare_exchange_strong_explicit(&q->tail,
						&tail,
						MAKE_POINTER(get_ptr(next), get_count(tail) + 1),
						release, release);
				thrd_yield();
			} else {
				/* Read the value BEFORE the CAS: after head moves, the
				 * old node may be reclaimed and reused by another thread */
				value = load_32(&q->nodes[get_ptr(next)].value);
				success = atomic_compare_exchange_strong_explicit(&q->head,
						&head,
						MAKE_POINTER(get_ptr(next), get_count(head) + 1),
						release, release);
				/**
					@Begin
					@Commit_point_define_check: __ATOMIC_RET__ == true
					@Label: Dequeue_Success_Point
					@End
				*/
				if (!success)
					thrd_yield();
			}
		}
	}
	/* The old head (previous dummy) is now unlinked: recycle it */
	reclaim(get_ptr(head));
	return value;
}