6 bool succ(int res) { return res != EMPTY && res != ABORT; }
7 bool fail(int res) { return !succ(res); } */
9 /** @DeclareState: IntList *deque;
10 @Initial: deque = new IntList;
11 @Commutativity: push <-> push (true)
12 @Commutativity: push <-> take (true)
13 @Commutativity: take <-> take (true) */
// Allocates a Deque whose initial circular buffer holds "size" slots.
// All stores use memory_order_relaxed: the object is not yet visible to
// any other thread during construction, so no ordering is required.
// NOTE(review): the return statement ("return q;", presumably) falls
// outside this chunk — confirm against the full file.
15 Deque * create_size(int size) {
16 Deque * q = (Deque *) calloc(1, sizeof(Deque));
// Header plus "size" trailing atomic_int slots in one allocation.
17 Array * a = (Array *) calloc(1, sizeof(Array)+size*sizeof(atomic_int));
18 atomic_store_explicit(&q->array, a, memory_order_relaxed);
// top == bottom == 0 means the deque starts empty.
19 atomic_store_explicit(&q->top, 0, memory_order_relaxed);
20 atomic_store_explicit(&q->bottom, 0, memory_order_relaxed);
21 atomic_store_explicit(&a->size, size, memory_order_relaxed);
// Body fragment of the default constructor (its signature is outside this
// chunk). Builds an empty deque with a 2-slot buffer, and additionally
// pre-allocates and zero-fills the doubled buffer that resize() will later
// publish — presumably stored into q->newArray on a line not visible here
// (resize() reads q->newArray); verify against the full file.
26 Deque * q = (Deque *) calloc(1, sizeof(Deque));
27 Array * a = (Array *) calloc(1, sizeof(Array)+2*sizeof(atomic_int));
30 * We specifically create the space of the new array in this initialization
31 * method just to make up the fact that the new array can potentially be
32 * used by other things or contains just junk data. Please see Section 6.4.1
35 /********** BEGIN **********/
37 Array *new_a = (Array *) calloc(1, new_size * sizeof(atomic_int) + sizeof(Array));
38 atomic_store_explicit(&new_a->size, new_size, memory_order_relaxed);
// Zero every slot so the "recycled" array starts from a known state.
40 for(i=0; i < new_size; i++) {
41 atomic_store_explicit(&new_a->buffer[i % new_size], 0, memory_order_relaxed);
44 /********** END **********/
// Initial published state: the 2-slot array, top == bottom == 0 (empty).
// Relaxed ordering is fine — nothing else can observe q yet.
46 atomic_store_explicit(&q->array, a, memory_order_relaxed);
47 atomic_store_explicit(&q->top, 0, memory_order_relaxed);
48 atomic_store_explicit(&q->bottom, 0, memory_order_relaxed);
49 atomic_store_explicit(&a->size, 2, memory_order_relaxed);
55 1. The expected way to use the deque is that we have a main thread where we
56 call push() and take(); and then we have other stealing threads that only
58 2. Bottom stores the index that push() is ready to update on; Top stores the
59 index that steal() is ready to read from.
60 3. take() greedily decreases the bottom, and then later checks whether it is
61 going to take the last element; If so, it will race with the corresponding
63 4. In this implementation, there are two places where we update the Top: a)
64 take() racing the last element and steal() consumes an element. We need to
65 have seq_cst for all the updates because we need to have a total SC order
66 between them such that the SC fences in take() and steal() can prevent the
67 load of Top right after the fence in take() will read the up-to-date
69 5. Note that the steal() really can bail any time since it never retries!!!
74 /** @PreCondition: return succ(C_RET) ? !STATE(deque)->empty() &&
75 STATE(deque)->back() == C_RET : true;
76 @JustifyingPrecondition: if (!succ(C_RET) && !STATE(deque)->empty()) {
77 // Make sure there are concurrent stealers who take those items
78 ForEach (item, STATE(deque))
79 if (!HasItem(CONCURRENT, Guard(EQ(NAME, "steal") && C_RET(steal) == item)))
82 @Transition: if (succ(C_RET)) {
83 if (STATE(deque)->empty()) return false;
84 STATE(deque)->pop_back();
// take(): the owner thread pops from the Bottom end. (The function
// signature and several interior lines fall outside this chunk.)
87 // take() greedily decreases the Bottom, then checks later
88 size_t b = atomic_load_explicit(&q->bottom, memory_order_relaxed) - 1;
89 Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
// Publish the speculative decrement before inspecting Top.
90 atomic_store_explicit(&q->bottom, b, memory_order_relaxed);
92 // XXX-injection-#1: Weaken the parameter "memory_order_seq_cst" to
93 // "memory_order_acq_rel", run "make" to recompile, and then run:
94 // "./run.sh ./chase-lev-deque-bugfix/testcase2 -m2 -y -u3 -tSPEC"
95 /********** Detected Correctness (testcase2) **********/
// SC fence pairs with the SC fence in steal(); per design note 4 above,
// it keeps this load of Top from missing a concurrent update.
96 atomic_thread_fence(memory_order_seq_cst);
97 size_t t = atomic_load_explicit(&q->top, memory_order_relaxed);
100 /* Non-empty queue. */
101 int sz = atomic_load_explicit(&a->size,memory_order_relaxed);
102 // Reads the buffer value before racing
103 x = atomic_load_explicit(&a->buffer[b % sz], memory_order_relaxed);
105 /* Single last element in queue. */
106 // XXX-overly-strong-#1: This was confirmed by the original authors
107 // that the first parameter doesn't have to be memory_order_seq_cst
108 // (which was the original one in the PPoPP'13 paper).
// CAS on Top races against concurrent steal()ers for the last element;
// losing the race means a stealer got the item.
109 if (!atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
110 memory_order_relaxed, memory_order_relaxed)) {
114 // Restore the Bottom
115 atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
117 } else { /* Empty queue. */
119 // Restore the Bottom
120 atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
122 // Make sure we have one ordering point (push <-> take) when it's empty
123 /** @OPClearDefine: true */
// Doubles the deque's circular buffer, copying the live window [top,
// bottom) into the larger ring. NOTE(review): presumably only the owner
// thread reaches resize (via push on a full queue) — the call site is not
// visible in this chunk; confirm. The relaxed loads rely on that. The
// release store at the end pairs with steal()'s acquire load of q->array
// so stealers observe the copied elements. (The function's closing lines
// are outside this chunk.)
127 void resize(Deque *q) {
128 Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
129 size_t size=atomic_load_explicit(&a->size, memory_order_relaxed);
130 size_t new_size=size << 1;
132 // Suppose the allocation returns a pack of memory that was used by
133 // something else before.
134 // Array *new_a = (Array *) calloc(1, new_size * sizeof(atomic_int) + sizeof(Array)); // This is the original line
// Reuse the pre-allocated array (see the constructor) to model recycled,
// possibly-junk memory instead of calling calloc here.
135 Array *new_a = (Array *) q->newArray;
137 size_t top=atomic_load_explicit(&q->top, memory_order_relaxed);
138 size_t bottom=atomic_load_explicit(&q->bottom, memory_order_relaxed);
139 atomic_store_explicit(&new_a->size, new_size, memory_order_relaxed);
// Copy the live elements; indices are taken modulo each ring's size.
141 for(i=top; i < bottom; i++) {
142 atomic_store_explicit(&new_a->buffer[i % new_size], atomic_load_explicit(&a->buffer[i % size], memory_order_relaxed), memory_order_relaxed);
145 // XXX-injection-#2: Weaken the parameter "memory_order_release" to
146 // "memory_order_relaxed", run "make" to recompile, and then run:
147 // "./run.sh ./chase-lev-deque-bugfix/testcase1 -m2 -y -u3 -tSPEC"
148 /********** Detected UL **********/
149 atomic_store_explicit(&q->array, new_a, memory_order_release);
153 /** @Transition: STATE(deque)->push_back(x); */
// push(): the owner thread appends at the Bottom end. (Some interior
// lines — e.g. the resize() call on the full-queue path — and the closing
// brace fall outside this chunk.)
154 void push(Deque *q, int x) {
155 size_t b = atomic_load_explicit(&q->bottom, memory_order_relaxed);
156 // XXX-injection-#3: Weaken the parameter "memory_order_acquire" to
157 // "memory_order_relaxed", run "make" to recompile, and then run:
158 // "./run.sh ./chase-lev-deque-bugfix/testcase1 -x1000 -m2 -y -u3 -tSPEC"
159 /********** Detected Correctness (testcase1 -x1000) **********/
160 size_t t = atomic_load_explicit(&q->top, memory_order_acquire);
161 Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
// b - t is the current element count; resize when the ring is full.
162 if (b - t > atomic_load_explicit(&a->size, memory_order_relaxed) - 1) /* Full queue. */ {
164 /********** Also Detected (testcase1) **********/
165 //Bug in paper...should have next line...
// Re-load the array pointer: resize() published a new one.
166 a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
168 // Update the buffer (this is the ordering point)
169 atomic_store_explicit(&a->buffer[b % atomic_load_explicit(&a->size, memory_order_relaxed)], x, memory_order_relaxed);
170 /** @OPDefine: true */
172 // XXX-injection-#4: Weaken the parameter "memory_order_release" to
173 // "memory_order_relaxed", run "make" to recompile, and then run:
174 // "./run.sh ./chase-lev-deque-bugfix/testcase1 -m2 -y -u3 -tSPEC"
175 /********** Detected UL (testcase1) **********/
// Release fence + relaxed store publish the element before the new
// Bottom becomes visible to stealers.
176 atomic_thread_fence(memory_order_release);
177 atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
180 /** @PreCondition: return succ(C_RET) ? !STATE(deque)->empty() &&
181 STATE(deque)->front() == C_RET : true;
182 @Transition: if (succ(C_RET)) {
183 if (STATE(deque)->empty()) return false;
184 STATE(deque)->pop_front();
// steal(): a non-owner thread takes from the Top end. Per design note 5
// above, it never retries — it may bail at any point. (The function's
// closing lines fall outside this chunk.)
186 int steal(Deque *q) {
187 // XXX-overly-strong-#2: This was found by AutoMO and was confirmed by the
188 // original authors that the parameter doesn't have to be
189 // memory_order_acquire (which was the original one in the PPoPP'13 paper).
190 // The reason is that this load is followed by an SC fence (discussed in
192 size_t t = atomic_load_explicit(&q->top, memory_order_relaxed);
194 // XXX-injection-#5: Weaken the parameter "memory_order_seq_cst" to
195 // "memory_order_acq_rel", run "make" to recompile, and then run:
196 // "./run.sh ./chase-lev-deque-bugfix/testcase3 -m2 -y -u3 -x200 -tSPEC"
197 /********** Detected Correctness (testcase3) **********/
// SC fence pairs with the SC fence in take(); see design note 4 above.
198 atomic_thread_fence(memory_order_seq_cst);
200 // XXX-injection-#6: Weaken the parameter "memory_order_acquire" to
201 // "memory_order_relaxed", run "make" to recompile, and then run:
202 // "./run.sh ./chase-lev-deque-bugfix/testcase1 -x100 -m2 -y -u3 -tSPEC"
203 /********** Detected UL (testcase1 -x100) **********/
204 size_t b = atomic_load_explicit(&q->bottom, memory_order_acquire);
207 /* Non-empty queue. */
208 // XXX-known-bug-#1: To reproduce the bug, weaken the parameter
209 // "memory_order_acquire" to "memory_order_relaxed", run "make" to
210 // recompile, and then run:
211 // "./run.sh ./chase-lev-deque-bugfix/testcase1 -x100 -m2 -y -u3 -tSPEC"
212 /********** Detected Correctness after initializing the array (testcase1 -x100) **********/
// Acquire pairs with resize()'s release store of q->array, so the copied
// elements are visible before we read from the buffer.
213 Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_acquire);
214 int sz = atomic_load_explicit(&a->size, memory_order_relaxed);
// Read the candidate element before racing for it.
215 x = atomic_load_explicit(&a->buffer[t % sz], memory_order_relaxed);
216 // XXX-injection-#7: Weaken the parameter "memory_order_seq_cst" to
217 // "memory_order_acq_rel", run "make" to recompile, and then run:
218 // "./run.sh ./chase-lev-deque-bugfix/testcase3 -x500 -m2 -y -u3 -tSPEC"
219 /********** Detected Correctness (testcase3 -x500) **********/
// seq_cst CAS on Top: one of the two Top updates that must form a total
// SC order (design note 4). Losing means another stealer or take() won.
220 bool succ = atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
221 memory_order_seq_cst, memory_order_relaxed);
222 /** @OPDefine: true */