int x;
if (t <= b) {
/* Non-empty queue. */
- int size = atomic_load_explicit(&a->size, memory_order_relaxed);
- if (size == 0)
- model_print("take: size == 0\n");
- // TODO: size can be zero here!!
- x = atomic_load_explicit(&a->buffer[b % size], memory_order_relaxed);
+ x = atomic_load_explicit(&a->buffer[b % atomic_load_explicit(&a->size,memory_order_relaxed)], memory_order_relaxed);
/**
@Begin
@Commit_point_define_check: t != b
atomic_store_explicit(&new_a->size, new_size, memory_order_relaxed);
size_t i;
for(i=top; i < bottom; i++) {
- if (new_size == 0)
- model_print("resize: new_size == 0\n");
- if (size == 0)
- model_print("resize: size == 0\n");
atomic_store_explicit(&new_a->buffer[i % new_size], atomic_load_explicit(&a->buffer[i % size], memory_order_relaxed), memory_order_relaxed);
}
atomic_store_explicit(&q->array, new_a, memory_order_release);
//Bug in paper: the next line is missing there, but is required to re-read the array after resize.
a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
}
- int size = atomic_load_explicit(&a->size, memory_order_relaxed);
- if (size == 0)
- model_print("push: size == 0\n");
- atomic_store_explicit(&a->buffer[b % size], x, memory_order_relaxed);
+ atomic_store_explicit(&a->buffer[b % atomic_load_explicit(&a->size, memory_order_relaxed)], x, memory_order_relaxed);
atomic_thread_fence(memory_order_release);
/**
@Begin
if (t < b) {
/* Non-empty queue. */
Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_acquire);
- int size = atomic_load_explicit(&a->size, memory_order_relaxed);
- if (size == 0)
- model_print("steal: size == 0\n");
- x = atomic_load_explicit(&a->buffer[t % size], memory_order_relaxed);
+ x = atomic_load_explicit(&a->buffer[t % atomic_load_explicit(&a->size, memory_order_relaxed)], memory_order_relaxed);
bool succ = atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
memory_order_seq_cst, memory_order_relaxed);
/**
#ifndef DEQUE_H
#define DEQUE_H
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h"
typedef struct {
atomic_size_t size;
int b;
int c;
-static void task1(void * param) {
- a=steal(q);
-}
+static void task(void * param) {
+ // FIXME: Adding the following take() will expose an Uninitialized Load bug...
+ //a=take(q);
-static void task2(void * param) {
- a=take(q);
+ a=steal(q);
}
int user_main(int argc, char **argv)
@Entry_point
@End
*/
- thrd_t t1, t2;
+ thrd_t t;
q=create();
+ thrd_create(&t, task, 0);
push(q, 1);
push(q, 2);
- thrd_create(&t1, task1, 0);
- thrd_create(&t2, task2, 0);
push(q, 4);
b=take(q);
c=take(q);
- thrd_join(t1);
- thrd_join(t2);
+ thrd_join(t);
bool correct=true;
if (a!=1 && a!=2 && a!=4 && a!= EMPTY)
correct=false;
if (!correct)
printf("a=%d b=%d c=%d\n",a,b,c);
- //MODEL_ASSERT(correct);
+ MODEL_ASSERT(correct);
return 0;
}
@Happens_before:
# Since commit_point_set has no ID attached, A -> B means that for any B,
# the previous A happens before B.
+ Read_Unlock -> Read_Lock
+ Read_Unlock -> Write_Lock
+ Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+ Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
Write_Unlock -> Write_Lock
Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
@Interface: Write_Trylock
@Commit_point_set:
Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
- @Condition:
- !writer_lock_acquired && reader_lock_cnt == 0
@HB_condition:
HB_Write_Trylock_Succ :: __RET__ == 1
@Action:
- if (__COND_SAT__)
+ if (__RET__ == 1)
writer_lock_acquired = true;
- @Post_check:
- __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
@End
*/
static inline int write_trylock(rwlock_t *rw)
{
int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ //model_print("write_trylock: priorvalue: %d\n", priorvalue);
/**
@Begin
@Potential_commit_point_define: true
{
mcs_mutex::guard g(mutex);
printf("store: %d\n", 17);
- store_32(&shared, 17);
+ //store_32(&shared, 17);
+ shared = 17;
mutex->unlock(&g);
mutex->lock(&g);
- printf("load: %u\n", load_32(&shared));
+ //printf("load: %u\n", load_32(&shared));
+ printf("load: %u\n", shared);
}
void threadB(void *arg)
{
mcs_mutex::guard g(mutex);
- printf("load: %u\n", load_32(&shared));
+ //printf("load: %u\n", load_32(&shared));
+ printf("load: %u\n", shared);
mutex->unlock(&g);
mutex->lock(&g);
printf("store: %d\n", 17);
- store_32(&shared, 17);
+ shared = 17;
+ //store_32(&shared, 17);
}
int user_main(int argc, char **argv)
int i;
int t = get_thread_num();
for (i = 0; i < MAX_FREELIST; i++) {
- unsigned int node = load_32(&free_lists[t][i]);
+ //unsigned int node = load_32(&free_lists[t][i]);
+ unsigned int node = free_lists[t][i];
if (node) {
- store_32(&free_lists[t][i], 0);
+ //store_32(&free_lists[t][i], 0);
+ free_lists[t][i] = 0;
return node;
}
}
/* free_list is empty? */
- MODEL_ASSERT(0);
+ //MODEL_ASSERT(0);
return 0;
}
int t = get_thread_num();
/* Don't reclaim NULL node */
- MODEL_ASSERT(node);
+ //MODEL_ASSERT(node);
for (i = 0; i < MAX_FREELIST; i++) {
/* Should never race with our own thread here */
- unsigned int idx = load_32(&free_lists[t][i]);
+ //unsigned int idx = load_32(&free_lists[t][i]);
+ unsigned int idx = free_lists[t][i];
/* Found empty spot in free list */
if (idx == 0) {
- store_32(&free_lists[t][i], node);
+ //store_32(&free_lists[t][i], node);
+ free_lists[t][i] = node;
return;
}
}
/* free list is full? */
- MODEL_ASSERT(0);
+ //MODEL_ASSERT(0);
}
void init_queue(queue_t *q, int num_threads)
pointer tmp;
node = new_node();
- store_32(&q->nodes[node].value, val);
+ //store_32(&q->nodes[node].value, val);
+ q->nodes[node].value = val;
tmp = atomic_load_explicit(&q->nodes[node].next, relaxed);
set_ptr(&tmp, 0); // NULL
atomic_store_explicit(&q->nodes[node].next, tmp, relaxed);
if (tail == atomic_load_explicit(&q->tail, relaxed)) {
/* Check for uninitialized 'next' */
- MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+ //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
if (get_ptr(next) == 0) { // == NULL
pointer value = MAKE_POINTER(node, get_count(next) + 1);
if (get_ptr(head) == get_ptr(tail)) {
/* Check for uninitialized 'next' */
- MODEL_ASSERT(get_ptr(next) != POISON_IDX);
+ //MODEL_ASSERT(get_ptr(next) != POISON_IDX);
if (get_ptr(next) == 0) { // NULL
/**
release, release);
thrd_yield();
} else {
- value = load_32(&q->nodes[get_ptr(next)].value);
+ //value = load_32(&q->nodes[get_ptr(next)].value);
+ value = q->nodes[get_ptr(next)].value;
success = atomic_compare_exchange_strong_explicit(&q->head,
&head,
MAKE_POINTER(get_ptr(next), get_count(head) + 1),