fixed command line
authorPeizhao Ou <peizhaoo@uci.edu>
Tue, 27 Sep 2016 22:31:31 +0000 (15:31 -0700)
committerPeizhao Ou <peizhaoo@uci.edu>
Tue, 27 Sep 2016 22:31:31 +0000 (15:31 -0700)
138 files changed:
benchmark/Makefile [deleted file]
benchmark/chase-lev-deque-bugfix/Makefile [deleted file]
benchmark/chase-lev-deque-bugfix/deque.c [deleted file]
benchmark/chase-lev-deque-bugfix/deque.h [deleted file]
benchmark/chase-lev-deque-bugfix/main.c [deleted file]
benchmark/chase-lev-deque-bugfix/note.txt [deleted file]
benchmark/chase-lev-deque-bugfix/testcase1.c [deleted file]
benchmark/chase-lev-deque-bugfix/testcase2.c [deleted file]
benchmark/cliffc-hashtable/.nfs0000000001748028000000e7 [deleted file]
benchmark/cliffc-hashtable/NonBlockingHashMap.java [deleted file]
benchmark/cliffc-hashtable/cliffc_hashtable.h [deleted file]
benchmark/cliffc-hashtable/main.cc [deleted file]
benchmark/cliffc-hashtable/main.cc.backup [deleted file]
benchmark/concurrent-hashmap/ConcurrentHashMap.java [deleted file]
benchmark/concurrent-hashmap/Makefile [deleted file]
benchmark/concurrent-hashmap/hashmap.h [deleted file]
benchmark/concurrent-hashmap/main.cc [deleted file]
benchmark/concurrent-hashmap/note.txt [deleted file]
benchmark/concurrent-hashmap/testcase1.cc [deleted file]
benchmark/include/unrelacy.h [deleted file]
benchmark/linuxrwlocks/.gitignore [deleted file]
benchmark/linuxrwlocks/Makefile [deleted file]
benchmark/linuxrwlocks/linuxrwlocks.c [deleted file]
benchmark/linuxrwlocks/testcase1.c [deleted file]
benchmark/linuxrwlocks/testcase2.c [deleted file]
benchmark/mcs-lock/.gitignore [deleted file]
benchmark/mcs-lock/Makefile [deleted file]
benchmark/mcs-lock/mcs-lock.cc [deleted file]
benchmark/mcs-lock/mcs-lock.h [deleted file]
benchmark/mcs-lock/note.txt [deleted file]
benchmark/mpmc-queue/.gitignore [deleted file]
benchmark/mpmc-queue/Makefile [deleted file]
benchmark/mpmc-queue/mpmc-queue.cc [deleted file]
benchmark/mpmc-queue/mpmc-queue.h [deleted file]
benchmark/mpmc-queue/testcase1.cc [deleted file]
benchmark/mpmc-queue/testcase2.cc [deleted file]
benchmark/mpmc-queue/testcase3.cc [deleted file]
benchmark/ms-queue/.gitignore [deleted file]
benchmark/ms-queue/Makefile [deleted file]
benchmark/ms-queue/main.c [deleted file]
benchmark/ms-queue/my_queue.c [deleted file]
benchmark/ms-queue/my_queue.h [deleted file]
benchmark/ms-queue/note.txt [deleted file]
benchmark/ms-queue/testcase.c [deleted file]
benchmark/ms-queue/testcase1.c [deleted file]
benchmark/ms-queue/testcase2.c [deleted file]
benchmark/ms-queue/testcase3.c [deleted file]
benchmark/notes.txt [deleted file]
benchmark/read-copy-update/Makefile [deleted file]
benchmark/read-copy-update/rcu.cc [deleted file]
benchmark/seqlock/Makefile [deleted file]
benchmark/seqlock/seqlock.cc [deleted file]
benchmark/seqlock/seqlock.h [deleted file]
benchmark/spsc-bugfix/.gitignore [deleted file]
benchmark/spsc-bugfix/Makefile [deleted file]
benchmark/spsc-bugfix/eventcount-relacy.h [deleted file]
benchmark/spsc-bugfix/eventcount.h [deleted file]
benchmark/spsc-bugfix/queue-relacy.h [deleted file]
benchmark/spsc-bugfix/queue.h [deleted file]
benchmark/spsc-bugfix/spsc-queue.cc [deleted file]
benchmark/spsc-bugfix/spsc-relacy.cc [deleted file]
benchmark/trylock/.gitignore [deleted file]
benchmark/trylock/Makefile [deleted file]
benchmark/trylock/trylock.c [deleted file]
correctness-model/note.txt [deleted file]
correctness-model/writeup/.#implementation.tex.1.9 [deleted file]
correctness-model/writeup/.#introduction.tex.1.24 [deleted file]
correctness-model/writeup/.#introduction.tex.1.26 [deleted file]
correctness-model/writeup/.#memorymodel.tex.1.1 [deleted file]
correctness-model/writeup/.#specification.tex.1.47 [deleted file]
correctness-model/writeup/CVS/Entries [deleted file]
correctness-model/writeup/CVS/Repository [deleted file]
correctness-model/writeup/CVS/Root [deleted file]
correctness-model/writeup/abstract.tex [deleted file]
correctness-model/writeup/conclusion.tex [deleted file]
correctness-model/writeup/confstrs-abbrv.bib [deleted file]
correctness-model/writeup/confstrs-long.bib [deleted file]
correctness-model/writeup/evaluation.tex [deleted file]
correctness-model/writeup/example.tex [deleted file]
correctness-model/writeup/figures/CVS/Entries [deleted file]
correctness-model/writeup/figures/CVS/Repository [deleted file]
correctness-model/writeup/figures/CVS/Root [deleted file]
correctness-model/writeup/figures/fence_sw.dot [deleted file]
correctness-model/writeup/figures/fence_sw2.dot [deleted file]
correctness-model/writeup/figures/fence_sw_r_collapse.dot [deleted file]
correctness-model/writeup/figures/fence_sw_r_collapse2.dot [deleted file]
correctness-model/writeup/figures/fence_sw_w_collapse.dot [deleted file]
correctness-model/writeup/figures/fence_sw_w_collapse2.dot [deleted file]
correctness-model/writeup/figures/ff_sc_rf.dot [deleted file]
correctness-model/writeup/figures/ff_sc_rf2.dot [deleted file]
correctness-model/writeup/figures/fw_sc_rf.dot [deleted file]
correctness-model/writeup/figures/fw_sc_rf2.dot [deleted file]
correctness-model/writeup/figures/lockhistory.pdf [deleted file]
correctness-model/writeup/figures/rcuhistory.odg [deleted file]
correctness-model/writeup/figures/rcuhistory.pdf [deleted file]
correctness-model/writeup/figures/release_seq.pdf [deleted file]
correctness-model/writeup/figures/rmw_atomicity.dot [deleted file]
correctness-model/writeup/figures/rmw_atomicity2.dot [deleted file]
correctness-model/writeup/figures/rmw_mo.dot [deleted file]
correctness-model/writeup/figures/rmw_mo2.dot [deleted file]
correctness-model/writeup/figures/rr_mo.dot [deleted file]
correctness-model/writeup/figures/rr_mo2.dot [deleted file]
correctness-model/writeup/figures/rw_mo.dot [deleted file]
correctness-model/writeup/figures/rw_mo2.dot [deleted file]
correctness-model/writeup/figures/sc_mo.dot [deleted file]
correctness-model/writeup/figures/sc_mo2.dot [deleted file]
correctness-model/writeup/figures/sc_wr_mo.dot [deleted file]
correctness-model/writeup/figures/sc_wr_mo2.dot [deleted file]
correctness-model/writeup/figures/specdesign.pdf [deleted file]
correctness-model/writeup/figures/specworkflow.pdf [deleted file]
correctness-model/writeup/figures/wf_sc_rf.dot [deleted file]
correctness-model/writeup/figures/wf_sc_rf2.dot [deleted file]
correctness-model/writeup/figures/workflow.pdf [deleted file]
correctness-model/writeup/figures/wr_mo.dot [deleted file]
correctness-model/writeup/figures/wr_mo2.dot [deleted file]
correctness-model/writeup/figures/ww_mo.dot [deleted file]
correctness-model/writeup/figures/ww_mo2.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_first_collapse.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_first_collapse2.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_mo.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_mo2.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_second_collapse.dot [deleted file]
correctness-model/writeup/figures/ww_sc_fence_second_collapse2.dot [deleted file]
correctness-model/writeup/formalization.tex [deleted file]
correctness-model/writeup/implementation.tex [deleted file]
correctness-model/writeup/introduction.tex [deleted file]
correctness-model/writeup/issta15_submission.pdf [deleted file]
correctness-model/writeup/makefile [deleted file]
correctness-model/writeup/memorymodel.tex [deleted file]
correctness-model/writeup/paper.bib [deleted file]
correctness-model/writeup/paper.tex [deleted file]
correctness-model/writeup/related.tex [deleted file]
correctness-model/writeup/sig-alternate.cls [deleted file]
correctness-model/writeup/speccfg.tex [deleted file]
correctness-model/writeup/specification.tex [deleted file]
correctness-model/writeup/spell.lst [deleted file]
correctness-model/writeup/technical.tex [deleted file]
src/edu/uci/eecs/codeGenerator/CodeGenerator.java

diff --git a/benchmark/Makefile b/benchmark/Makefile
deleted file mode 100644 (file)
index 3980f68..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-include ../benchmarks.mk
-
-TESTNAME = main testcase1 testcase2
-
-HEADERS = deque.h
-OBJECTS = deque.o
-
-all: $(TESTNAME)
-
-$(TESTNAME): % : %.c $(OBJECTS)
-       $(CC) -o $@ $^ $(CPPFLAGS) $(LDFLAGS)
-
-%.o: %.c
-       $(CC) -c -o $@ $< $(CPPFLAGS)
-
-clean:
-       rm -f $(TESTNAME) *.o
diff --git a/benchmark/chase-lev-deque-bugfix/Makefile b/benchmark/chase-lev-deque-bugfix/Makefile
deleted file mode 100644 (file)
index 3980f68..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-include ../benchmarks.mk
-
-TESTNAME = main testcase1 testcase2
-
-HEADERS = deque.h
-OBJECTS = deque.o
-
-all: $(TESTNAME)
-
-$(TESTNAME): % : %.c $(OBJECTS)
-       $(CC) -o $@ $^ $(CPPFLAGS) $(LDFLAGS)
-
-%.o: %.c
-       $(CC) -c -o $@ $< $(CPPFLAGS)
-
-clean:
-       rm -f $(TESTNAME) *.o
diff --git a/benchmark/chase-lev-deque-bugfix/deque.c b/benchmark/chase-lev-deque-bugfix/deque.c
deleted file mode 100644 (file)
index ff28527..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-#include <stdatomic.h>
-#include <inttypes.h>
-#include "deque.h"
-#include <stdlib.h>
-#include <stdio.h>
-
-inline bool fail(int ret) {
-       return ret == ABORT || ret == EMPTY;
-}
-
-Deque * create() {
-       Deque * q = (Deque *) calloc(1, sizeof(Deque));
-       Array * a = (Array *) calloc(1, sizeof(Array)+2*sizeof(atomic_int));
-       atomic_store_explicit(&q->array, a, memory_order_relaxed);
-       atomic_store_explicit(&q->top, 0, memory_order_relaxed);
-       atomic_store_explicit(&q->bottom, 0, memory_order_relaxed);
-       atomic_store_explicit(&a->size, 2, memory_order_relaxed);
-       return q;
-}
-
-
-/**
-       @Begin
-       @Interface_define: Take 
-       @End
-*/
-int take(Deque *q) {
-       size_t b = atomic_load_explicit(&q->bottom, memory_order_relaxed) - 1;
-       Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
-       atomic_store_explicit(&q->bottom, b, memory_order_relaxed);
-       /**** SPEC (sequential) (testcase1.c) ****/
-       atomic_thread_fence(memory_order_seq_cst);
-       size_t t = atomic_load_explicit(&q->top, memory_order_relaxed);
-       int x;
-       if (t <= b) {
-               /* Non-empty queue. */
-               int size = atomic_load_explicit(&a->size,memory_order_relaxed);
-               x = atomic_load_explicit(&a->buffer[b % size], memory_order_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true 
-                       @Label: TakeReadBuffer
-                       @End
-               */
-               if (t == b) {
-                       /* Single last element in queue. */
-                       //FIXME: weaken the following seq_cst causes no spec problem
-                       bool succ = atomic_compare_exchange_strong_explicit(&q->top, &t, t +
-                               1, memory_order_seq_cst, memory_order_relaxed);
-                       if (!succ) {
-                               /* Failed race. */
-                               x = EMPTY;
-                       }
-                       atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
-               }
-       } else { /* Empty queue. */
-               x = EMPTY;
-               atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
-       }
-       return x;
-}
-
-void resize(Deque *q) {
-       Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
-       size_t size=atomic_load_explicit(&a->size, memory_order_relaxed);
-       size_t new_size=size << 1;
-       Array *new_a = (Array *) calloc(1, new_size * sizeof(atomic_int) + sizeof(Array));
-       size_t top=atomic_load_explicit(&q->top, memory_order_relaxed);
-       size_t bottom=atomic_load_explicit(&q->bottom, memory_order_relaxed);
-       atomic_store_explicit(&new_a->size, new_size, memory_order_relaxed);
-       size_t i;
-
-       // Initialize the whole new array to turn off the CDSChecker UL error
-       // Check if CDSSpec checker can catch this bug
-       /*
-       for(i=0; i < new_size; i++) {
-               atomic_store_explicit(&new_a->buffer[i % new_size], atomic_load_explicit(&a->buffer[i % size], memory_order_relaxed), memory_order_relaxed);
-       }
-       */
-       for(i=top; i < bottom; i++) {
-               atomic_store_explicit(&new_a->buffer[i % new_size], atomic_load_explicit(&a->buffer[i % size], memory_order_relaxed), memory_order_relaxed);
-       }
-       /**** detected UL ****/
-       atomic_store_explicit(&q->array, new_a, memory_order_release);
-       //printf("resize\n");
-}
-
-/**
-       @Begin
-       @Interface_define: Push
-       @End
-*/
-void push(Deque *q, int x) {
-       size_t b = atomic_load_explicit(&q->bottom, memory_order_relaxed);
-       /**** SPEC (sequential) ****/
-       size_t t = atomic_load_explicit(&q->top, memory_order_acquire);
-       Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
-       if (b - t > atomic_load_explicit(&a->size, memory_order_relaxed) - 1) /* Full queue. */ {
-               resize(q);
-               // CDSSpec can actually detect the same bug if we avoid the UL error
-               //Bug in paper...should have next line...
-               a = (Array *) atomic_load_explicit(&q->array, memory_order_relaxed);
-       }
-       int size = atomic_load_explicit(&a->size, memory_order_relaxed);
-
-       atomic_store_explicit(&a->buffer[b % size], x, memory_order_relaxed);
-       /** @OPDefine: true */
-
-       /**** UL & SPEC (Sync) (run with -u100 to avoid the uninitialized bug) ****/
-       atomic_thread_fence(memory_order_release);
-       atomic_store_explicit(&q->bottom, b + 1, memory_order_relaxed);
-       
-}
-
-/**
-       @Begin
-       @Interface_define: Steal 
-       @End
-*/
-int steal(Deque *q) {
-       //Watch out: actually on need to be an acquire (don't count it)
-       // An old bug
-       size_t t = atomic_load_explicit(&q->top, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: StealReadTop1
-               @End
-       */
-       /********** SPEC error (testcase3.c) **********/
-       atomic_thread_fence(memory_order_seq_cst);
-       /**** SPEC & UL ****/
-       size_t b = atomic_load_explicit(&q->bottom, memory_order_acquire);
-       
-       int x = EMPTY;
-       if (t < b) {
-               /**
-                       @Begin
-                       @Commit_point_clear: true
-                       @Label: StealClear1
-                       @End
-               */
-
-               /* Non-empty queue. */
-               /**** detected UL ****/
-               Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_acquire);
-               int size = atomic_load_explicit(&a->size, memory_order_relaxed);
-               x = atomic_load_explicit(&a->buffer[t % size], memory_order_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: StealReadBuffer
-                       @End
-               */
-               /**** SPEC (sequential) ****/ 
-               bool succ = atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
-                       memory_order_seq_cst, memory_order_relaxed);
-               if (!succ) {
-                       /**
-                               @Begin
-                               @Commit_point_clear: true
-                               @Label: StealClear2
-                               @End
-                       */
-
-                       /**
-                               @Begin
-                               @Commit_point_define_check: true
-                               @Label: StealReadTop2
-                               @End
-                       */
-
-                       /* Failed race. */
-                       return ABORT;
-               }
-       }
-       return x;
-}
diff --git a/benchmark/chase-lev-deque-bugfix/deque.h b/benchmark/chase-lev-deque-bugfix/deque.h
deleted file mode 100644 (file)
index 4d1f34e..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef DEQUE_H
-#define DEQUE_H
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h"
-
-typedef struct {
-       atomic_size_t size;
-       atomic_int buffer[];
-} Array;
-
-typedef struct {
-       atomic_size_t top, bottom;
-       atomic_uintptr_t array; /* Atomic(Array *) */
-} Deque;
-
-#define EMPTY 0xffffffff
-#define ABORT 0xfffffffe
-
-inline bool fail(int ret);
-/**
-    @Begin
-    @Options:
-        LANG = C;
-    @Global_define:
-        @DeclareVar:
-                       IntegerList *__deque;
-        @InitVar:
-                       __deque = createIntegerList();
-               @Finalize:
-                       if (__deque)
-                               destroyIntegerList(__deque);
-                       return true;
-               @DefineFunc:
-                       bool succ(int res) {
-                               return res != EMPTY && res != ABORT;
-                       }
-    @Happens_before:
-        Push -> Steal
-       @Commutativity: Push <-> Steal: true
-       @Commutativity: Take <-> Steal: true
-       @Commutativity: Steal <-> Steal: fail(_Method1.__RET__) || fail(_Method2.__RET__)
-
-    @End
-*/
-
-
-Deque * create();
-void resize(Deque *q);
-
-/**
-    @Begin
-    @Interface: Take 
-    @Commit_point_set: TakeReadBottom | TakeReadBuffer | TakeReadTop
-    @ID: succ(__RET__) ? __RET__ : DEFAULT_CALL_ID
-    @Action:
-               int elem = 0;
-       if (succ(__RET__)) {
-                       elem = back(__deque);
-                       pop_back(__deque);
-                       //model_print("take: elem=%d, __RET__=%d\n", elem, __RET__);
-               }
-    @Post_check: succ(__RET__) ? __RET__ == elem : true
-    @End
-*/
-int take(Deque *q);
-
-/**
-    @Begin
-    @Interface: Push 
-    @Commit_point_set: PushUpdateBuffer
-    @ID: x 
-    @Action:
-               push_back(__deque, x);
-               //model_print("push: elem=%d\n", x);
-    @End
-*/
-void push(Deque *q, int x);
-
-/**
-    @Begin
-    @Interface: Steal 
-    @Commit_point_set: StealReadTop1 | StealReadTop2 | StealReadBuffer
-    @ID: succ(__RET__) ? __RET__ : DEFAULT_CALL_ID
-    @Action:
-       int elem = 0;
-       if (succ(__RET__)) {
-                       elem = front(__deque);
-                       pop_front(__deque);
-                       //model_print("steal: elem=%d, __RET__=%d\n", elem, __RET__);
-               }
-    @Post_check: succ(__RET__) ? __RET__ == elem : true
-    @End
-*/
-int steal(Deque *q);
-
-#endif
diff --git a/benchmark/chase-lev-deque-bugfix/main.c b/benchmark/chase-lev-deque-bugfix/main.c
deleted file mode 100644 (file)
index 87e5b9b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <stdlib.h>
-#include <assert.h>
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include "model-assert.h"
-
-#include "deque.h"
-
-Deque *q;
-int a;
-int b;
-int c;
-
-static void task(void * param) {
-       a=steal(q);
-       if (a == ABORT) {
-               printf("Steal NULL\n");
-       } else {
-               printf("Steal %d\n", a);
-       }
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t;
-       q=create();
-       thrd_create(&t, task, 0);
-       push(q, 1);
-       printf("Push 1\n");
-       push(q, 2);
-       printf("Push 2\n");
-       push(q, 4);
-       printf("Push 4\n");
-       b=take(q);
-       if (b == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", b);
-       }
-       c=take(q);
-       if (c == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", c);
-       }
-       thrd_join(t);
-/*
-       bool correct=true;
-       if (a!=1 && a!=2 && a!=4 && a!= EMPTY)
-               correct=false;
-       if (b!=1 && b!=2 && b!=4 && b!= EMPTY)
-               correct=false;
-       if (c!=1 && c!=2 && c!=4 && a!= EMPTY)
-               correct=false;
-       if (a!=EMPTY && b!=EMPTY && c!=EMPTY && (a+b+c)!=7)
-               correct=false;
-       if (!correct)
-               printf("a=%d b=%d c=%d\n",a,b,c);
-               */
-       //MODEL_ASSERT(correct);
-
-       return 0;
-}
diff --git a/benchmark/chase-lev-deque-bugfix/note.txt b/benchmark/chase-lev-deque-bugfix/note.txt
deleted file mode 100644 (file)
index bfeb604..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#1: Ordering point of the deque as the following: 
-
-First of all, the normal usage scenario of the deque is that the main thread
-calls push() or take() to add or remove elements into the deque from the bottom
-(at the same side) while other independent threads call steal() to remove
-elements from the top (at the other side). Thus intuitively, we use the store of
-the Bottom as the ordering point of push(). For take() and steal(), when there's
-no race between them, meaning that there are more than one element, we also use
-the load of Bottom as their ordering points. However, when take() has to remove
-an preemptively, it will use a CAS to update the Top to race with other stealing
-threads. Thus, in that case, we have additional ordering points at the CAS/Load
-(when failure happens) to order the steal() and the take().
diff --git a/benchmark/chase-lev-deque-bugfix/testcase1.c b/benchmark/chase-lev-deque-bugfix/testcase1.c
deleted file mode 100644 (file)
index e09efde..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <stdlib.h>
-#include <assert.h>
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include "model-assert.h"
-
-#include "deque.h"
-
-Deque *q;
-int a;
-int b;
-int c;
-int x;
-
-static void task(void * param) {
-       a=steal(q);
-       if (a == ABORT) {
-               printf("Steal NULL\n");
-       } else {
-               printf("Steal %d\n", a);
-       }
-       
-       x=steal(q);
-       if (x == ABORT) {
-               printf("Steal NULL\n");
-       } else {
-               printf("Steal %d\n", x);
-       }
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t;
-       q=create();
-       thrd_create(&t, task, 0);
-       push(q, 1);
-       printf("Push 1\n");
-       push(q, 2);
-       printf("Push 2\n");
-       push(q, 4);
-       printf("Push 4\n");
-       b=take(q);
-       if (b == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", b);
-       }
-       c=take(q);
-       if (c == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", c);
-       }
-       thrd_join(t);
-/*
-       bool correct=true;
-       if (a!=1 && a!=2 && a!=4 && a!= EMPTY)
-               correct=false;
-       if (b!=1 && b!=2 && b!=4 && b!= EMPTY)
-               correct=false;
-       if (c!=1 && c!=2 && c!=4 && a!= EMPTY)
-               correct=false;
-       if (a!=EMPTY && b!=EMPTY && c!=EMPTY && (a+b+c)!=7)
-               correct=false;
-       if (!correct)
-               printf("a=%d b=%d c=%d\n",a,b,c);
-               */
-       //MODEL_ASSERT(correct);
-
-       return 0;
-}
diff --git a/benchmark/chase-lev-deque-bugfix/testcase2.c b/benchmark/chase-lev-deque-bugfix/testcase2.c
deleted file mode 100644 (file)
index 5c88a76..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#include <stdlib.h>
-#include <assert.h>
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include "model-assert.h"
-
-#include "deque.h"
-
-Deque *q;
-int a;
-int b;
-int c;
-int x;
-
-static void task(void * param) {
-       a=steal(q);
-       if (a == ABORT) {
-               printf("Steal NULL\n");
-       } else {
-               printf("Steal %d\n", a);
-       }
-       /*
-       x=steal(q);
-       if (x == ABORT) {
-               printf("Steal NULL\n");
-       } else {
-               printf("Steal %d\n", x);
-       }
-       */
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t;
-       q=create();
-       push(q, 1);
-       printf("Push 1\n");
-       push(q, 2);
-       printf("Push 2\n");
-       push(q, 4);
-       printf("Push 4\n");
-       push(q, 5);
-       printf("Push 5\n");
-       thrd_create(&t, task, 0);
-
-       b=take(q);
-       if (b == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", b);
-       }
-       c=take(q);
-       if (c == EMPTY) {
-               printf("Take NULL\n");
-       } else {
-               printf("Take %d\n", c);
-       }
-       thrd_join(t);
-/*
-       bool correct=true;
-       if (a!=1 && a!=2 && a!=4 && a!= EMPTY)
-               correct=false;
-       if (b!=1 && b!=2 && b!=4 && b!= EMPTY)
-               correct=false;
-       if (c!=1 && c!=2 && c!=4 && a!= EMPTY)
-               correct=false;
-       if (a!=EMPTY && b!=EMPTY && c!=EMPTY && (a+b+c)!=7)
-               correct=false;
-       if (!correct)
-               printf("a=%d b=%d c=%d\n",a,b,c);
-               */
-       //MODEL_ASSERT(correct);
-
-       return 0;
-}
diff --git a/benchmark/cliffc-hashtable/.nfs0000000001748028000000e7 b/benchmark/cliffc-hashtable/.nfs0000000001748028000000e7
deleted file mode 100644 (file)
index 3213571..0000000
Binary files a/benchmark/cliffc-hashtable/.nfs0000000001748028000000e7 and /dev/null differ
diff --git a/benchmark/cliffc-hashtable/NonBlockingHashMap.java b/benchmark/cliffc-hashtable/NonBlockingHashMap.java
deleted file mode 100644 (file)
index 5995e48..0000000
+++ /dev/null
@@ -1,1292 +0,0 @@
-/*\r
- * Written by Cliff Click and released to the public domain, as explained at\r
- * http://creativecommons.org/licenses/publicdomain\r
- */\r
-\r
-package org.cliffc.high_scale_lib;\r
-import java.io.IOException;\r
-import java.io.Serializable;\r
-import java.lang.reflect.Field;\r
-import java.util.*;\r
-import java.util.concurrent.ConcurrentMap;\r
-import java.util.concurrent.atomic.*;\r
-import sun.misc.Unsafe;\r
-\r
-/**\r
- * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap}\r
- * with better scaling properties and generally lower costs to mutate the Map.\r
- * It provides identical correctness properties as ConcurrentHashMap.  All\r
- * operations are non-blocking and multi-thread safe, including all update\r
- * operations.  {@link NonBlockingHashMap} scales substatially better than\r
- * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a\r
- * large concurrency factor.  Scaling is linear up to 768 CPUs on a 768-CPU\r
- * Azul box, even with 100% updates or 100% reads or any fraction in-between.\r
- * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box,\r
- * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box.\r
- *\r
- * This class obeys the same functional specification as {@link\r
- * java.util.Hashtable}, and includes versions of methods corresponding to\r
- * each method of <tt>Hashtable</tt>. However, even though all operations are\r
- * thread-safe, operations do <em>not</em> entail locking and there is\r
- * <em>not</em> any support for locking the entire table in a way that\r
- * prevents all access.  This class is fully interoperable with\r
- * <tt>Hashtable</tt> in programs that rely on its thread safety but not on\r
- * its synchronization details.\r
- *\r
- * <p> Operations (including <tt>put</tt>) generally do not block, so may\r
- * overlap with other update operations (including other <tt>puts</tt> and\r
- * <tt>removes</tt>).  Retrievals reflect the results of the most recently\r
- * <em>completed</em> update operations holding upon their onset.  For\r
- * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may\r
- * reflect insertion or removal of only some entries.  Similarly, Iterators\r
- * and Enumerations return elements reflecting the state of the hash table at\r
- * some point at or since the creation of the iterator/enumeration.  They do\r
- * <em>not</em> throw {@link ConcurrentModificationException}.  However,\r
- * iterators are designed to be used by only one thread at a time.\r
- *\r
- * <p> Very full tables, or tables with high reprobe rates may trigger an\r
- * internal resize operation to move into a larger table.  Resizing is not\r
- * terribly expensive, but it is not free either; during resize operations\r
- * table throughput may drop somewhat.  All threads that visit the table\r
- * during a resize will 'help' the resizing but will still be allowed to\r
- * complete their operation before the resize is finished (i.e., a simple\r
- * 'get' operation on a million-entry table undergoing resizing will not need\r
- * to block until the entire million entries are copied).\r
- *\r
- * <p>This class and its views and iterators implement all of the\r
- * <em>optional</em> methods of the {@link Map} and {@link Iterator}\r
- * interfaces.\r
- *\r
- * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class\r
- * does <em>not</em> allow <tt>null</tt> to be used as a key or value.\r
- *\r
- *\r
- * @since 1.5\r
- * @author Cliff Click\r
- * @param <TypeK> the type of keys maintained by this map\r
- * @param <TypeV> the type of mapped values\r
- *\r
- * @version 1.1.2\r
- * @author Prashant Deva - moved hash() function out of get_impl() so it is\r
- * not calculated multiple times.\r
- */\r
-\r
-public class NonBlockingHashMap<TypeK, TypeV>\r
-  extends AbstractMap<TypeK, TypeV>\r
-  implements ConcurrentMap<TypeK, TypeV>, Cloneable, Serializable {\r
-\r
-  private static final long serialVersionUID = 1234123412341234123L;\r
-\r
-  private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize\r
-\r
-  // --- Bits to allow Unsafe access to arrays\r
-  private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();\r
-  private static final int _Obase  = _unsafe.arrayBaseOffset(Object[].class);\r
-  private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class);\r
-  private static long rawIndex(final Object[] ary, final int idx) {\r
-    assert idx >= 0 && idx < ary.length;\r
-    return _Obase + idx * _Oscale;\r
-  }\r
-\r
-  // --- Setup to use Unsafe\r
-  private static final long _kvs_offset;\r
-  static {                      // <clinit>\r
-    Field f = null;\r
-    try { f = NonBlockingHashMap.class.getDeclaredField("_kvs"); }\r
-    catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); }\r
-    _kvs_offset = _unsafe.objectFieldOffset(f);\r
-  }\r
-  private final boolean CAS_kvs( final Object[] oldkvs, final Object[] newkvs ) {\r
-    return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs );\r
-  }\r
-\r
-  // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class\r
-  private static final class Prime {\r
-    final Object _V;\r
-    Prime( Object V ) { _V = V; }\r
-    static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; }\r
-  }\r
-\r
-  // --- hash ----------------------------------------------------------------\r
-  // Helper function to spread lousy hashCodes\r
-  private static final int hash(final Object key) {\r
-    int h = key.hashCode();     // The real hashCode call\r
-    // Spread bits to regularize both segment and index locations,\r
-    // using variant of single-word Wang/Jenkins hash.\r
-    h += (h <<  15) ^ 0xffffcd7d;\r
-    h ^= (h >>> 10);\r
-    h += (h <<   3);\r
-    h ^= (h >>>  6);\r
-    h += (h <<   2) + (h << 14);\r
-    return h ^ (h >>> 16);\r
-  }\r
-\r
-  // --- The Hash Table --------------------\r
-  // Slot 0 is always used for a 'CHM' entry below to hold the interesting\r
-  // bits of the hash table.  Slot 1 holds full hashes as an array of ints.\r
-  // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs.  The entire hash table\r
-  // can be atomically replaced by CASing the _kvs field.\r
-  //\r
-  // Why is CHM buried inside the _kvs Object array, instead of the other way\r
-  // around?  The CHM info is used during resize events and updates, but not\r
-  // during standard 'get' operations.  I assume 'get' is much more frequent\r
-  // than 'put'.  'get' can skip the extra indirection of skipping through the\r
-  // CHM to reach the _kvs array.\r
-  private transient Object[] _kvs;\r
-  private static final CHM   chm   (Object[] kvs) { return (CHM  )kvs[0]; }\r
-  private static final int[] hashes(Object[] kvs) { return (int[])kvs[1]; }\r
-  // Number of K,V pairs in the table\r
-  private static final int len(Object[] kvs) { return (kvs.length-2)>>1; }\r
-\r
-  // Time since last resize\r
-  private transient long _last_resize_milli;\r
-\r
-  // --- Minimum table size ----------------\r
-  // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a\r
-  // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul.\r
-  private static final int MIN_SIZE_LOG=3;             //\r
-  private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2\r
-\r
-  // --- Sentinels -------------------------\r
-  // No-Match-Old - putIfMatch does updates only if it matches the old value,\r
-  // and NO_MATCH_OLD basically counts as a wildcard match.\r
-  private static final Object NO_MATCH_OLD = new Object(); // Sentinel\r
-  // Match-Any-not-null - putIfMatch does updates only if it find a real old\r
-  // value.\r
-  private static final Object MATCH_ANY = new Object(); // Sentinel\r
-  // This K/V pair has been deleted (but the Key slot is forever claimed).\r
-  // The same Key can be reinserted with a new value later.\r
-  private static final Object TOMBSTONE = new Object();\r
-  // Prime'd or box'd version of TOMBSTONE.  This K/V pair was deleted, then a\r
-  // table resize started.  The K/V pair has been marked so that no new\r
-  // updates can happen to the old table (and since the K/V pair was deleted\r
-  // nothing was copied to the new table).\r
-  private static final Prime TOMBPRIME = new Prime(TOMBSTONE);\r
-\r
-  // --- key,val -------------------------------------------------------------\r
-  // Access K,V for a given idx\r
-  //\r
-  // Note that these are static, so that the caller is forced to read the _kvs\r
-  // field only once, and share that read across all key/val calls - lest the\r
-  // _kvs field move out from under us and back-to-back key & val calls refer\r
-  // to different _kvs arrays.\r
-  private static final Object key(Object[] kvs,int idx) { return kvs[(idx<<1)+2]; }\r
-  private static final Object val(Object[] kvs,int idx) { return kvs[(idx<<1)+3]; }\r
-  private static final boolean CAS_key( Object[] kvs, int idx, Object old, Object key ) {\r
-    return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+2), old, key );\r
-  }\r
-  private static final boolean CAS_val( Object[] kvs, int idx, Object old, Object val ) {\r
-    return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+3), old, val );\r
-  }\r
-\r
-\r
-  // --- dump ----------------------------------------------------------------\r
-  /** Verbose printout of table internals, useful for debugging.  */\r
-  public final void print() {\r
-    System.out.println("=========");\r
-    print2(_kvs);\r
-    System.out.println("=========");\r
-  }\r
-  // print the entire state of the table\r
-  private final void print( Object[] kvs ) {\r
-    for( int i=0; i<len(kvs); i++ ) {\r
-      Object K = key(kvs,i);\r
-      if( K != null ) {\r
-        String KS = (K == TOMBSTONE) ? "XXX" : K.toString();\r
-        Object V = val(kvs,i);\r
-        Object U = Prime.unbox(V);\r
-        String p = (V==U) ? "" : "prime_";\r
-        String US = (U == TOMBSTONE) ? "tombstone" : U.toString();\r
-        System.out.println(""+i+" ("+KS+","+p+US+")");\r
-      }\r
-    }\r
-    Object[] newkvs = chm(kvs)._newkvs; // New table, if any\r
-    if( newkvs != null ) {\r
-      System.out.println("----");\r
-      print(newkvs);\r
-    }\r
-  }\r
-  // print only the live values, broken down by the table they are in\r
-  private final void print2( Object[] kvs) {\r
-    for( int i=0; i<len(kvs); i++ ) {\r
-      Object key = key(kvs,i);\r
-      Object val = val(kvs,i);\r
-      Object U = Prime.unbox(val);\r
-      if( key != null && key != TOMBSTONE &&  // key is sane\r
-          val != null && U   != TOMBSTONE ) { // val is sane\r
-        String p = (val==U) ? "" : "prime_";\r
-        System.out.println(""+i+" ("+key+","+p+val+")");\r
-      }\r
-    }\r
-    Object[] newkvs = chm(kvs)._newkvs; // New table, if any\r
-    if( newkvs != null ) {\r
-      System.out.println("----");\r
-      print2(newkvs);\r
-    }\r
-  }\r
-\r
-  // Count of reprobes\r
-  private transient Counter _reprobes = new Counter();\r
-  /** Get and clear the current count of reprobes.  Reprobes happen on key\r
-   *  collisions, and a high reprobe rate may indicate a poor hash function or\r
-   *  weaknesses in the table resizing function.\r
-   *  @return the count of reprobes since the last call to {@link #reprobes}\r
-   *  or since the table was created.   */\r
-  public long reprobes() { long r = _reprobes.get(); _reprobes = new Counter(); return r; }\r
-\r
-\r
-  // --- reprobe_limit -----------------------------------------------------\r
-  // Heuristic to decide if we have reprobed toooo many times.  Running over\r
-  // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it\r
-  // can trigger a table resize.  Several places must have exact agreement on\r
-  // what the reprobe_limit is, so we share it here.\r
-  private static final int reprobe_limit( int len ) {\r
-    return REPROBE_LIMIT + (len>>2);\r
-  }\r
-\r
-  // --- NonBlockingHashMap --------------------------------------------------\r
-  // Constructors\r
-\r
-  /** Create a new NonBlockingHashMap with default minimum size (currently set\r
-   *  to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */\r
-  public NonBlockingHashMap( ) { this(MIN_SIZE); }\r
-\r
-  /** Create a new NonBlockingHashMap with initial room for the given number of\r
-   *  elements, thus avoiding internal resizing operations to reach an\r
-   *  appropriate size.  Large numbers here when used with a small count of\r
-   *  elements will sacrifice space for a small amount of time gained.  The\r
-   *  initial size will be rounded up internally to the next larger power of 2. */\r
-  public NonBlockingHashMap( final int initial_sz ) { initialize(initial_sz); }\r
-  private final void initialize( int initial_sz ) {\r
-    if( initial_sz < 0 ) throw new IllegalArgumentException();\r
-    int i;                      // Convert to next largest power-of-2\r
-    if( initial_sz > 1024*1024 ) initial_sz = 1024*1024;\r
-    for( i=MIN_SIZE_LOG; (1<<i) < (initial_sz<<2); i++ ) ;\r
-    // Double size for K,V pairs, add 1 for CHM and 1 for hashes\r
-    _kvs = new Object[((1<<i)<<1)+2];\r
-    _kvs[0] = new CHM(new Counter()); // CHM in slot 0\r
-    _kvs[1] = new int[1<<i];          // Matching hash entries\r
-    _last_resize_milli = System.currentTimeMillis();\r
-  }\r
-  // Version for subclassed readObject calls, to be called after the defaultReadObject\r
-  protected final void initialize() { initialize(MIN_SIZE); }\r
-\r
-  // --- wrappers ------------------------------------------------------------\r
-\r
-  /** Returns the number of key-value mappings in this map.\r
-   *  @return the number of key-value mappings in this map */\r
-  @Override \r
-  public int     size       ( )                       { return chm(_kvs).size(); }\r
-  /** Returns <tt>size() == 0</tt>.\r
-   *  @return <tt>size() == 0</tt> */\r
-  @Override \r
-  public boolean isEmpty    ( )                       { return size() == 0;      }\r
-\r
-  /** Tests if the key in the table using the <tt>equals</tt> method.\r
-   * @return <tt>true</tt> if the key is in the table using the <tt>equals</tt> method\r
-   * @throws NullPointerException if the specified key is null  */\r
-  @Override \r
-  public boolean containsKey( Object key )            { return get(key) != null; }\r
-\r
-  /** Legacy method testing if some key maps into the specified value in this\r
-   *  table.  This method is identical in functionality to {@link\r
-   *  #containsValue}, and exists solely to ensure full compatibility with\r
-   *  class {@link java.util.Hashtable}, which supported this method prior to\r
-   *  introduction of the Java Collections framework.\r
-   *  @param  val a value to search for\r
-   *  @return <tt>true</tt> if this map maps one or more keys to the specified value\r
-   *  @throws NullPointerException if the specified value is null */\r
-  public boolean contains   ( Object val )            { return containsValue(val); }\r
-\r
-  /** Maps the specified key to the specified value in the table.  Neither key\r
-   *  nor value can be null.\r
-   *  <p> The value can be retrieved by calling {@link #get} with a key that is\r
-   *  equal to the original key.\r
-   *  @param key key with which the specified value is to be associated\r
-   *  @param val value to be associated with the specified key\r
-   *  @return the previous value associated with <tt>key</tt>, or\r
-   *          <tt>null</tt> if there was no mapping for <tt>key</tt>\r
-   *  @throws NullPointerException if the specified key or value is null  */\r
-  @Override\r
-  public TypeV   put        ( TypeK  key, TypeV val ) { return putIfMatch( key,      val, NO_MATCH_OLD); }\r
-\r
-  /** Atomically, do a {@link #put} if-and-only-if the key is not mapped.\r
-   *  Useful to ensure that only a single mapping for the key exists, even if\r
-   *  many threads are trying to create the mapping in parallel.\r
-   *  @return the previous value associated with the specified key,\r
-   *         or <tt>null</tt> if there was no mapping for the key\r
-   *  @throws NullPointerException if the specified key or value is null  */\r
-  public TypeV   putIfAbsent( TypeK  key, TypeV val ) { return putIfMatch( key,      val, TOMBSTONE   ); }\r
-\r
-  /** Removes the key (and its corresponding value) from this map.\r
-   *  This method does nothing if the key is not in the map.\r
-   *  @return the previous value associated with <tt>key</tt>, or\r
-   *         <tt>null</tt> if there was no mapping for <tt>key</tt>\r
-   *  @throws NullPointerException if the specified key is null */\r
-  @Override\r
-  public TypeV   remove     ( Object key )            { return putIfMatch( key,TOMBSTONE, NO_MATCH_OLD); }\r
-\r
-  /** Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped\r
-   *  to a value which is <code>equals</code> to the given value.\r
-   *  @throws NullPointerException if the specified key or value is null */\r
-  public boolean remove     ( Object key,Object val ) { return putIfMatch( key,TOMBSTONE, val ) == val; }\r
-\r
-  /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is\r
-   *  mapped to some value already.\r
-   *  @throws NullPointerException if the specified key or value is null */\r
-  public TypeV   replace    ( TypeK  key, TypeV val ) { return putIfMatch( key,      val,MATCH_ANY   ); }\r
-\r
-  /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is\r
-   *  mapped a value which is <code>equals</code> to <code>oldValue</code>.\r
-   *  @throws NullPointerException if the specified key or value is null */\r
-  public boolean replace    ( TypeK  key, TypeV  oldValue, TypeV newValue ) {\r
-    return putIfMatch( key, newValue, oldValue ) == oldValue;\r
-  }\r
-\r
-  private final TypeV putIfMatch( Object key, Object newVal, Object oldVal ) {\r
-    if (oldVal == null || newVal == null) throw new NullPointerException();\r
-    final Object res = putIfMatch( this, _kvs, key, newVal, oldVal );\r
-    assert !(res instanceof Prime);\r
-    assert res != null;\r
-    return res == TOMBSTONE ? null : (TypeV)res;\r
-  }\r
-\r
-\r
-  /** Copies all of the mappings from the specified map to this one, replacing\r
-   *  any existing mappings.\r
-   *  @param m mappings to be stored in this map */\r
-  @Override\r
-  public void putAll(Map<? extends TypeK, ? extends TypeV> m) {\r
-    for (Map.Entry<? extends TypeK, ? extends TypeV> e : m.entrySet())\r
-      put(e.getKey(), e.getValue());\r
-  }\r
-\r
-  /** Removes all of the mappings from this map. */\r
-  @Override\r
-  public void clear() {         // Smack a new empty table down\r
-    Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs;\r
-    while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works\r
-      ;\r
-  }\r
-\r
-  /** Returns <tt>true</tt> if this Map maps one or more keys to the specified\r
-   *  value.  <em>Note</em>: This method requires a full internal traversal of the\r
-   *  hash table and is much slower than {@link #containsKey}.\r
-   *  @param val value whose presence in this map is to be tested\r
-   *  @return <tt>true</tt> if this map maps one or more keys to the specified value\r
-   *  @throws NullPointerException if the specified value is null */\r
-  @Override\r
-  public boolean containsValue( final Object val ) {\r
-    if( val == null ) throw new NullPointerException();\r
-    for( TypeV V : values() )\r
-      if( V == val || V.equals(val) )\r
-        return true;\r
-    return false;\r
-  }\r
-\r
-  // This function is supposed to do something for Hashtable, and the JCK\r
-  // tests hang until it gets called... by somebody ... for some reason,\r
-  // any reason....\r
-  protected void rehash() {\r
-  }\r
-\r
-  /**\r
-   * Creates a shallow copy of this hashtable. All the structure of the\r
-   * hashtable itself is copied, but the keys and values are not cloned.\r
-   * This is a relatively expensive operation.\r
-   *\r
-   * @return  a clone of the hashtable.\r
-   */\r
-  @Override\r
-  public Object clone() {\r
-    try {\r
-      // Must clone, to get the class right; NBHM might have been\r
-      // extended so it would be wrong to just make a new NBHM.\r
-      NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone();\r
-      // But I don't have an atomic clone operation - the underlying _kvs\r
-      // structure is undergoing rapid change.  If I just clone the _kvs\r
-      // field, the CHM in _kvs[0] won't be in sync.\r
-      //\r
-      // Wipe out the cloned array (it was shallow anyways).\r
-      t.clear();\r
-      // Now copy sanely\r
-      for( TypeK K : keySet() ) {\r
-        final TypeV V = get(K);  // Do an official 'get'\r
-        t.put(K,V);\r
-      }\r
-      return t;\r
-    } catch (CloneNotSupportedException e) {\r
-      // this shouldn't happen, since we are Cloneable\r
-      throw new InternalError();\r
-    }\r
-  }\r
-\r
-  /**\r
-   * Returns a string representation of this map.  The string representation\r
-   * consists of a list of key-value mappings in the order returned by the\r
-   * map's <tt>entrySet</tt> view's iterator, enclosed in braces\r
-   * (<tt>"{}"</tt>).  Adjacent mappings are separated by the characters\r
-   * <tt>", "</tt> (comma and space).  Each key-value mapping is rendered as\r
-   * the key followed by an equals sign (<tt>"="</tt>) followed by the\r
-   * associated value.  Keys and values are converted to strings as by\r
-   * {@link String#valueOf(Object)}.\r
-   *\r
-   * @return a string representation of this map\r
-   */\r
-  @Override\r
-  public String toString() {\r
-    Iterator<Entry<TypeK,TypeV>> i = entrySet().iterator();\r
-    if( !i.hasNext())\r
-      return "{}";\r
-\r
-    StringBuilder sb = new StringBuilder();\r
-    sb.append('{');\r
-    for (;;) {\r
-      Entry<TypeK,TypeV> e = i.next();\r
-      TypeK key = e.getKey();\r
-      TypeV value = e.getValue();\r
-      sb.append(key   == this ? "(this Map)" : key);\r
-      sb.append('=');\r
-      sb.append(value == this ? "(this Map)" : value);\r
-      if( !i.hasNext())\r
-        return sb.append('}').toString();\r
-      sb.append(", ");\r
-    }\r
-  }\r
-\r
-  // --- keyeq ---------------------------------------------------------------\r
-  // Check for key equality.  Try direct pointer compare first, then see if\r
-  // the hashes are unequal (fast negative test) and finally do the full-on\r
-  // 'equals' v-call.\r
-  private static boolean keyeq( Object K, Object key, int[] hashes, int hash, int fullhash ) {\r
-    return\r
-      K==key ||                 // Either keys match exactly OR\r
-      // hash exists and matches?  hash can be zero during the install of a\r
-      // new key/value pair.\r
-      ((hashes[hash] == 0 || hashes[hash] == fullhash) &&\r
-       // Do not call the users' "equals()" call with a Tombstone, as this can\r
-       // surprise poorly written "equals()" calls that throw exceptions\r
-       // instead of simply returning false.\r
-       K != TOMBSTONE &&        // Do not call users' equals call with a Tombstone\r
-       // Do the match the hard way - with the users' key being the loop-\r
-       // invariant "this" pointer.  I could have flipped the order of\r
-       // operands (since equals is commutative), but I'm making mega-morphic\r
-       // v-calls in a reprobing loop and nailing down the 'this' argument\r
-       // gives both the JIT and the hardware a chance to prefetch the call target.\r
-       key.equals(K));          // Finally do the hard match\r
-  }\r
-\r
-  // --- get -----------------------------------------------------------------\r
-  /** Returns the value to which the specified key is mapped, or {@code null}\r
-   *  if this map contains no mapping for the key.\r
-   *  <p>More formally, if this map contains a mapping from a key {@code k} to\r
-   *  a value {@code v} such that {@code key.equals(k)}, then this method\r
-   *  returns {@code v}; otherwise it returns {@code null}.  (There can be at\r
-   *  most one such mapping.)\r
-   * @throws NullPointerException if the specified key is null */\r
-  // Never returns a Prime nor a Tombstone.\r
-  @Override\r
-  public TypeV get( Object key ) {\r
-    final int fullhash= hash (key); // throws NullPointerException if key is null\r
-    final Object V = get_impl(this,_kvs,key,fullhash);\r
-    assert !(V instanceof Prime); // Never return a Prime\r
-    return (TypeV)V;\r
-  }\r
-\r
-  private static final Object get_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final int fullhash ) {\r
-    final int len     = len  (kvs); // Count of key/value pairs, reads kvs.length\r
-    final CHM chm     = chm  (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs\r
-    final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs\r
-\r
-    int idx = fullhash & (len-1); // First key hash\r
-\r
-    // Main spin/reprobe loop, looking for a Key hit\r
-    int reprobe_cnt=0;\r
-    while( true ) {\r
-      // Probe table.  Each read of 'val' probably misses in cache in a big\r
-      // table; hopefully the read of 'key' then hits in cache.\r
-      final Object K = key(kvs,idx); // Get key   before volatile read, could be null\r
-      final Object V = val(kvs,idx); // Get value before volatile read, could be null or Tombstone or Prime\r
-      if( K == null ) return null;   // A clear miss\r
-\r
-      // We need a volatile-read here to preserve happens-before semantics on\r
-      // newly inserted Keys.  If the Key body was written just before inserting\r
-      // into the table a Key-compare here might read the uninitalized Key body.\r
-      // Annoyingly this means we have to volatile-read before EACH key compare.\r
-      // .\r
-      // We also need a volatile-read between reading a newly inserted Value\r
-      // and returning the Value (so the user might end up reading the stale\r
-      // Value contents).  Same problem as with keys - and the one volatile\r
-      // read covers both.\r
-      final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare\r
-\r
-      // Key-compare\r
-      if( keyeq(K,key,hashes,idx,fullhash) ) {\r
-        // Key hit!  Check for no table-copy-in-progress\r
-        if( !(V instanceof Prime) ) // No copy?\r
-          return (V == TOMBSTONE) ? null : V; // Return the value\r
-        // Key hit - but slot is (possibly partially) copied to the new table.\r
-        // Finish the copy & retry in the new table.\r
-        return get_impl(topmap,chm.copy_slot_and_check(topmap,kvs,idx,key),key,fullhash); // Retry in the new table\r
-      }\r
-      // get and put must have the same key lookup logic!  But only 'put'\r
-      // needs to force a table-resize for a too-long key-reprobe sequence.\r
-      // Check for too-many-reprobes on get - and flip to the new table.\r
-         // ???? Why a TOMBSTONE key means no more keys in this table\r
-         // because a TOMBSTONE key should be null before\r
-      if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes\r
-          key == TOMBSTONE ) // found a TOMBSTONE key, means no more keys in this table\r
-        return newkvs == null ? null : get_impl(topmap,topmap.help_copy(newkvs),key,fullhash); // Retry in the new table\r
-\r
-      idx = (idx+1)&(len-1);    // Reprobe by 1!  (could now prefetch)\r
-    }\r
-  }\r
-\r
-  // --- putIfMatch ---------------------------------------------------------\r
-  // Put, Remove, PutIfAbsent, etc.  Return the old value.  If the returned\r
-  // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be\r
-  // assumed to work (although might have been immediately overwritten).  Only\r
-  // the path through copy_slot passes in an expected value of null, and\r
-  // putIfMatch only returns a null if passed in an expected null.\r
-  private static final Object putIfMatch( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal ) {\r
-    assert putval != null;\r
-    assert !(putval instanceof Prime);\r
-    assert !(expVal instanceof Prime);\r
-    final int fullhash = hash  (key); // throws NullPointerException if key null\r
-    final int len      = len   (kvs); // Count of key/value pairs, reads kvs.length\r
-    final CHM chm      = chm   (kvs); // Reads kvs[0]\r
-    final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0]\r
-    int idx = fullhash & (len-1);\r
-\r
-    // ---\r
-    // Key-Claim stanza: spin till we can claim a Key (or force a resizing).\r
-    int reprobe_cnt=0;\r
-    Object K=null, V=null;\r
-    Object[] newkvs=null;\r
-    while( true ) {             // Spin till we get a Key slot\r
-      V = val(kvs,idx);         // Get old value (before volatile read below!)\r
-      K = key(kvs,idx);         // Get current key\r
-      if( K == null ) {         // Slot is free?\r
-        // Found an empty Key slot - which means this Key has never been in\r
-        // this table.  No need to put a Tombstone - the Key is not here!\r
-        if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table\r
-        // Claim the null key-slot\r
-        if( CAS_key(kvs,idx, null, key ) ) { // Claim slot for Key\r
-          chm._slots.add(1);      // Raise key-slots-used count\r
-          hashes[idx] = fullhash; // Memoize fullhash\r
-          break;                  // Got it!\r
-        }\r
-        // CAS to claim the key-slot failed.\r
-        //\r
-        // This re-read of the Key points out an annoying short-coming of Java\r
-        // CAS.  Most hardware CAS's report back the existing value - so that\r
-        // if you fail you have a *witness* - the value which caused the CAS\r
-        // to fail.  The Java API turns this into a boolean destroying the\r
-        // witness.  Re-reading does not recover the witness because another\r
-        // thread can write over the memory after the CAS.  Hence we can be in\r
-        // the unfortunate situation of having a CAS fail *for cause* but\r
-        // having that cause removed by a later store.  This turns a\r
-        // non-spurious-failure CAS (such as Azul has) into one that can\r
-        // apparently spuriously fail - and we avoid apparent spurious failure\r
-        // by not allowing Keys to ever change.\r
-        K = key(kvs,idx);       // CAS failed, get updated value\r
-        assert K != null;       // If keys[idx] is null, CAS shoulda worked\r
-      }\r
-      // Key slot was not null, there exists a Key here\r
-\r
-      // We need a volatile-read here to preserve happens-before semantics on\r
-      // newly inserted Keys.  If the Key body was written just before inserting\r
-      // into the table a Key-compare here might read the uninitalized Key body.\r
-      // Annoyingly this means we have to volatile-read before EACH key compare.\r
-      newkvs = chm._newkvs;     // VOLATILE READ before key compare\r
-\r
-      if( keyeq(K,key,hashes,idx,fullhash) )\r
-        break;                  // Got it!\r
-\r
-      // get and put must have the same key lookup logic!  Lest 'get' give\r
-      // up looking too soon.\r
-      //topmap._reprobes.add(1);\r
-      if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes or\r
-          key == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys\r
-        // We simply must have a new table to do a 'put'.  At this point a\r
-        // 'get' will also go to the new table (if any).  We do not need\r
-        // to claim a key slot (indeed, we cannot find a free one to claim!).\r
-        newkvs = chm.resize(topmap,kvs);\r
-        if( expVal != null ) topmap.help_copy(newkvs); // help along an existing copy\r
-        return putIfMatch(topmap,newkvs,key,putval,expVal);\r
-      }\r
-\r
-      idx = (idx+1)&(len-1); // Reprobe!\r
-    } // End of spinning till we get a Key slot\r
-\r
-    // ---\r
-    // Found the proper Key slot, now update the matching Value slot.  We\r
-    // never put a null, so Value slots monotonically move from null to\r
-    // not-null (deleted Values use Tombstone).  Thus if 'V' is null we\r
-    // fail this fast cutout and fall into the check for table-full.\r
-    if( putval == V ) return V; // Fast cutout for no-change\r
-\r
-    // See if we want to move to a new table (to avoid high average re-probe\r
-    // counts).  We only check on the initial set of a Value from null to\r
-    // not-null (i.e., once per key-insert).  Of course we got a 'free' check\r
-    // of newkvs once per key-compare (not really free, but paid-for by the\r
-    // time we get here).\r
-    if( newkvs == null &&       // New table-copy already spotted?\r
-        // Once per fresh key-insert check the hard way\r
-        ((V == null && chm.tableFull(reprobe_cnt,len)) ||\r
-         // Or we found a Prime, but the JMM allowed reordering such that we\r
-         // did not spot the new table (very rare race here: the writing\r
-         // thread did a CAS of _newkvs then a store of a Prime.  This thread\r
-         // reads the Prime, then reads _newkvs - but the read of Prime was so\r
-         // delayed (or the read of _newkvs was so accelerated) that they\r
-         // swapped and we still read a null _newkvs.  The resize call below\r
-         // will do a CAS on _newkvs forcing the read.\r
-         V instanceof Prime) )\r
-      newkvs = chm.resize(topmap,kvs); // Force the new table copy to start\r
-    // See if we are moving to a new table.\r
-    // If so, copy our slot and retry in the new table.\r
-    if( newkvs != null )\r
-      return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal);\r
-\r
-    // ---\r
-    // We are finally prepared to update the existing table\r
-    while( true ) {\r
-      assert !(V instanceof Prime);\r
-\r
-      // Must match old, and we do not?  Then bail out now.  Note that either V\r
-      // or expVal might be TOMBSTONE.  Also V can be null, if we've never\r
-      // inserted a value before.  expVal can be null if we are called from\r
-      // copy_slot.\r
-\r
-      if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all?\r
-          V != expVal &&            // No instant match already?\r
-          (expVal != MATCH_ANY || V == TOMBSTONE || V == null) &&\r
-          !(V==null && expVal == TOMBSTONE) &&    // Match on null/TOMBSTONE combo\r
-          (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last\r
-        return V;                                 // Do not update!\r
-\r
-      // Actually change the Value in the Key,Value pair\r
-      if( CAS_val(kvs, idx, V, putval ) ) {\r
-        // CAS succeeded - we did the update!\r
-        // Both normal put's and table-copy calls putIfMatch, but table-copy\r
-        // does not (effectively) increase the number of live k/v pairs.\r
-        if( expVal != null ) {\r
-          // Adjust sizes - a striped counter\r
-          if(  (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) chm._size.add( 1);\r
-          if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) chm._size.add(-1);\r
-        }\r
-        return (V==null && expVal!=null) ? TOMBSTONE : V;\r
-      } \r
-      // Else CAS failed\r
-      V = val(kvs,idx);         // Get new value\r
-      // If a Prime'd value got installed, we need to re-run the put on the\r
-      // new table.  Otherwise we lost the CAS to another racing put.\r
-      // Simply retry from the start.\r
-      if( V instanceof Prime )\r
-        return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal);\r
-    }\r
-  }\r
-\r
-  // --- help_copy ---------------------------------------------------------\r
-  // Help along an existing resize operation.  This is just a fast cut-out\r
-  // wrapper, to encourage inlining for the fast no-copy-in-progress case.  We\r
-  // always help the top-most table copy, even if there are nested table\r
-  // copies in progress.\r
-  private final Object[] help_copy( Object[] helper ) {\r
-    // Read the top-level KVS only once.  We'll try to help this copy along,\r
-    // even if it gets promoted out from under us (i.e., the copy completes\r
-    // and another KVS becomes the top-level copy).\r
-    Object[] topkvs = _kvs;\r
-    CHM topchm = chm(topkvs);\r
-    if( topchm._newkvs == null ) return helper; // No copy in-progress\r
-    topchm.help_copy_impl(this,topkvs,false);\r
-    return helper;\r
-  }\r
-\r
-\r
-  // --- CHM -----------------------------------------------------------------\r
-  // The control structure for the NonBlockingHashMap\r
-  private static final class CHM<TypeK,TypeV> {\r
-    // Size in active K,V pairs\r
-    private final Counter _size;\r
-    public int size () { return (int)_size.get(); }\r
-\r
-    // ---\r
-    // These next 2 fields are used in the resizing heuristics, to judge when\r
-    // it is time to resize or copy the table.  Slots is a count of used-up\r
-    // key slots, and when it nears a large fraction of the table we probably\r
-    // end up reprobing too much.  Last-resize-milli is the time since the\r
-    // last resize; if we are running back-to-back resizes without growing\r
-    // (because there are only a few live keys but many slots full of dead\r
-    // keys) then we need a larger table to cut down on the churn.\r
-\r
-    // Count of used slots, to tell when table is full of dead unusable slots\r
-    private final Counter _slots;\r
-    public int slots() { return (int)_slots.get(); }\r
-\r
-    // ---\r
-    // New mappings, used during resizing.\r
-    // The 'new KVs' array - created during a resize operation.  This\r
-    // represents the new table being copied from the old one.  It's the\r
-    // volatile variable that is read as we cross from one table to the next,\r
-    // to get the required memory orderings.  It monotonically transits from\r
-    // null to set (once).\r
-    volatile Object[] _newkvs;\r
-    private final AtomicReferenceFieldUpdater<CHM,Object[]> _newkvsUpdater =\r
-      AtomicReferenceFieldUpdater.newUpdater(CHM.class,Object[].class, "_newkvs");\r
-    // Set the _next field if we can.\r
-    boolean CAS_newkvs( Object[] newkvs ) {\r
-      while( _newkvs == null )\r
-        if( _newkvsUpdater.compareAndSet(this,null,newkvs) )\r
-          return true;\r
-      return false;\r
-    }\r
-    // Sometimes many threads race to create a new very large table.  Only 1\r
-    // wins the race, but the losers all allocate a junk large table with\r
-    // hefty allocation costs.  Attempt to control the overkill here by\r
-    // throttling attempts to create a new table.  I cannot really block here\r
-    // (lest I lose the non-blocking property) but late-arriving threads can\r
-    // give the initial resizing thread a little time to allocate the initial\r
-    // new table.  The Right Long Term Fix here is to use array-lets and\r
-    // incrementally create the new very large array.  In C I'd make the array\r
-    // with malloc (which would mmap under the hood) which would only eat\r
-    // virtual-address and not real memory - and after Somebody wins then we\r
-    // could in parallel initialize the array.  Java does not allow\r
-    // un-initialized array creation (especially of ref arrays!).\r
-    volatile long _resizers; // count of threads attempting an initial resize\r
-    private static final AtomicLongFieldUpdater<CHM> _resizerUpdater =\r
-      AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers");\r
-\r
-    // ---\r
-    // Simple constructor\r
-    CHM( Counter size ) {\r
-      _size = size;\r
-      _slots= new Counter();\r
-    }\r
-\r
-    // --- tableFull ---------------------------------------------------------\r
-    // Heuristic to decide if this table is too full, and we should start a\r
-    // new table.  Note that if a 'get' call has reprobed too many times and\r
-    // decided the table must be full, then always the estimate_sum must be\r
-    // high and we must report the table is full.  If we do not, then we might\r
-    // end up deciding that the table is not full and inserting into the\r
-    // current table, while a 'get' has decided the same key cannot be in this\r
-    // table because of too many reprobes.  The invariant is:\r
-    //   slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len)\r
-    private final boolean tableFull( int reprobe_cnt, int len ) {\r
-      return\r
-        // Do the cheap check first: we allow some number of reprobes always\r
-        reprobe_cnt >= REPROBE_LIMIT &&\r
-        // More expensive check: see if the table is > 1/4 full.\r
-        _slots.estimate_get() >= reprobe_limit(len);\r
-    }\r
-\r
-    // --- resize ------------------------------------------------------------\r
-    // Resizing after too many probes.  "How Big???" heuristics are here.\r
-    // Callers will (not this routine) will 'help_copy' any in-progress copy.\r
-    // Since this routine has a fast cutout for copy-already-started, callers\r
-    // MUST 'help_copy' lest we have a path which forever runs through\r
-    // 'resize' only to discover a copy-in-progress which never progresses.\r
-    private final Object[] resize( NonBlockingHashMap topmap, Object[] kvs) {\r
-      assert chm(kvs) == this;\r
-\r
-      // Check for resize already in progress, probably triggered by another thread\r
-      Object[] newkvs = _newkvs; // VOLATILE READ\r
-      if( newkvs != null )       // See if resize is already in progress\r
-        return newkvs;           // Use the new table already\r
-\r
-      // No copy in-progress, so start one.  First up: compute new table size.\r
-      int oldlen = len(kvs);    // Old count of K,V pairs allowed\r
-      int sz = size();          // Get current table count of active K,V pairs\r
-      int newsz = sz;           // First size estimate\r
-\r
-      // Heuristic to determine new size.  We expect plenty of dead-slots-with-keys\r
-      // and we need some decent padding to avoid endless reprobing.\r
-      if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then...\r
-        newsz = oldlen<<1;      // Double size\r
-        if( sz >= (oldlen>>1) ) // If we are >50% full of keys then...\r
-          newsz = oldlen<<2;    // Double double size\r
-      }\r
-      // This heuristic in the next 2 lines leads to a much denser table\r
-      // with a higher reprobe rate\r
-      //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then...\r
-      //  newsz = oldlen<<1;    // Double size\r
-\r
-      // Last (re)size operation was very recent?  Then double again; slows\r
-      // down resize operations for tables subject to a high key churn rate.\r
-      long tm = System.currentTimeMillis();\r
-      long q=0;\r
-      if( newsz <= oldlen && // New table would shrink or hold steady?\r
-          tm <= topmap._last_resize_milli+10000 && // Recent resize (less than 1 sec ago)\r
-          (q=_slots.estimate_get()) >= (sz<<1) ) // 1/2 of keys are dead?\r
-        newsz = oldlen<<1;      // Double the existing size\r
-\r
-      // Do not shrink, ever\r
-      if( newsz < oldlen ) newsz = oldlen;\r
-\r
-      // Convert to power-of-2\r
-      int log2;\r
-      for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size\r
-\r
-      // Now limit the number of threads actually allocating memory to a\r
-      // handful - lest we have 750 threads all trying to allocate a giant\r
-      // resized array.\r
-      long r = _resizers;\r
-      while( !_resizerUpdater.compareAndSet(this,r,r+1) )\r
-        r = _resizers;\r
-      // Size calculation: 2 words (K+V) per table entry, plus a handful.  We\r
-      // guess at 32-bit pointers; 64-bit pointers screws up the size calc by\r
-      // 2x but does not screw up the heuristic very much.\r
-      int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/;\r
-      if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see\r
-        newkvs = _newkvs;        // Between dorking around, another thread did it\r
-        if( newkvs != null )     // See if resize is already in progress\r
-          return newkvs;         // Use the new table already\r
-        // TODO - use a wait with timeout, so we'll wakeup as soon as the new table\r
-        // is ready, or after the timeout in any case.\r
-        //synchronized( this ) { wait(8*megs); }         // Timeout - we always wakeup\r
-        // For now, sleep a tad and see if the 2 guys already trying to make\r
-        // the table actually get around to making it happen.\r
-        try { Thread.sleep(8*megs); } catch( Exception e ) { }\r
-      }\r
-      // Last check, since the 'new' below is expensive and there is a chance\r
-      // that another thread slipped in a new thread while we ran the heuristic.\r
-      newkvs = _newkvs;\r
-      if( newkvs != null )      // See if resize is already in progress\r
-        return newkvs;          // Use the new table already\r
-\r
-      // Double size for K,V pairs, add 1 for CHM\r
-      newkvs = new Object[((1<<log2)<<1)+2]; // This can get expensive for big arrays\r
-      newkvs[0] = new CHM(_size); // CHM in slot 0\r
-      newkvs[1] = new int[1<<log2]; // hashes in slot 1\r
-\r
-      // Another check after the slow allocation\r
-      if( _newkvs != null )     // See if resize is already in progress\r
-        return _newkvs;         // Use the new table already\r
-\r
-      // The new table must be CAS'd in so only 1 winner amongst duplicate\r
-      // racing resizing threads.  Extra CHM's will be GC'd.\r
-      if( CAS_newkvs( newkvs ) ) { // NOW a resize-is-in-progress!\r
-        //notifyAll();            // Wake up any sleepers\r
-        //long nano = System.nanoTime();\r
-        //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" );\r
-        //if( System.out != null ) System.out.print("["+log2);\r
-        topmap.rehash();        // Call for Hashtable's benefit\r
-      } else                    // CAS failed?\r
-        newkvs = _newkvs;       // Reread new table\r
-      return newkvs;\r
-    }\r
-\r
-\r
-    // The next part of the table to copy.  It monotonically transits from zero\r
-    // to _kvs.length.  Visitors to the table can claim 'work chunks' by\r
-    // CAS'ing this field up, then copying the indicated indices from the old\r
-    // table to the new table.  Workers are not required to finish any chunk;\r
-    // the counter simply wraps and work is copied duplicately until somebody\r
-    // somewhere completes the count.\r
-    volatile long _copyIdx = 0;\r
-    static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater =\r
-      AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx");\r
-\r
-    // Work-done reporting.  Used to efficiently signal when we can move to\r
-    // the new table.  From 0 to len(oldkvs) refers to copying from the old\r
-    // table to the new.\r
-    volatile long _copyDone= 0;\r
-    static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater =\r
-      AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone");\r
-\r
-    // --- help_copy_impl ----------------------------------------------------\r
-    // Help along an existing resize operation.  We hope its the top-level\r
-    // copy (it was when we started) but this CHM might have been promoted out\r
-    // of the top position.\r
-    private final void help_copy_impl( NonBlockingHashMap topmap, Object[] oldkvs, boolean copy_all ) {\r
-      assert chm(oldkvs) == this;\r
-      Object[] newkvs = _newkvs;\r
-      assert newkvs != null;    // Already checked by caller\r
-      int oldlen = len(oldkvs); // Total amount to copy\r
-      final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work\r
-\r
-      // ---\r
-      int panic_start = -1;\r
-      int copyidx=-9999;            // Fool javac to think it's initialized\r
-      while( _copyDone < oldlen ) { // Still needing to copy?\r
-        // Carve out a chunk of work.  The counter wraps around so every\r
-        // thread eventually tries to copy every slot repeatedly.\r
-\r
-        // We "panic" if we have tried TWICE to copy every slot - and it still\r
-        // has not happened.  i.e., twice some thread somewhere claimed they\r
-        // would copy 'slot X' (by bumping _copyIdx) but they never claimed to\r
-        // have finished (by bumping _copyDone).  Our choices become limited:\r
-        // we can wait for the work-claimers to finish (and become a blocking\r
-        // algorithm) or do the copy work ourselves.  Tiny tables with huge\r
-        // thread counts trying to copy the table often 'panic'.\r
-        if( panic_start == -1 ) { // No panic?\r
-          copyidx = (int)_copyIdx;\r
-          while( copyidx < (oldlen<<1) && // 'panic' check\r
-                 !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) )\r
-            copyidx = (int)_copyIdx;      // Re-read\r
-          if( !(copyidx < (oldlen<<1)) )  // Panic!\r
-            panic_start = copyidx;        // Record where we started to panic-copy\r
-        }\r
-\r
-        // We now know what to copy.  Try to copy.\r
-        int workdone = 0;\r
-        for( int i=0; i<MIN_COPY_WORK; i++ )\r
-          if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead?\r
-            workdone++;         // Yes!\r
-        if( workdone > 0 )      // Report work-done occasionally\r
-          copy_check_and_promote( topmap, oldkvs, workdone );// See if we can promote\r
-        //for( int i=0; i<MIN_COPY_WORK; i++ )\r
-        //  if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead?\r
-        //    copy_check_and_promote( topmap, oldkvs, 1 );// See if we can promote\r
-\r
-        copyidx += MIN_COPY_WORK;\r
-        // Uncomment these next 2 lines to turn on incremental table-copy.\r
-        // Otherwise this thread continues to copy until it is all done.\r
-        if( !copy_all && panic_start == -1 ) // No panic?\r
-          return;       // Then done copying after doing MIN_COPY_WORK\r
-      }\r
-      // Extra promotion check, in case another thread finished all copying\r
-      // then got stalled before promoting.\r
-      copy_check_and_promote( topmap, oldkvs, 0 );// See if we can promote\r
-    }\r
-\r
-\r
-    // --- copy_slot_and_check -----------------------------------------------\r
-    // Copy slot 'idx' from the old table to the new table.  If this thread\r
-    // confirmed the copy, update the counters and check for promotion.\r
-    //\r
-    // Returns the result of reading the volatile _newkvs, mostly as a\r
-    // convenience to callers.  We come here with 1-shot copy requests\r
-    // typically because the caller has found a Prime, and has not yet read\r
-    // the _newkvs volatile - which must have changed from null-to-not-null\r
-    // before any Prime appears.  So the caller needs to read the _newkvs\r
-    // field to retry his operation in the new table, but probably has not\r
-    // read it yet.\r
-    private final Object[] copy_slot_and_check( NonBlockingHashMap topmap, Object[] oldkvs, int idx, Object should_help ) {\r
-      assert chm(oldkvs) == this;\r
-      Object[] newkvs = _newkvs; // VOLATILE READ\r
-      // We're only here because the caller saw a Prime, which implies a\r
-      // table-copy is in progress.\r
-      assert newkvs != null;\r
-      if( copy_slot(topmap,idx,oldkvs,_newkvs) )   // Copy the desired slot\r
-        copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied\r
-      // Generically help along any copy (except if called recursively from a helper)\r
-      return (should_help == null) ? newkvs : topmap.help_copy(newkvs);\r
-    }\r
-\r
-    // --- copy_check_and_promote --------------------------------------------\r
-    private final void copy_check_and_promote( NonBlockingHashMap topmap, Object[] oldkvs, int workdone ) {\r
-      assert chm(oldkvs) == this;\r
-      int oldlen = len(oldkvs);\r
-      // We made a slot unusable and so did some of the needed copy work\r
-      long copyDone = _copyDone;\r
-      assert (copyDone+workdone) <= oldlen;\r
-      if( workdone > 0 ) {\r
-        while( !_copyDoneUpdater.compareAndSet(this,copyDone,copyDone+workdone) ) {\r
-          copyDone = _copyDone; // Reload, retry\r
-          assert (copyDone+workdone) <= oldlen;\r
-        }\r
-        //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) )\r
-        //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%");\r
-      }\r
-\r
-      // Check for copy being ALL done, and promote.  Note that we might have\r
-      // nested in-progress copies and manage to finish a nested copy before\r
-      // finishing the top-level copy.  We only promote top-level copies.\r
-      if( copyDone+workdone == oldlen && // Ready to promote this table?\r
-          topmap._kvs == oldkvs && // Looking at the top-level table?\r
-          // Attempt to promote\r
-          topmap.CAS_kvs(oldkvs,_newkvs) ) {\r
-        topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check\r
-        //long nano = System.nanoTime();\r
-        //System.out.println(" "+nano+" Promote table to "+len(_newkvs));\r
-        //if( System.out != null ) System.out.print("]");\r
-      }\r
-    }\r
-    // --- copy_slot ---------------------------------------------------------\r
-    // Copy one K/V pair from oldkvs[i] to newkvs.  Returns true if we can\r
-    // confirm that the new table guaranteed has a value for this old-table\r
-    // slot.  We need an accurate confirmed-copy count so that we know when we\r
-    // can promote (if we promote the new table too soon, other threads may\r
-    // 'miss' on values not-yet-copied from the old table).  We don't allow\r
-    // any direct updates on the new table, unless they first happened to the\r
-    // old table - so that any transition in the new table from null to\r
-    // not-null must have been from a copy_slot (or other old-table overwrite)\r
-    // and not from a thread directly writing in the new table.  Thus we can\r
-    // count null-to-not-null transitions in the new table.\r
-    private boolean copy_slot( NonBlockingHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs ) {\r
-      // Blindly set the key slot from null to TOMBSTONE, to eagerly stop\r
-      // fresh put's from inserting new values in the old table when the old\r
-      // table is mid-resize.  We don't need to act on the results here,\r
-      // because our correctness stems from box'ing the Value field.  Slamming\r
-      // the Key field is a minor speed optimization.\r
-      Object key;\r
-      while( (key=key(oldkvs,idx)) == null )\r
-        CAS_key(oldkvs,idx, null, TOMBSTONE);\r
-\r
-      // ---\r
-      // Prevent new values from appearing in the old table.\r
-      // Box what we see in the old table, to prevent further updates.\r
-      Object oldval = val(oldkvs,idx); // Read OLD table\r
-      while( !(oldval instanceof Prime) ) {\r
-        final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval);\r
-        if( CAS_val(oldkvs,idx,oldval,box) ) { // CAS down a box'd version of oldval\r
-          // If we made the Value slot hold a TOMBPRIME, then we both\r
-          // prevented further updates here but also the (absent)\r
-          // oldval is vaccuously available in the new table.  We\r
-          // return with true here: any thread looking for a value for\r
-          // this key can correctly go straight to the new table and\r
-          // skip looking in the old table.\r
-          if( box == TOMBPRIME )\r
-            return true;\r
-          // Otherwise we boxed something, but it still needs to be\r
-          // copied into the new table.\r
-          oldval = box;         // Record updated oldval\r
-          break;                // Break loop; oldval is now boxed by us\r
-        }\r
-        oldval = val(oldkvs,idx); // Else try, try again\r
-      }\r
-      if( oldval == TOMBPRIME ) return false; // Copy already complete here!\r
-\r
-      // ---\r
-      // Copy the value into the new table, but only if we overwrite a null.\r
-      // If another value is already in the new table, then somebody else\r
-      // wrote something there and that write is happens-after any value that\r
-      // appears in the old table.  If putIfMatch does not find a null in the\r
-      // new table - somebody else should have recorded the null-not_null\r
-      // transition in this copy.\r
-      Object old_unboxed = ((Prime)oldval)._V;\r
-      assert old_unboxed != TOMBSTONE;\r
-      boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null);\r
-\r
-      // ---\r
-      // Finally, now that any old value is exposed in the new table, we can\r
-      // forever hide the old-table value by slapping a TOMBPRIME down.  This\r
-      // will stop other threads from uselessly attempting to copy this slot\r
-      // (i.e., it's a speed optimization not a correctness issue).\r
-      while( !CAS_val(oldkvs,idx,oldval,TOMBPRIME) )\r
-        oldval = val(oldkvs,idx);\r
-\r
-      return copied_into_new;\r
-    } // end copy_slot\r
-  } // End of CHM\r
-\r
-\r
-  // --- Snapshot ------------------------------------------------------------\r
-  // The main class for iterating over the NBHM.  It "snapshots" a clean\r
-  // view of the K/V array.\r
-  private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> {\r
-    final Object[] _sskvs;\r
-    public SnapshotV() {\r
-      while( true ) {           // Verify no table-copy-in-progress\r
-        Object[] topkvs = _kvs;\r
-        CHM topchm = chm(topkvs);\r
-        if( topchm._newkvs == null ) { // No table-copy-in-progress\r
-          // The "linearization point" for the iteration.  Every key in this\r
-          // table will be visited, but keys added later might be skipped or\r
-          // even be added to a following table (also not iterated over).\r
-          _sskvs = topkvs;\r
-          break;\r
-        }\r
-        // Table copy in-progress - so we cannot get a clean iteration.  We\r
-        // must help finish the table copy before we can start iterating.\r
-        topchm.help_copy_impl(NonBlockingHashMap.this,topkvs,true);\r
-      }\r
-      // Warm-up the iterator\r
-      next();\r
-    }\r
-    int length() { return len(_sskvs); }\r
-    Object key(int idx) { return NonBlockingHashMap.key(_sskvs,idx); }\r
-    private int _idx;              // Varies from 0-keys.length\r
-    private Object _nextK, _prevK; // Last 2 keys found\r
-    private TypeV  _nextV, _prevV; // Last 2 values found\r
-    public boolean hasNext() { return _nextV != null; }\r
-    public TypeV next() {\r
-      // 'next' actually knows what the next value will be - it had to\r
-      // figure that out last go-around lest 'hasNext' report true and\r
-      // some other thread deleted the last value.  Instead, 'next'\r
-      // spends all its effort finding the key that comes after the\r
-      // 'next' key.\r
-      if( _idx != 0 && _nextV == null ) throw new NoSuchElementException();\r
-      _prevK = _nextK;          // This will become the previous key\r
-      _prevV = _nextV;          // This will become the previous value\r
-      _nextV = null;            // We have no more next-key\r
-      // Attempt to set <_nextK,_nextV> to the next K,V pair.\r
-      // _nextV is the trigger: stop searching when it is != null\r
-      while( _idx<length() ) {  // Scan array\r
-        _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!)\r
-        if( _nextK != null && // Found something?\r
-            _nextK != TOMBSTONE &&\r
-            (_nextV=get(_nextK)) != null )\r
-          break;                // Got it!  _nextK is a valid Key\r
-      }                         // Else keep scanning\r
-      return _prevV;            // Return current value.\r
-    }\r
-    public void remove() {\r
-      if( _prevV == null ) throw new IllegalStateException();\r
-      putIfMatch( NonBlockingHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV );\r
-      _prevV = null;\r
-    }\r
-\r
-    public TypeV nextElement() { return next(); }\r
-    public boolean hasMoreElements() { return hasNext(); }\r
-  }\r
-\r
-  /** Returns an enumeration of the values in this table.\r
-   *  @return an enumeration of the values in this table\r
-   *  @see #values()  */\r
-  public Enumeration<TypeV> elements() { return new SnapshotV(); }\r
-\r
-  // --- values --------------------------------------------------------------\r
-  /** Returns a {@link Collection} view of the values contained in this map.\r
-   *  The collection is backed by the map, so changes to the map are reflected\r
-   *  in the collection, and vice-versa.  The collection supports element\r
-   *  removal, which removes the corresponding mapping from this map, via the\r
-   *  <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,\r
-   *  <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.\r
-   *  It does not support the <tt>add</tt> or <tt>addAll</tt> operations.\r
-   *\r
-   *  <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that\r
-   *  will never throw {@link ConcurrentModificationException}, and guarantees\r
-   *  to traverse elements as they existed upon construction of the iterator,\r
-   *  and may (but is not guaranteed to) reflect any modifications subsequent\r
-   *  to construction. */\r
-  @Override\r
-  public Collection<TypeV> values() {\r
-    return new AbstractCollection<TypeV>() {\r
-      @Override public void    clear   (          ) {        NonBlockingHashMap.this.clear        ( ); }\r
-      @Override public int     size    (          ) { return NonBlockingHashMap.this.size         ( ); }\r
-      @Override public boolean contains( Object v ) { return NonBlockingHashMap.this.containsValue(v); }\r
-      @Override public Iterator<TypeV> iterator()   { return new SnapshotV(); }\r
-    };\r
-  }\r
-\r
-  // --- keySet --------------------------------------------------------------\r
-  private class SnapshotK implements Iterator<TypeK>, Enumeration<TypeK> {\r
-    final SnapshotV _ss;\r
-    public SnapshotK() { _ss = new SnapshotV(); }\r
-    public void remove() { _ss.remove(); }\r
-    public TypeK next() { _ss.next(); return (TypeK)_ss._prevK; }\r
-    public boolean hasNext() { return _ss.hasNext(); }\r
-    public TypeK nextElement() { return next(); }\r
-    public boolean hasMoreElements() { return hasNext(); }\r
-  }\r
-\r
-  /** Returns an enumeration of the keys in this table.\r
-   *  @return an enumeration of the keys in this table\r
-   *  @see #keySet()  */\r
-  public Enumeration<TypeK> keys() { return new SnapshotK(); }\r
-\r
-  /** Returns a {@link Set} view of the keys contained in this map.  The set\r
-   *  is backed by the map, so changes to the map are reflected in the set,\r
-   *  and vice-versa.  The set supports element removal, which removes the\r
-   *  corresponding mapping from this map, via the <tt>Iterator.remove</tt>,\r
-   *  <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and\r
-   *  <tt>clear</tt> operations.  It does not support the <tt>add</tt> or\r
-   *  <tt>addAll</tt> operations.\r
-   *\r
-   *  <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that\r
-   *  will never throw {@link ConcurrentModificationException}, and guarantees\r
-   *  to traverse elements as they existed upon construction of the iterator,\r
-   *  and may (but is not guaranteed to) reflect any modifications subsequent\r
-   *  to construction.  */\r
-  @Override\r
-  public Set<TypeK> keySet() {\r
-    return new AbstractSet<TypeK> () {\r
-      @Override public void    clear   (          ) {        NonBlockingHashMap.this.clear   ( ); }\r
-      @Override public int     size    (          ) { return NonBlockingHashMap.this.size    ( ); }\r
-      @Override public boolean contains( Object k ) { return NonBlockingHashMap.this.containsKey(k); }\r
-      @Override public boolean remove  ( Object k ) { return NonBlockingHashMap.this.remove  (k) != null; }\r
-      @Override public Iterator<TypeK> iterator()   { return new SnapshotK(); }\r
-    };\r
-  }\r
-\r
-\r
-  // --- entrySet ------------------------------------------------------------\r
-  // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry.\r
-  private class NBHMEntry extends AbstractEntry<TypeK,TypeV> {\r
-    NBHMEntry( final TypeK k, final TypeV v ) { super(k,v); }\r
-    public TypeV setValue(final TypeV val) {\r
-      if( val == null ) throw new NullPointerException();\r
-      _val = val;\r
-      return put(_key, val);\r
-    }\r
-  }\r
-\r
-  private class SnapshotE implements Iterator<Map.Entry<TypeK,TypeV>> {\r
-    final SnapshotV _ss;\r
-    public SnapshotE() { _ss = new SnapshotV(); }\r
-    public void remove() { _ss.remove(); }\r
-    public Map.Entry<TypeK,TypeV> next() { _ss.next(); return new NBHMEntry((TypeK)_ss._prevK,_ss._prevV); }\r
-    public boolean hasNext() { return _ss.hasNext(); }\r
-  }\r
-\r
-  /** Returns a {@link Set} view of the mappings contained in this map.  The\r
-   *  set is backed by the map, so changes to the map are reflected in the\r
-   *  set, and vice-versa.  The set supports element removal, which removes\r
-   *  the corresponding mapping from the map, via the\r
-   *  <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>,\r
-   *  <tt>retainAll</tt>, and <tt>clear</tt> operations.  It does not support\r
-   *  the <tt>add</tt> or <tt>addAll</tt> operations.\r
-   *\r
-   *  <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator\r
-   *  that will never throw {@link ConcurrentModificationException},\r
-   *  and guarantees to traverse elements as they existed upon\r
-   *  construction of the iterator, and may (but is not guaranteed to)\r
-   *  reflect any modifications subsequent to construction.\r
-   *\r
-   *  <p><strong>Warning:</strong> the iterator associated with this Set\r
-   *  requires the creation of {@link java.util.Map.Entry} objects with each\r
-   *  iteration.  The {@link NonBlockingHashMap} does not normally create or\r
-   *  using {@link java.util.Map.Entry} objects so they will be created soley\r
-   *  to support this iteration.  Iterating using {@link #keySet} or {@link\r
-   *  #values} will be more efficient.\r
-   */\r
-  @Override\r
-  public Set<Map.Entry<TypeK,TypeV>> entrySet() {\r
-    return new AbstractSet<Map.Entry<TypeK,TypeV>>() {\r
-      @Override public void    clear   (          ) {        NonBlockingHashMap.this.clear( ); }\r
-      @Override public int     size    (          ) { return NonBlockingHashMap.this.size ( ); }\r
-      @Override public boolean remove( final Object o ) {\r
-        if( !(o instanceof Map.Entry)) return false;\r
-        final Map.Entry<?,?> e = (Map.Entry<?,?>)o;\r
-        return NonBlockingHashMap.this.remove(e.getKey(), e.getValue());\r
-      }\r
-      @Override public boolean contains(final Object o) {\r
-        if( !(o instanceof Map.Entry)) return false;\r
-        final Map.Entry<?,?> e = (Map.Entry<?,?>)o;\r
-        TypeV v = get(e.getKey());\r
-        return v.equals(e.getValue());\r
-      }\r
-      @Override public Iterator<Map.Entry<TypeK,TypeV>> iterator() { return new SnapshotE(); }\r
-    };\r
-  }\r
-\r
-  // --- writeObject -------------------------------------------------------\r
-  // Write a NBHM to a stream\r
-  private void writeObject(java.io.ObjectOutputStream s) throws IOException  {\r
-    s.defaultWriteObject();     // Nothing to write\r
-    for( Object K : keySet() ) {\r
-      final Object V = get(K);  // Do an official 'get'\r
-      s.writeObject(K);         // Write the <TypeK,TypeV> pair\r
-      s.writeObject(V);\r
-    }\r
-    s.writeObject(null);        // Sentinel to indicate end-of-data\r
-    s.writeObject(null);\r
-  }\r
-\r
-  // --- readObject --------------------------------------------------------\r
-  // Read a CHM from a stream\r
-  private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {\r
-    s.defaultReadObject();      // Read nothing\r
-    initialize(MIN_SIZE);\r
-    for(;;) {\r
-      final TypeK K = (TypeK) s.readObject();\r
-      final TypeV V = (TypeV) s.readObject();\r
-      if( K == null ) break;\r
-      put(K,V);                 // Insert with an offical put\r
-    }\r
-  }\r
-\r
-} // End NonBlockingHashMap class\r
diff --git a/benchmark/cliffc-hashtable/cliffc_hashtable.h b/benchmark/cliffc-hashtable/cliffc_hashtable.h
deleted file mode 100644 (file)
index c09b7a1..0000000
+++ /dev/null
@@ -1,975 +0,0 @@
-#ifndef CLIFFC_HASHTABLE_H
-#define CLIFFC_HASHTABLE_H
-
-#include <iostream>
-#include <atomic>
-#include "stdio.h" 
-//#include <common.h>
-#ifdef STANDALONE
-#include <assert.h>
-#define MODEL_ASSERT assert 
-#else
-#include <model-assert.h>
-#endif
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h" 
-
-using namespace std;
-
-/**
-       This header file declares and defines a simplified version of Cliff Click's
-       NonblockingHashMap. It contains all the necessary structrues and main
-       functions. In simplified_cliffc_hashtable.cc file, it has the definition for
-       the static fields.
-*/
-
-template<typename TypeK, typename TypeV>
-class cliffc_hashtable;
-
-/**
-       Corresponding the the Object[] array in Cliff Click's Java implementation.
-       It keeps the first two slots for CHM (Hashtable control unit) and the hash
-       records (an array of hash used for fast negative key-equality check).
-*/
-struct kvs_data {
-       int _size;
-       atomic<void*> *_data;
-       
-       kvs_data(int sz) {
-               _size = sz;
-               int real_size = sz * 2 + 2;
-               _data = new atomic<void*>[real_size];
-               // The control block should be initialized in resize()
-               // Init the hash record array
-               int *hashes = new int[_size];
-               int i;
-               for (i = 0; i < _size; i++) {
-                       hashes[i] = 0;
-               }
-               // Init the data to Null slot
-               for (i = 2; i < real_size; i++) {
-                       _data[i].store(NULL, memory_order_relaxed);
-               }
-               _data[1].store(hashes, memory_order_relaxed);
-       }
-
-       ~kvs_data() {
-               int *hashes = (int*) _data[1].load(memory_order_relaxed);
-               delete hashes;
-               delete[] _data;
-       }
-};
-
-struct slot {
-       bool _prime;
-       void *_ptr;
-
-       slot(bool prime, void *ptr) {
-               _prime = prime;
-               _ptr = ptr;
-       }
-};
-
-
-/**
-       TypeK must have defined function "int hashCode()" which return the hash
-       code for the its object, and "int equals(TypeK anotherKey)" which is
-       used to judge equality.
-       TypeK and TypeV should define their own copy constructor.
-*/
-/**
-       @Begin
-       @Class_begin
-       @End
-*/
-template<typename TypeK, typename TypeV>
-class cliffc_hashtable {
-       /**
-               # The synchronization we have for the hashtable gives us the property of
-               # serializability, so we should have a sequential hashtable when we check the
-               # correctness. The key thing is to identify all the commit point.
-       
-               @Begin
-               @Options:
-                       LANG = CPP;
-                       CLASS = cliffc_hashtable;
-               @Global_define:
-                       @DeclareVar:
-                       spec_table *map;
-                       spec_table *id_map;
-                       id_tag_t *tag;
-                       @InitVar:
-                               map = new_spec_table_default(equals_key);
-                               id_map = new_spec_table_default(equals_id);
-                               tag = new_id_tag();
-                       //@Cleanup:
-                       //      model_print("cleaning up\n");
-                       @DefineFunc:
-                       bool equals_key(void *ptr1, void *ptr2) {
-                               TypeK *key1 = (TypeK*) ptr1, *key2 = (TypeK*) ptr2;
-                               if (key1 == NULL || key2 == NULL)
-                                       return false;
-                               return key1->equals(key2);
-                       }
-                       
-                       @DefineFunc:
-                       bool equals_val(void *ptr1, void *ptr2) {
-                               if (ptr1 == ptr2)
-                                       return true;
-                               TypeV *val1 = (TypeV*) ptr1, *val2 = (TypeV*) ptr2;
-                               if (val1 == NULL || val2 == NULL)
-                                       return false;
-                               return val1->equals(val2);
-                       }
-
-                       @DefineFunc:
-                       bool equals_id(void *ptr1, void *ptr2) {
-                               id_tag_t *id1 = (id_tag_t*) ptr1, *id2 = (id_tag_t*) ptr2;
-                               if (id1 == NULL || id2 == NULL)
-                                       return false;
-                               return (*id1).tag == (*id2).tag;
-                       }
-
-                       @DefineFunc:
-                       # Update the tag for the current key slot if the corresponding tag
-                       # is NULL, otherwise just return that tag. It will update the next
-                       # available tag too if it requires a new tag for that key slot.
-                       call_id_t getKeyTag(TypeK *key) {
-                               if (!spec_table_contains(id_map, key)) {
-                                       call_id_t cur_id = current(tag);
-                                       spec_table_put(id_map, key, (void*) cur_id);
-                                       next(tag);
-                                       return cur_id;
-                               } else {
-                                       call_id_t res = (call_id_t) spec_table_get(id_map, key);
-                                       return res;
-                               }
-                       }
-               @Happens_before:
-                       Put->Get
-                       Put->Put
-               @End
-       */
-
-friend class CHM;
-       /**
-               The control structure for the hashtable
-       */
-       private:
-       class CHM {
-               friend class cliffc_hashtable;
-               private:
-               atomic<kvs_data*> _newkvs;
-               
-               // Size of active K,V pairs
-               atomic_int _size;
-       
-               // Count of used slots
-               atomic_int _slots;
-               
-               // The next part of the table to copy
-               atomic_int _copy_idx;
-               
-               // Work-done reporting
-               atomic_int _copy_done;
-       
-               public:
-               CHM(int size) {
-                       _newkvs.store(NULL, memory_order_relaxed);
-                       _size.store(size, memory_order_relaxed);
-                       _slots.store(0, memory_order_relaxed);
-       
-                       _copy_idx.store(0, memory_order_relaxed);
-                       _copy_done.store(0, memory_order_relaxed);
-               }
-       
-               ~CHM() {}
-               
-               private:
-                       
-               // Heuristic to decide if the table is too full
-               bool table_full(int reprobe_cnt, int len) {
-                       return
-                               reprobe_cnt >= REPROBE_LIMIT &&
-                               _slots.load(memory_order_relaxed) >= reprobe_limit(len);
-               }
-       
-               kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
-                       //model_print("resizing...\n");
-                       /**** FIXME: miss ****/
-                       kvs_data *newkvs = _newkvs.load(memory_order_acquire);
-                       if (newkvs != NULL)
-                               return newkvs;
-       
-                       // No copy in-progress, start one; Only double the table size
-                       int oldlen = kvs->_size;
-                       int sz = _size.load(memory_order_relaxed);
-                       int newsz = sz;
-                       
-                       // Just follow Cliff Click's heuristic to decide the new size
-                       if (sz >= (oldlen >> 2)) { // If we are 25% full
-                               newsz = oldlen << 1; // Double size
-                               if (sz >= (oldlen >> 1))
-                                       newsz = oldlen << 2; // Double double size
-                       }
-       
-                       // We do not record the record timestamp
-                       if (newsz <= oldlen) newsz = oldlen << 1;
-                       // Do not shrink ever
-                       if (newsz < oldlen) newsz = oldlen;
-       
-                       // Last check cause the 'new' below is expensive
-                       /**** FIXME: miss ****/
-                       newkvs = _newkvs.load(memory_order_acquire);
-                       //model_print("hey1\n");
-                       if (newkvs != NULL) return newkvs;
-       
-                       newkvs = new kvs_data(newsz);
-                       void *chm = (void*) new CHM(sz);
-                       //model_print("hey2\n");
-                       newkvs->_data[0].store(chm, memory_order_relaxed);
-       
-                       kvs_data *cur_newkvs; 
-                       // Another check after the slow allocation
-                       /**** FIXME: miss ****/
-                       if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
-                               return cur_newkvs;
-                       // CAS the _newkvs to the allocated table
-                       kvs_data *desired = (kvs_data*) NULL;
-                       kvs_data *expected = (kvs_data*) newkvs; 
-                       /**** FIXME: miss ****/
-                       //model_print("release in resize!\n"); 
-                       if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
-                                       memory_order_relaxed)) {
-                               // Should clean the allocated area
-                               delete newkvs;
-                               /**** FIXME: miss ****/
-                               newkvs = _newkvs.load(memory_order_acquire);
-                       }
-                       return newkvs;
-               }
-       
-               void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
-                       bool copy_all) {
-                       MODEL_ASSERT (get_chm(oldkvs) == this);
-                       /**** FIXME: miss ****/
-                       kvs_data *newkvs = _newkvs.load(memory_order_acquire);
-                       int oldlen = oldkvs->_size;
-                       int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
-               
-                       // Just follow Cliff Click's code here
-                       int panic_start = -1;
-                       int copyidx;
-                       while (_copy_done.load(memory_order_relaxed) < oldlen) {
-                               copyidx = _copy_idx.load(memory_order_relaxed);
-                               if (panic_start == -1) { // No painc
-                                       copyidx = _copy_idx.load(memory_order_relaxed);
-                                       while (copyidx < (oldlen << 1) &&
-                                               !_copy_idx.compare_exchange_strong(copyidx, copyidx +
-                                                       min_copy_work, memory_order_relaxed, memory_order_relaxed))
-                                               copyidx = _copy_idx.load(memory_order_relaxed);
-                                       if (!(copyidx < (oldlen << 1)))
-                                               panic_start = copyidx;
-                               }
-       
-                               // Now copy the chunk of work we claimed
-                               int workdone = 0;
-                               for (int i = 0; i < min_copy_work; i++)
-                                       if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
-                                               newkvs))
-                                               workdone++;
-                               if (workdone > 0)
-                                       copy_check_and_promote(topmap, oldkvs, workdone);
-       
-                               copyidx += min_copy_work;
-                               if (!copy_all && panic_start == -1)
-                                       return; // We are done with the work we claim
-                       }
-                       copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
-               }
-       
-               kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
-                       *oldkvs, int idx, void *should_help) {
-                       /**** FIXME: miss ****/
-                       kvs_data *newkvs = _newkvs.load(memory_order_acquire);
-                       // We're only here cause the caller saw a Prime
-                       if (copy_slot(topmap, idx, oldkvs, newkvs))
-                               copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
-                       return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
-               }
-       
-               void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
-                       oldkvs, int workdone) {
-                       int oldlen = oldkvs->_size;
-                       int copyDone = _copy_done.load(memory_order_relaxed);
-                       if (workdone > 0) {
-                               while (true) {
-                                       copyDone = _copy_done.load(memory_order_relaxed);
-                                       if (_copy_done.compare_exchange_weak(copyDone, copyDone +
-                                               workdone, memory_order_relaxed, memory_order_relaxed))
-                                               break;
-                               }
-                       }
-       
-                       // Promote the new table to the current table
-                       if (copyDone + workdone == oldlen &&
-                               topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
-                               /**** FIXME: miss ****/
-                               kvs_data *newkvs = _newkvs.load(memory_order_acquire);
-                               /**** CDSChecker error ****/
-                               topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
-                                       memory_order_relaxed);
-                       }
-               }
-       
-               bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
-                       kvs_data *newkvs) {
-                       slot *key_slot;
-                       while ((key_slot = key(oldkvs, idx)) == NULL)
-                               CAS_key(oldkvs, idx, NULL, TOMBSTONE);
-       
-                       // First CAS old to Prime
-                       slot *oldval = val(oldkvs, idx);
-                       while (!is_prime(oldval)) {
-                               slot *box = (oldval == NULL || oldval == TOMBSTONE)
-                                       ? TOMBPRIME : new slot(true, oldval->_ptr);
-                               if (CAS_val(oldkvs, idx, oldval, box)) {
-                                       if (box == TOMBPRIME)
-                                               return 1; // Copy done
-                                       // Otherwise we CAS'd the box
-                                       oldval = box; // Record updated oldval
-                                       break;
-                               }
-                               oldval = val(oldkvs, idx); // Else re-try
-                       }
-       
-                       if (oldval == TOMBPRIME) return false; // Copy already completed here
-       
-                       slot *old_unboxed = new slot(false, oldval->_ptr);
-                       int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
-                               NULL) == NULL);
-       
-                       // Old value is exposed in the new table
-                       while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
-                               oldval = val(oldkvs, idx);
-       
-                       return copied_into_new;
-               }
-       };
-
-       
-
-       private:
-       static const int Default_Init_Size = 4; // Intial table size
-
-       static slot* const MATCH_ANY;
-       static slot* const NO_MATCH_OLD;
-
-       static slot* const TOMBPRIME;
-       static slot* const TOMBSTONE;
-
-       static const int REPROBE_LIMIT = 10; // Forces a table-resize
-
-       atomic<kvs_data*> _kvs;
-
-       public:
-       cliffc_hashtable() {
-               // Should initialize the CHM for the construction of the table
-               // For other CHM in kvs_data, they should be initialzed in resize()
-               // because the size is determined dynamically
-               /**
-                       @Begin
-                       @Entry_point
-                       @End
-               */
-               kvs_data *kvs = new kvs_data(Default_Init_Size);
-               void *chm = (void*) new CHM(0);
-               kvs->_data[0].store(chm, memory_order_relaxed);
-               _kvs.store(kvs, memory_order_relaxed);
-       }
-
-       cliffc_hashtable(int init_size) {
-               // Should initialize the CHM for the construction of the table
-               // For other CHM in kvs_data, they should be initialzed in resize()
-               // because the size is determined dynamically
-
-               /**
-                       @Begin
-                       @Entry_point
-                       @End
-               */
-               kvs_data *kvs = new kvs_data(init_size);
-               void *chm = (void*) new CHM(0);
-               kvs->_data[0].store(chm, memory_order_relaxed);
-               _kvs.store(kvs, memory_order_relaxed);
-       }
-
-       /**
-               @Begin
-               @Interface: Get
-               //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
-               @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
-               //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
-               @ID: getKeyTag(key)
-               @Action:
-                       TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
-                       //bool passed = equals_val(_Old_Val, __RET__);
-                       //bool passed = false;
-                       //if (!passed) {
-                               //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
-                               //int ret = __RET__ == NULL ? 0 : __RET__->_val;
-                               //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
-                               //key->_val, old, ret);
-                       //}
-               @Post_check:
-                       //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
-                       equals_val(_Old_Val, __RET__)
-               @End
-       */
-       TypeV* get(TypeK *key) {
-               slot *key_slot = new slot(false, key);
-               int fullhash = hash(key_slot);
-               /**** CDSChecker error ****/
-               kvs_data *kvs = _kvs.load(memory_order_acquire);
-               /**
-                       //@Begin
-                       @Commit_point_define_check: true
-                       @Label: Get_ReadKVS
-                       @End
-               */
-               slot *V = get_impl(this, kvs, key_slot, fullhash);
-               if (V == NULL) return NULL;
-               MODEL_ASSERT (!is_prime(V));
-               return (TypeV*) V->_ptr;
-       }
-
-       /**
-               @Begin
-               @Interface: Put
-               //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
-               @Commit_point_set: Put_Point | Put_WriteKey
-               @ID: getKeyTag(key)
-               @Action:
-                       # Remember this old value at checking point
-                       TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
-                       spec_table_put(map, key, val);
-                       //bool passed = equals_val(__RET__, _Old_Val);
-                       //bool passed = false;
-                       //if (!passed) {
-                               //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
-                               //int ret = __RET__ == NULL ? 0 : __RET__->_val;
-                               //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
-                               //      key->_val, val->_val, old, ret);
-                       //}
-               @Post_check:
-                       equals_val(__RET__, _Old_Val)
-               @End
-       */
-       TypeV* put(TypeK *key, TypeV *val) {
-               return putIfMatch(key, val, NO_MATCH_OLD);
-       }
-
-       /**
-//             @Begin
-               @Interface: PutIfAbsent
-               @Commit_point_set:
-                       Write_Success_Point | PutIfAbsent_Fail_Point
-               @Condition: !spec_table_contains(map, key)
-               @HB_condition:
-                       COND_PutIfAbsentSucc :: __RET__ == NULL
-               @ID: getKeyTag(key)
-               @Action:
-                       void *_Old_Val = spec_table_get(map, key);
-                       if (__COND_SAT__)
-                               spec_table_put(map, key, value);
-               @Post_check:
-                       __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__) 
-               @End
-       */
-       TypeV* putIfAbsent(TypeK *key, TypeV *value) {
-               return putIfMatch(key, val, TOMBSTONE);
-       }
-
-       /**
-//             @Begin
-               @Interface: RemoveAny
-               @Commit_point_set: Write_Success_Point
-               @ID: getKeyTag(key)
-               @Action:
-                       void *_Old_Val = spec_table_get(map, key);
-                       spec_table_put(map, key, NULL);
-               @Post_check:
-                       equals_val(__RET__, _Old_Val)
-               @End
-       */
-       TypeV* remove(TypeK *key) {
-               return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
-       }
-
-       /**
-//             @Begin
-               @Interface: RemoveIfMatch
-               @Commit_point_set:
-                       Write_Success_Point | RemoveIfMatch_Fail_Point
-               @Condition:
-                       equals_val(spec_table_get(map, key), val)
-               @HB_condition:
-                       COND_RemoveIfMatchSucc :: __RET__ == true
-               @ID: getKeyTag(key)
-               @Action:
-                       if (__COND_SAT__)
-                               spec_table_put(map, key, NULL);
-               @Post_check:
-                       __COND_SAT__ ? __RET__ : !__RET__
-               @End
-       */
-       bool remove(TypeK *key, TypeV *val) {
-               slot *val_slot = val == NULL ? NULL : new slot(false, val);
-               return putIfMatch(key, TOMBSTONE, val) == val;
-
-       }
-
-       /**
-//             @Begin
-               @Interface: ReplaceAny
-               @Commit_point_set:
-                       Write_Success_Point
-               @ID: getKeyTag(key)
-               @Action:
-                       void *_Old_Val = spec_table_get(map, key);
-               @Post_check:
-                       equals_val(__RET__, _Old_Val)
-               @End
-       */
-       TypeV* replace(TypeK *key, TypeV *val) {
-               return putIfMatch(key, val, MATCH_ANY);
-       }
-
-       /**
-//             @Begin
-               @Interface: ReplaceIfMatch
-               @Commit_point_set:
-                       Write_Success_Point | ReplaceIfMatch_Fail_Point
-               @Condition:
-                       equals_val(spec_table_get(map, key), oldval)
-               @HB_condition:
-                       COND_ReplaceIfMatchSucc :: __RET__ == true
-               @ID: getKeyTag(key)
-               @Action:
-                       if (__COND_SAT__)
-                               spec_table_put(map, key, newval);
-               @Post_check:
-                       __COND_SAT__ ? __RET__ : !__RET__
-               @End
-       */
-       bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
-               return putIfMatch(key, newval, oldval) == oldval;
-       }
-
-       private:
-       static CHM* get_chm(kvs_data* kvs) {
-               CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
-               return res;
-       }
-
-       static int* get_hashes(kvs_data *kvs) {
-               return (int *) kvs->_data[1].load(memory_order_relaxed);
-       }
-       
-       // Preserve happens-before semantics on newly inserted keys
-       static inline slot* key(kvs_data *kvs, int idx) {
-               MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
-               // Corresponding to the volatile read in get_impl() and putIfMatch in
-               // Cliff Click's Java implementation
-               slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
-               /**
-                       @Begin
-                       # This is a complicated potential commit point since many many functions are
-                       # calling val().
-                       @Potential_commit_point_define: true
-                       @Label: Read_Key_Point
-                       @End
-               */
-               return res;
-       }
-
-       /**
-               The atomic operation in val() function is a "potential" commit point,
-               which means in some case it is a real commit point while it is not for
-               some other cases. This so happens because the val() function is such a
-               fundamental function that many internal operation will call. Our
-               strategy is that we label any potential commit points and check if they
-               really are the commit points later.
-       */
-       // Preserve happens-before semantics on newly inserted values
-       static inline slot* val(kvs_data *kvs, int idx) {
-               MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
-               // Corresponding to the volatile read in get_impl() and putIfMatch in
-               // Cliff Click's Java implementation
-               /**** CDSChecker error & hb violation ****/
-               slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
-               /**
-                       @Begin
-                       # This is a complicated potential commit point since many many functions are
-                       # calling val().
-                       @Potential_commit_point_define: true
-                       @Label: Read_Val_Point
-                       @End
-               */
-               return res;
-
-
-       }
-
-       static int hash(slot *key_slot) {
-               MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
-               TypeK* key = (TypeK*) key_slot->_ptr;
-               int h = key->hashCode();
-               // Spread bits according to Cliff Click's code
-               h += (h << 15) ^ 0xffffcd7d;
-               h ^= (h >> 10);
-               h += (h << 3);
-               h ^= (h >> 6);
-               h += (h << 2) + (h << 14);
-               return h ^ (h >> 16);
-       }
-       
-       // Heuristic to decide if reprobed too many times. 
-       // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
-       // put it triggers a table resize. Several places MUST have exact agreement.
-       static int reprobe_limit(int len) {
-               return REPROBE_LIMIT + (len >> 2);
-       }
-       
-       static inline bool is_prime(slot *val) {
-               return (val != NULL) && val->_prime;
-       }
-
-       // Check for key equality. Try direct pointer comparison first (fast
-	// negative test) and then the full 'equals' call
-       static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
-               int fullhash) {
-               // Caller should've checked this.
-               MODEL_ASSERT (K != NULL);
-               TypeK* key_ptr = (TypeK*) key_slot->_ptr;
-               return
-                       K == key_slot ||
-                               ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
-                               K != TOMBSTONE &&
-                               key_ptr->equals(K->_ptr));
-       }
-
-       static bool valeq(slot *val_slot1, slot *val_slot2) {
-               MODEL_ASSERT (val_slot1 != NULL);
-               TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
-               if (val_slot2 == NULL || ptr1 == NULL) return false;
-               return ptr1->equals(val_slot2->_ptr);
-       }
-       
-       // Together with key() preserve the happens-before relationship on newly
-       // inserted keys
-       static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
-               bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
-                       desired, memory_order_relaxed, memory_order_relaxed);
-               /**
-                       # If it is a successful put instead of a copy or any other internal
-			# operations, expected != NULL
-                       @Begin
-                       @Potential_commit_point_define: res
-                       @Label: Write_Key_Point
-                       @End
-               */
-               return res;
-       }
-
-       /**
-               Same as the val() function, we only label the CAS operation as the
-               potential commit point.
-       */
-       // Together with val() preserve the happens-before relationship on newly
-       // inserted values
-       static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
-               *desired) {
-               /**** CDSChecker error & HB violation ****/
-               bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
-                       desired, memory_order_acq_rel, memory_order_relaxed);
-               /**
-                       # If it is a successful put instead of a copy or any other internal
-			# operations, expected != NULL
-                       @Begin
-                       @Potential_commit_point_define: res
-                       @Label: Write_Val_Point
-                       @End
-               */
-               return res;
-       }
-
-       slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
-               fullhash) {
-               int len = kvs->_size;
-               CHM *chm = get_chm(kvs);
-               int *hashes = get_hashes(kvs);
-
-               int idx = fullhash & (len - 1);
-               int reprobe_cnt = 0;
-               while (true) {
-                       slot *K = key(kvs, idx);
-                       /**
-                               @Begin
-                               @Commit_point_define: K == NULL
-                               @Potential_commit_point_label: Read_Key_Point
-                               @Label: Get_Point1
-                               @End
-                       */
-                       slot *V = val(kvs, idx);
-                       
-                       if (K == NULL) {
-                               //model_print("Key is null\n");
-                               return NULL; // A miss
-                       }
-                       
-                       if (keyeq(K, key_slot, hashes, idx, fullhash)) {
-                               // Key hit! Check if table-resize in progress
-                               if (!is_prime(V)) {
-                                       /**
-                                               @Begin
-                                               @Commit_point_clear: true
-                                               @Label: Get_Clear
-                                               @End
-                                       */
-
-                                       /**
-                                               @Begin
-                                               @Commit_point_define: true
-                                               @Potential_commit_point_label: Read_Val_Point
-                                               @Label: Get_Point2
-                                               @End
-                                       */
-                                       return (V == TOMBSTONE) ? NULL : V; // Return this value
-                               }
-                               // Otherwise, finish the copy & retry in the new table
-                               return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
-                                       idx, key_slot), key_slot, fullhash);
-                       }
-
-                       if (++reprobe_cnt >= REPROBE_LIMIT ||
-                               key_slot == TOMBSTONE) {
-                               // Retry in new table
-                               // Atomic read can be here 
-                               /**** FIXME: miss ****/
-                               kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
-                               /**
-                                       //@Begin
-                                       @Commit_point_define_check: true
-                                       @Label: Get_ReadNewKVS
-                                       @End
-                               */
-                               return newkvs == NULL ? NULL : get_impl(topmap,
-                                       topmap->help_copy(newkvs), key_slot, fullhash);
-                       }
-
-                       idx = (idx + 1) & (len - 1); // Reprobe by 1
-               }
-       }
-
-       // A wrapper of the essential function putIfMatch()
-       TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
-               // TODO: Should throw an exception rather return NULL
-               if (old_val == NULL) {
-                       return NULL;
-               }
-               slot *key_slot = new slot(false, key);
-
-               slot *value_slot = new slot(false, value);
-               /**** FIXME: miss ****/
-               kvs_data *kvs = _kvs.load(memory_order_acquire);
-               /**
-                       //@Begin
-                       @Commit_point_define_check: true
-                       @Label: Put_ReadKVS
-                       @End
-               */
-               slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
-               // Only when copy_slot() call putIfMatch() will it return NULL
-               MODEL_ASSERT (res != NULL); 
-               MODEL_ASSERT (!is_prime(res));
-               return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
-       }
-
-       /**
-               Put, Remove, PutIfAbsent, etc will call this function. Return the old
-               value. If the returned value is equals to the expVal (or expVal is
-               NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
-               Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
-               NULL if passed a NULL expVal.
-       */
-       static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
-               *key_slot, slot *val_slot, slot *expVal) {
-               MODEL_ASSERT (val_slot != NULL);
-               MODEL_ASSERT (!is_prime(val_slot));
-               MODEL_ASSERT (!is_prime(expVal));
-
-               int fullhash = hash(key_slot);
-               int len = kvs->_size;
-               CHM *chm = get_chm(kvs);
-               int *hashes = get_hashes(kvs);
-               int idx = fullhash & (len - 1);
-
-               // Claim a key slot
-               int reprobe_cnt = 0;
-               slot *K;
-               slot *V;
-               kvs_data *newkvs;
-               
-               while (true) { // Spin till we get a key slot
-                       K = key(kvs, idx);
-                       V = val(kvs, idx);
-                       if (K == NULL) { // Get a free slot
-                               if (val_slot == TOMBSTONE) return val_slot;
-                               // Claim the null key-slot
-                               if (CAS_key(kvs, idx, NULL, key_slot)) {
-                                       /**
-                                               @Begin
-                                               @Commit_point_define: true
-                                               @Potential_commit_point_label: Write_Key_Point
-                                               @Label: Put_WriteKey
-                                               @End
-                                       */
-                                       chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
-                                       hashes[idx] = fullhash; // Memorize full hash
-                                       break;
-                               }
-                               K = key(kvs, idx); // CAS failed, get updated value
-                               MODEL_ASSERT (K != NULL);
-                       }
-
-                       // Key slot not null, there exists a Key here
-                       if (keyeq(K, key_slot, hashes, idx, fullhash))
-                               break; // Got it
-                       
-                       // Notice that the logic here should be consistent with that of get.
-                       // The first predicate means too many reprobes means nothing in the
-                       // old table.
-                       if (++reprobe_cnt >= reprobe_limit(len) ||
-                               K == TOMBSTONE) { // Found a Tombstone key, no more keys
-                               newkvs = chm->resize(topmap, kvs);
-                               //model_print("resize1\n");
-                               // Help along an existing copy
-                               if (expVal != NULL) topmap->help_copy(newkvs);
-                               return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
-                       }
-
-                       idx = (idx + 1) & (len - 1); // Reprobe
-               } // End of spinning till we get a Key slot
-
-               if (val_slot == V) return V; // Fast cutout for no-change
-       
-               // Here it tries to resize cause it doesn't want other threads to stop
-               // its progress (eagerly try to resize soon)
-               /**** FIXME: miss ****/
-               newkvs = chm->_newkvs.load(memory_order_acquire);
-               /**
-                       //@Begin
-                       @Commit_point_define_check: true
-                       @Label: Put_ReadNewKVS
-                       @End
-               */
-               if (newkvs == NULL &&
-                       ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
-                       //model_print("resize2\n");
-                       newkvs = chm->resize(topmap, kvs); // Force the copy to start
-               }
-               
-               // Finish the copy and then put it in the new table
-               if (newkvs != NULL)
-                       return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
-                               expVal), key_slot, val_slot, expVal);
-               
-               // Decided to update the existing table
-               while (true) {
-                       MODEL_ASSERT (!is_prime(V));
-
-                       if (expVal != NO_MATCH_OLD &&
-                               V != expVal &&
-                               (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
-                               !(V == NULL && expVal == TOMBSTONE) &&
-                               (expVal == NULL || !valeq(expVal, V))) {
-                               /**
-                                       //@Begin
-                                       @Commit_point_define: expVal == TOMBSTONE
-                                       @Potential_commit_point_label: Read_Val_Point
-                                       @Label: PutIfAbsent_Fail_Point
-                                               # This is a check for the PutIfAbsent() when the value
-                                               # is not absent
-                                       @End
-                               */
-                               /**
-                                       //@Begin
-                                       @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
-                                       @Potential_commit_point_label: Read_Val_Point
-                                       @Label: RemoveIfMatch_Fail_Point
-                                       @End
-                               */
-                               /**
-                                       //@Begin
-                                       @Commit_point_define: expVal != NULL && !valeq(expVal, V)
-                                       @Potential_commit_point_label: Read_Val_Point
-                                       @Label: ReplaceIfMatch_Fail_Point
-                                       @End
-                               */
-                               return V; // Do not update!
-                       }
-
-                       if (CAS_val(kvs, idx, V, val_slot)) {
-                               /**
-                                       @Begin
-                                       # The only point where a successful put happens
-                                       @Commit_point_define: true
-                                       @Potential_commit_point_label: Write_Val_Point
-                                       @Label: Put_Point
-                                       @End
-                               */
-                               if (expVal != NULL) { // Not called by a table-copy
-                                       // CAS succeeded, should adjust size
-                                       // Both normal put's and table-copy calls putIfMatch, but
-                                       // table-copy does not increase the number of live K/V pairs
-                                       if ((V == NULL || V == TOMBSTONE) &&
-                                               val_slot != TOMBSTONE)
-                                               chm->_size.fetch_add(1, memory_order_relaxed);
-                                       if (!(V == NULL || V == TOMBSTONE) &&
-                                               val_slot == TOMBSTONE)
-                                               chm->_size.fetch_add(-1, memory_order_relaxed);
-                               }
-                               return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
-                       }
-                       // Else CAS failed
-                       V = val(kvs, idx);
-                       if (is_prime(V))
-                               return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
-                                       idx, expVal), key_slot, val_slot, expVal);
-               }
-       }
-
-       // Help along an existing table-resize. This is a fast cut-out wrapper.
-       kvs_data* help_copy(kvs_data *helper) {
-               /**** FIXME: miss ****/
-               kvs_data *topkvs = _kvs.load(memory_order_acquire);
-               CHM *topchm = get_chm(topkvs);
-		// No copy in progress
-               if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
-               topchm->help_copy_impl(this, topkvs, false);
-               return helper;
-       }
-};
-/**
-       @Begin
-       @Class_end
-       @End
-*/
-
-#endif
diff --git a/benchmark/cliffc-hashtable/main.cc b/benchmark/cliffc-hashtable/main.cc
deleted file mode 100644 (file)
index 5c4f93c..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-#include <iostream>
-#include <threads.h>
-#include "cliffc_hashtable.h"
-
-using namespace std;
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::MATCH_ANY = new slot(false, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::NO_MATCH_OLD = new slot(false, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::TOMBPRIME = new slot(true, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::TOMBSTONE = new slot(false, NULL);
-
-
-class IntWrapper {
-       private:
-               public:
-           int _val;
-
-               IntWrapper(int val) : _val(val) {}
-
-               IntWrapper() : _val(0) {}
-
-               IntWrapper(IntWrapper& copy) : _val(copy._val) {}
-
-               int get() {
-                       return _val;
-               }
-
-               int hashCode() {
-                       return _val;
-               }
-               
-               bool operator==(const IntWrapper& rhs) {
-                       return false;
-               }
-
-               bool equals(const void *another) {
-                       if (another == NULL)
-                               return false;
-                       IntWrapper *ptr =
-                               (IntWrapper*) another;
-                       return ptr->_val == _val;
-               }
-};
-
-cliffc_hashtable<IntWrapper, IntWrapper> *table;
-IntWrapper *val1, *val2;
-IntWrapper *k0, *k1, *k2, *k3, *k4, *k5;
-IntWrapper *v0, *v1, *v2, *v3, *v4, *v5;
-
-void threadA(void *arg) {
-       IntWrapper *Res;
-       int res;
-       Res = table->put(k3, v3);
-       res = Res == NULL ? 0 : Res->_val;
-       printf("Put1: key_%d, val_%d, res_%d\n", k3->_val, v3->_val, res);
-
-       Res = table->get(k2);
-       res = Res == NULL ? 0 : Res->_val;
-       printf("Get2: key_%d, res_%d\n", k2->_val, res);
-}
-
-void threadB(void *arg) {
-       IntWrapper *Res;
-       int res;
-       Res = table->put(k2, v2);
-       res = Res == NULL ? 0 : Res->_val;
-       printf("Put3: key_%d, val_%d, res_%d\n", k2->_val, v2->_val, res);
-
-       Res = table->get(k3);
-       res = Res == NULL ? 0 : Res->_val;
-       printf("Get4: key_%d, res_%d\n", k3->_val, res);
-}
-
-void threadC(void *arg) {
-}
-
-int user_main(int argc, char *argv[]) {
-       thrd_t t1, t2, t3;
-       table = new cliffc_hashtable<IntWrapper, IntWrapper>(32);
-    k1 = new IntWrapper(3);
-       k2 = new IntWrapper(5);
-       k3 = new IntWrapper(11);
-       k4 = new IntWrapper(7);
-       k5 = new IntWrapper(13);
-
-       v0 = new IntWrapper(2048);
-       v1 = new IntWrapper(1024);
-       v2 = new IntWrapper(47);
-       v3 = new IntWrapper(73);
-       v4 = new IntWrapper(81);
-       v5 = new IntWrapper(99);
-
-       thrd_create(&t1, threadA, NULL);
-       thrd_create(&t2, threadB, NULL);
-       thrd_create(&t3, threadC, NULL);
-       thrd_join(t1);
-       thrd_join(t2);
-       thrd_join(t3);
-       
-       return 0;
-}
-
-
diff --git a/benchmark/cliffc-hashtable/main.cc.backup b/benchmark/cliffc-hashtable/main.cc.backup
deleted file mode 100644 (file)
index 4806fca..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#include <iostream>
-#include <threads.h>
-#include "cliffc_hashtable.h"
-
-using namespace std;
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::MATCH_ANY = new slot(false, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::NO_MATCH_OLD = new slot(false, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::TOMBPRIME = new slot(true, NULL);
-
-template<typename TypeK, typename TypeV>
-slot* const cliffc_hashtable<TypeK, TypeV>::TOMBSTONE = new slot(false, NULL);
-
-
-class IntWrapper {
-       private:
-               public:
-           int _val;
-
-               IntWrapper(int val) : _val(val) {}
-
-               IntWrapper() : _val(0) {}
-
-               IntWrapper(IntWrapper& copy) : _val(copy._val) {}
-
-               int get() {
-                       return _val;
-               }
-
-               int hashCode() {
-                       return _val;
-               }
-               
-               bool operator==(const IntWrapper& rhs) {
-                       return false;
-               }
-
-               bool equals(const void *another) {
-                       if (another == NULL)
-                               return false;
-                       IntWrapper *ptr =
-                               (IntWrapper*) another;
-                       return ptr->_val == _val;
-               }
-};
-
-cliffc_hashtable<IntWrapper, IntWrapper> *table;
-IntWrapper *val1, *val2;
-IntWrapper *k0, *k1, *k2, *k3, *k4, *k5;
-IntWrapper *v0, *v1, *v2, *v3, *v4, *v5;
-
-void threadA(void *arg) {
-       //table->put(k1, v4);
-       table->put(k3, v3);
-       val1 = table->get(k2);
-       //table->put(k3, v3);
-       /*
-       val1 = table->get(k3);
-       if (val1 != NULL)
-               model_print("val1: %d\n", val1->_val);
-       else
-               model_print("val1: NULL\n");*/
-       //table->put(k3, v3);
-       
-}
-
-void threadB(void *arg) {
-       //table->put(k1, v1);
-       //table->put(k2, v4);
-       //table->put(k3, v3);
-}
-
-void threadMain(void *arg) {
-       table->put(k2, v2);
-       val1 = table->get(k3);
-       //val2 = table->get(k2);
-       /*
-       if (val1 != NULL)
-               model_print("val1: %d\n", val1->_val);
-       else
-               model_print("val1: NULL\n");
-       if (val2 != NULL)
-               model_print("val2: %d\n", val2->_val);
-       else
-               model_print("val2: NULL\n");*/
-}
-
-int user_main(int argc, char *argv[]) {
-       thrd_t t1, t2;
-       table = new cliffc_hashtable<IntWrapper, IntWrapper>(32);
-    k1 = new IntWrapper(3);
-       k2 = new IntWrapper(5);
-       k3 = new IntWrapper(11);
-       k4 = new IntWrapper(7);
-       k5 = new IntWrapper(13);
-
-       v1 = new IntWrapper(1024);
-       v2 = new IntWrapper(47);
-       v3 = new IntWrapper(73);
-       v4 = new IntWrapper(81);
-       v5 = new IntWrapper(99);
-
-       v0 = new IntWrapper(2048);
-       //table->put(k1, v0);
-       //table->put(k2, v0);
-       //model_print("hey\n");
-       thrd_create(&t1, threadA, NULL);
-       thrd_create(&t2, threadB, NULL);
-       threadMain(NULL);
-
-       thrd_join(t1);
-       thrd_join(t2);
-       /*
-       if (val1 == NULL) {
-               cout << "val1: NULL" << endl;
-       } else {
-               cout << val1->get() << endl;
-       }
-       //MODEL_ASSERT(val1 == NULL || val1->get() == 2 || val1->get() == 81);
-       if (val2 == NULL) {
-               cout << "val2: NULL" << endl;
-       } else {
-               cout << val2->get() << endl;
-       }
-       */
-       return 0;
-}
-
diff --git a/benchmark/concurrent-hashmap/ConcurrentHashMap.java b/benchmark/concurrent-hashmap/ConcurrentHashMap.java
deleted file mode 100644 (file)
index 23a4ed4..0000000
+++ /dev/null
@@ -1,1224 +0,0 @@
-/*
-  File: ConcurrentHashMap
-
-  Written by Doug Lea. Adapted and released, under explicit
-  permission, from JDK1.2 HashMap.java and Hashtable.java which
-  carries the following copyright:
-
-     * Copyright 1997 by Sun Microsystems, Inc.,
-     * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
-     * All rights reserved.
-     *
-     * This software is the confidential and proprietary information
-     * of Sun Microsystems, Inc. ("Confidential Information").  You
-     * shall not disclose such Confidential Information and shall use
-     * it only in accordance with the terms of the license agreement
-     * you entered into with Sun.
-
-  History:
-  Date       Who                What
-  26nov2000  dl               Created, based on ConcurrentReaderHashMap
-  12jan2001  dl               public release
-  17nov2001  dl               Minor tunings
-  24oct2003  dl               Segment implements Serializable
-*/
-
-package EDU.oswego.cs.dl.util.concurrent;
-
-import java.util.Map;
-import java.util.AbstractMap;
-import java.util.AbstractSet;
-import java.util.AbstractCollection;
-import java.util.Collection;
-import java.util.Set;
-import java.util.Iterator;
-import java.util.Enumeration;
-import java.util.NoSuchElementException;
-
-import java.io.Serializable;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-
-
-/**
- * A version of Hashtable supporting 
- * concurrency for both retrievals and updates:
- *
- * <dl> 
- * <dt> Retrievals
- *
- * <dd> Retrievals may overlap updates.  (This is the same policy as
- * ConcurrentReaderHashMap.)  Successful retrievals using get(key) and
- * containsKey(key) usually run without locking. Unsuccessful
- * retrievals (i.e., when the key is not present) do involve brief
- * synchronization (locking).  Because retrieval operations can
- * ordinarily overlap with update operations (i.e., put, remove, and
- * their derivatives), retrievals can only be guaranteed to return the
- * results of the most recently <em>completed</em> operations holding
- * upon their onset. Retrieval operations may or may not return
- * results reflecting in-progress writing operations.  However, the
- * retrieval operations do always return consistent results -- either
- * those holding before any single modification or after it, but never
- * a nonsense result.  For aggregate operations such as putAll and
- * clear, concurrent reads may reflect insertion or removal of only
- * some entries.  
- * <p>
- *
- * Iterators and Enumerations (i.e., those returned by
- * keySet().iterator(), entrySet().iterator(), values().iterator(),
- * keys(), and elements()) return elements reflecting the state of the
- * hash table at some point at or since the creation of the
- * iterator/enumeration.  They will return at most one instance of
- * each element (via next()/nextElement()), but might or might not
- * reflect puts and removes that have been processed since they were
- * created.  They do <em>not</em> throw ConcurrentModificationException.
- * However, these iterators are designed to be used by only one
- * thread at a time. Passing an iterator across multiple threads may
- * lead to unpredictable results if the table is being concurrently
- * modified.  <p>
- *
- *
- * <dt> Updates
- *
- * <dd> This class supports a hard-wired preset <em>concurrency
- * level</em> of 32. This allows a maximum of 32 put and/or remove
- * operations to proceed concurrently. This level is an upper bound on
- * concurrency, not a guarantee, since it interacts with how
- * well-strewn elements are across bins of the table. (The preset
- * value in part reflects the fact that even on large multiprocessors,
- * factors other than synchronization tend to be bottlenecks when more
- * than 32 threads concurrently attempt updates.)
- * Additionally, operations triggering internal resizing and clearing
- * do not execute concurrently with any operation.  
- * <p>
- *
- * There is <em>NOT</em> any support for locking the entire table to
- * prevent updates. This makes it imposssible, for example, to
- * add an element only if it is not already present, since another
- * thread may be in the process of doing the same thing.
- * If you need such capabilities, consider instead using the
- * ConcurrentReaderHashMap class. 
- *
- * </dl>
- *
- * Because of how concurrency control is split up, the
- * size() and isEmpty() methods require accumulations across 32
- * control segments, and so might be slightly slower than you expect.
- * <p>
- *
- * This class may be used as a direct replacement for
- * java.util.Hashtable in any application that does not rely
- * on the ability to lock the entire table to prevent updates.  
- * As of this writing, it performs much faster than Hashtable in
- * typical multi-threaded applications with multiple readers and writers.
- * Like Hashtable but unlike java.util.HashMap,
- * this class does NOT allow <tt>null</tt> to be used as a key or
- * value. 
- * <p> 
- *
- * Implementation note: A slightly
- * faster implementation of this class will be possible once planned
- * Java Memory Model revisions are in place.
- *
- * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
-
- **/
-
-
-public class ConcurrentHashMap 
-  extends    AbstractMap 
-  implements Map, Cloneable, Serializable {
-
-  /*
-    The basic strategy is an optimistic-style scheme based on
-    the guarantee that the hash table and its lists are always
-    kept in a consistent enough state to be read without locking:
-
-    * Read operations first proceed without locking, by traversing the
-       apparently correct list of the apparently correct bin. If an
-       entry is found, but not invalidated (value field null), it is
-       returned. If not found, operations must recheck (after a memory
-       barrier) to make sure they are using both the right list and
-       the right table (which can change under resizes). If
-       invalidated, reads must acquire main update lock to wait out
-       the update, and then re-traverse.
-
-    * All list additions are at the front of each bin, making it easy
-       to check changes, and also fast to traverse.  Entry next
-       pointers are never assigned. Remove() builds new nodes when
-       necessary to preserve this.
-
-    * Remove() (also clear()) invalidates removed nodes to alert read
-       operations that they must wait out the full modifications.
-
-    * Locking for puts, removes (and, when necessary gets, etc)
-      is controlled by Segments, each covering a portion of the
-      table. During operations requiring global exclusivity (mainly
-      resize and clear), ALL of these locks are acquired at once.
-      Note that these segments are NOT contiguous -- they are based
-      on the least 5 bits of hashcodes. This ensures that the same
-      segment controls the same slots before and after resizing, which
-      is necessary for supporting concurrent retrievals. This
-      comes at the price of a mismatch of logical vs physical locality,
-      but this seems not to be a performance problem in practice.
-  */
-
-  /**
-   * The hash table data.
-   */
-  protected transient Entry[] table;
-
-
-  /** 
-   * The number of concurrency control segments.
-   * The value can be at most 32 since ints are used
-   * as bitsets over segments. Emprically, it doesn't
-   * seem to pay to decrease it either, so the value should be at least 32.
-   * In other words, do not redefine this :-)
-   **/
-
-  protected static final int CONCURRENCY_LEVEL = 32;
-
-  /**
-   * Mask value for indexing into segments
-   **/
-
-  protected static final int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
-
-  /**
-   * Bookkeeping for each concurrency control segment.
-   * Each segment contains a local count of the number of 
-   * elements in its region.
-   * However, the main use of a Segment is for its lock.
-   **/
-  protected final static class Segment implements Serializable {
-    /**
-     * The number of elements in this segment's region.
-     * It is always updated within synchronized blocks.
-     **/
-    protected int count;
-
-    /**
-     * Get the count under synch. 
-     **/
-    protected synchronized int getCount() { return count; }
-
-    /**
-     * Force a synchronization
-     **/
-    protected synchronized void synch() {}
-  }
-
-  /**
-   * The array of concurrency control segments.
-   **/
-
-  protected final Segment[] segments = new Segment[CONCURRENCY_LEVEL]; 
-
-
-  /**
-   * The default initial number of table slots for this table (32).
-   * Used when not otherwise specified in constructor.
-   **/
-  public static int DEFAULT_INITIAL_CAPACITY = 32; 
-
-
-  /**
-   * The minimum capacity, used if a lower value is implicitly specified
-   * by either of the constructors with arguments.  
-   * MUST be a power of two.
-   */
-  private static final int MINIMUM_CAPACITY = 32;
-  
-  /**
-   * The maximum capacity, used if a higher value is implicitly specified
-   * by either of the constructors with arguments.
-   * MUST be a power of two <= 1<<30.
-   */
-  private static final int MAXIMUM_CAPACITY = 1 << 30;
-  
-  /**
-   * The default load factor for this table (0.75)
-   * Used when not otherwise specified in constructor.
-   **/
-
-  public static final float DEFAULT_LOAD_FACTOR = 0.75f; 
-
-  /**
-   * The load factor for the hash table.
-   *
-   * @serial
-   */
-  protected final float loadFactor;
-
-  /**
-   * Per-segment resize threshold.  
-   *
-   * @serial
-   */
-  protected int threshold;
-
-
-  /**
-   * Number of segments voting for resize. The table is
-   * doubled when 1/4 of the segments reach threshold.
-   * Volatile but updated without synch since this is just a heuristic.
-   **/
-
-  protected transient volatile int votesForResize;
-
-
-  /**
-   * Return the number of set bits in w.
-   * For a derivation of this algorithm, see
-   * "Algorithms and data structures with applications to 
-   *  graphics and geometry", by Jurg Nievergelt and Klaus Hinrichs,
-   *  Prentice Hall, 1993.
-   * See also notes by Torsten Sillke at
-   * http://www.mathematik.uni-bielefeld.de/~sillke/PROBLEMS/bitcount
-   **/
-  protected static int bitcount(int w) {
-    w -= (0xaaaaaaaa & w) >>> 1;
-    w = (w & 0x33333333) + ((w >>> 2) & 0x33333333);
-    w = (w + (w >>> 4)) & 0x0f0f0f0f;
-    w += w >>> 8;
-    w += w >>> 16;
-    return w & 0xff;
-  }
-
-  /**
-   * Returns the appropriate capacity (power of two) for the specified 
-   * initial capacity argument.
-   */
-  private int p2capacity(int initialCapacity) {
-    int cap = initialCapacity;
-    
-    // Compute the appropriate capacity
-    int result;
-    if (cap > MAXIMUM_CAPACITY || cap < 0) {
-      result = MAXIMUM_CAPACITY;
-    } else {
-      result = MINIMUM_CAPACITY;
-      while (result < cap)
-        result <<= 1;
-    }
-    return result;
-  }
-
-  /**
-   * Return hash code for Object x. Since we are using power-of-two
-   * tables, it is worth the effort to improve hashcode via
-   * the same multiplicative scheme as used in IdentityHashMap.
-   */
-  protected static int hash(Object x) {
-    int h = x.hashCode();
-    // Multiply by 127 (quickly, via shifts), and mix in some high
-    // bits to help guard against bunching of codes that are
-    // consecutive or equally spaced.
-    return ((h << 7) - h + (h >>> 9) + (h >>> 17));
-  }
-
-
-  /** 
-   * Check for equality of non-null references x and y. 
-   **/
-  protected boolean eq(Object x, Object y) {
-    return x == y || x.equals(y);
-  }
-
-  /** Create table array and set the per-segment threshold **/
-  protected Entry[] newTable(int capacity) {
-    threshold = (int)(capacity * loadFactor / CONCURRENCY_LEVEL) + 1;
-    return new Entry[capacity];
-  }
-
-  /**
-   * Constructs a new, empty map with the specified initial 
-   * capacity and the specified load factor.
-   *
-   * @param initialCapacity the initial capacity.
-   *  The actual initial capacity is rounded to the nearest power of two.
-   * @param loadFactor  the load factor threshold, used to control resizing.
-   *   This value is used in an approximate way: When at least
-   *   a quarter of the segments of the table reach per-segment threshold, or
-   *   one of the segments itself exceeds overall threshold,
-   *   the table is doubled. 
-   *   This will on average cause resizing when the table-wide
-   *   load factor is slightly less than the threshold. If you'd like
-   *   to avoid resizing, you can set this to a ridiculously large
-   *   value.
-   * @throws IllegalArgumentException  if the load factor is nonpositive.
-   */
-  public ConcurrentHashMap(int initialCapacity, 
-                           float loadFactor) {
-    if (!(loadFactor > 0))
-      throw new IllegalArgumentException("Illegal Load factor: "+
-                                         loadFactor);
-    this.loadFactor = loadFactor;
-    for (int i = 0; i < segments.length; ++i) 
-      segments[i] = new Segment();
-    int cap = p2capacity(initialCapacity);
-    table = newTable(cap);
-  }
-
-  /**
-   * Constructs a new, empty map with the specified initial 
-   * capacity and default load factor.
-   *
-   * @param   initialCapacity   the initial capacity of the 
-   *                            ConcurrentHashMap.
-   * @throws    IllegalArgumentException if the initial maximum number 
-   *              of elements is less
-   *              than zero.
-   */
-  public ConcurrentHashMap(int initialCapacity) {
-    this(initialCapacity, DEFAULT_LOAD_FACTOR);
-  }
-
-  /**
-   * Constructs a new, empty map with a default initial capacity
-   * and default load factor.
-   */
-  public ConcurrentHashMap() {
-    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
-  }
-
-  /**
-   * Constructs a new map with the same mappings as the given map.  The
-   * map is created with a capacity of twice the number of mappings in
-   * the given map or 32 (whichever is greater), and a default load factor.
-   */
-  public ConcurrentHashMap(Map t) {
-    this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1, 
-                  MINIMUM_CAPACITY),
-         DEFAULT_LOAD_FACTOR);
-    putAll(t);
-  }
-
-  /**
-   * Returns the number of key-value mappings in this map.
-   *
-   * @return the number of key-value mappings in this map.
-   */
-  public int size() {
-    int c = 0;
-    for (int i = 0; i < segments.length; ++i) 
-      c += segments[i].getCount();
-    return c;
-  }
-
-  /**
-   * Returns <tt>true</tt> if this map contains no key-value mappings.
-   *
-   * @return <tt>true</tt> if this map contains no key-value mappings.
-   */
-  public boolean isEmpty() {
-    for (int i = 0; i < segments.length; ++i) 
-      if (segments[i].getCount() != 0)
-        return false;
-    return true;
-  }
-
-
-  /**
-   * Returns the value to which the specified key is mapped in this table.
-   *
-   * @param   key   a key in the table.
-   * @return  the value to which the key is mapped in this table;
-   *          <code>null</code> if the key is not mapped to any value in
-   *          this table.
-   * @exception  NullPointerException  if the key is
-   *               <code>null</code>.
-   * @see     #put(Object, Object)
-   */
-  public Object get(Object key) {
-    int hash = hash(key);     // throws null pointer exception if key null
-
-    // Try first without locking...
-    Entry[] tab = table;
-    int index = hash & (tab.length - 1);
-    Entry first = tab[index];
-    Entry e;
-
-    for (e = first; e != null; e = e.next) {
-      if (e.hash == hash && eq(key, e.key)) {
-        Object value = e.value;
-        if (value != null) 
-          return value;
-        else
-          break;
-      }
-    }
-
-    // Recheck under synch if key apparently not there or interference
-    Segment seg = segments[hash & SEGMENT_MASK];
-    synchronized(seg) { 
-      tab = table;
-      index = hash & (tab.length - 1);
-      Entry newFirst = tab[index];
-      if (e != null || first != newFirst) {
-        for (e = newFirst; e != null; e = e.next) {
-          if (e.hash == hash && eq(key, e.key)) 
-            return e.value;
-        }
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Tests if the specified object is a key in this table.
-   * 
-   * @param   key   possible key.
-   * @return  <code>true</code> if and only if the specified object 
-   *          is a key in this table, as determined by the 
-   *          <tt>equals</tt> method; <code>false</code> otherwise.
-   * @exception  NullPointerException  if the key is
-   *               <code>null</code>.
-   * @see     #contains(Object)
-   */
-
-  public boolean containsKey(Object key) {
-    return get(key) != null;
-  }
-  
-
-  /**
-   * Maps the specified <code>key</code> to the specified 
-   * <code>value</code> in this table. Neither the key nor the 
-   * value can be <code>null</code>. (Note that this policy is
-   * the same as for java.util.Hashtable, but unlike java.util.HashMap,
-   * which does accept nulls as valid keys and values.)<p>
-   *
-   * The value can be retrieved by calling the <code>get</code> method 
-   * with a key that is equal to the original key. 
-   *
-   * @param      key     the table key.
-   * @param      value   the value.
-   * @return     the previous value of the specified key in this table,
-   *             or <code>null</code> if it did not have one.
-   * @exception  NullPointerException  if the key or value is
-   *               <code>null</code>.
-   * @see     Object#equals(Object)
-   * @see     #get(Object)
-   */
-  public Object put(Object key, Object value) {
-    if (value == null) 
-      throw new NullPointerException();
-    
-    int hash = hash(key);
-    Segment seg = segments[hash & SEGMENT_MASK];
-    int segcount;
-    Entry[] tab;
-    int votes;
-
-    synchronized(seg) {
-      tab = table;
-      int index = hash & (tab.length-1);
-      Entry first = tab[index];
-
-      for (Entry e = first; e != null; e = e.next) {
-        if (e.hash == hash && eq(key, e.key)) {
-          Object oldValue = e.value; 
-          e.value = value;
-          return oldValue;
-        }
-      }
-
-      //  Add to front of list
-      Entry newEntry = new Entry(hash, key, value, first);
-      tab[index] = newEntry;
-
-      if ((segcount = ++seg.count) < threshold)
-        return null;
-
-      int bit = (1 << (hash & SEGMENT_MASK));
-      votes = votesForResize;
-      if ((votes & bit) == 0)
-        votes = votesForResize |= bit;
-    }
-
-    // Attempt resize if 1/4 segs vote,
-    // or if this seg itself reaches the overall threshold.
-    // (The latter check is just a safeguard to avoid pathological cases.)
-    if (bitcount(votes) >= CONCURRENCY_LEVEL / 4  ||
-        segcount > (threshold * CONCURRENCY_LEVEL)) 
-      resize(0, tab);
-
-    return null;
-  }
-
-  /**
-   * Gather all locks in order to call rehash, by
-   * recursing within synch blocks for each segment index.
-   * @param index the current segment. initially call value must be 0
-   * @param assumedTab the state of table on first call to resize. If
-   * this changes on any call, the attempt is aborted because the
-   * table has already been resized by another thread.
-   */
-
-  protected void resize(int index, Entry[] assumedTab) {
-    Segment seg = segments[index];
-    synchronized(seg) {
-      if (assumedTab == table) {
-        int next = index+1;
-        if (next < segments.length)
-          resize(next, assumedTab);
-        else
-          rehash();
-      }
-    }
-  }
-
-  /**
-   * Rehashes the contents of this map into a new table
-   * with a larger capacity. 
-   */
-  protected void rehash() {
-    votesForResize = 0; // reset
-
-    Entry[] oldTable = table;
-    int oldCapacity = oldTable.length;
-
-    if (oldCapacity >= MAXIMUM_CAPACITY) {
-      threshold = Integer.MAX_VALUE; // avoid retriggering
-      return;
-    }
-    
-    int newCapacity = oldCapacity << 1;
-    Entry[] newTable = newTable(newCapacity);
-    int mask = newCapacity - 1;
-
-    /*
-     * Reclassify nodes in each list to new Map.  Because we are
-     * using power-of-two expansion, the elements from each bin
-     * must either stay at same index, or move to
-     * oldCapacity+index. We also eliminate unnecessary node
-     * creation by catching cases where old nodes can be reused
-     * because their next fields won't change. Statistically, at
-     * the default threshhold, only about one-sixth of them need
-     * cloning. (The nodes they replace will be garbage
-     * collectable as soon as they are no longer referenced by any
-     * reader thread that may be in the midst of traversing table
-     * right now.)
-     */
-    
-    for (int i = 0; i < oldCapacity ; i++) {
-      // We need to guarantee that any existing reads of old Map can
-      //  proceed. So we cannot yet null out each bin.  
-      Entry e = oldTable[i];
-      
-      if (e != null) {
-        int idx = e.hash & mask;
-        Entry next = e.next;
-        
-        //  Single node on list
-        if (next == null) 
-          newTable[idx] = e;
-        
-        else {    
-          // Reuse trailing consecutive sequence of all same bit
-          Entry lastRun = e;
-          int lastIdx = idx;
-          for (Entry last = next; last != null; last = last.next) {
-            int k = last.hash & mask;
-            if (k != lastIdx) {
-              lastIdx = k;
-              lastRun = last;
-            }
-          }
-          newTable[lastIdx] = lastRun;
-          
-          // Clone all remaining nodes
-          for (Entry p = e; p != lastRun; p = p.next) {
-            int k = p.hash & mask;
-            newTable[k] = new Entry(p.hash, p.key, 
-                                    p.value, newTable[k]);
-          }
-        }
-      }
-    }
-    
-    table = newTable;
-  }
-
-
-  /**
-   * Removes the key (and its corresponding value) from this 
-   * table. This method does nothing if the key is not in the table.
-   *
-   * @param   key   the key that needs to be removed.
-   * @return  the value to which the key had been mapped in this table,
-   *          or <code>null</code> if the key did not have a mapping.
-   * @exception  NullPointerException  if the key is
-   *               <code>null</code>.
-   */
-  public Object remove(Object key) {
-    return remove(key, null); 
-  }
-
-
-  /**
-   * Removes the (key, value) pair from this 
-   * table. This method does nothing if the key is not in the table,
-   * or if the key is associated with a different value. This method
-   * is needed by EntrySet.
-   *
-   * @param   key   the key that needs to be removed.
-   * @param   value   the associated value. If the value is null,
-   *                   it means "any value".
-   * @return  the value to which the key had been mapped in this table,
-   *          or <code>null</code> if the key did not have a mapping.
-   * @exception  NullPointerException  if the key is
-   *               <code>null</code>.
-   */
-
-  protected Object remove(Object key, Object value) {
-    /*
-      Find the entry, then 
-        1. Set value field to null, to force get() to retry
-        2. Rebuild the list without this entry.
-           All entries following removed node can stay in list, but
-           all preceeding ones need to be cloned.  Traversals rely
-           on this strategy to ensure that elements will not be
-          repeated during iteration.
-    */
-
-    int hash = hash(key);
-    Segment seg = segments[hash & SEGMENT_MASK];
-
-    synchronized(seg) {
-      Entry[] tab = table;
-      int index = hash & (tab.length-1);
-      Entry first = tab[index];
-      Entry e = first;
-
-      for (;;) {
-        if (e == null)
-          return null;
-        if (e.hash == hash && eq(key, e.key)) 
-          break;
-        e = e.next;
-      }
-
-      Object oldValue = e.value;
-      if (value != null && !value.equals(oldValue))
-        return null;
-     
-      e.value = null;
-
-      Entry head = e.next;
-      for (Entry p = first; p != e; p = p.next) 
-        head = new Entry(p.hash, p.key, p.value, head);
-      tab[index] = head;
-      seg.count--;
-      return oldValue;
-    }
-  }
-
-
-  /**
-   * Returns <tt>true</tt> if this map maps one or more keys to the
-   * specified value. Note: This method requires a full internal
-   * traversal of the hash table, and so is much slower than
-   * method <tt>containsKey</tt>.
-   *
-   * @param value value whose presence in this map is to be tested.
-   * @return <tt>true</tt> if this map maps one or more keys to the
-   * specified value.  
-   * @exception  NullPointerException  if the value is <code>null</code>.
-   */
-  public boolean containsValue(Object value) {
-
-    if (value == null) throw new NullPointerException();
-
-    for (int s = 0; s < segments.length; ++s) {
-      Segment seg = segments[s];
-      Entry[] tab;
-      synchronized(seg) { tab = table; }
-      for (int i = s; i < tab.length; i+= segments.length) {
-        for (Entry e = tab[i]; e != null; e = e.next) 
-          if (value.equals(e.value))
-            return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Tests if some key maps into the specified value in this table.
-   * This operation is more expensive than the <code>containsKey</code>
-   * method.<p>
-   *
-   * Note that this method is identical in functionality to containsValue,
-   * (which is part of the Map interface in the collections framework).
-   * 
-   * @param      value   a value to search for.
-   * @return     <code>true</code> if and only if some key maps to the
-   *             <code>value</code> argument in this table as 
-   *             determined by the <tt>equals</tt> method;
-   *             <code>false</code> otherwise.
-   * @exception  NullPointerException  if the value is <code>null</code>.
-   * @see        #containsKey(Object)
-   * @see        #containsValue(Object)
-   * @see         Map
-   */
-
-  public boolean contains(Object value) {
-    return containsValue(value);
-  }
-
-  /**
-   * Copies all of the mappings from the specified map to this one.
-   * 
-   * These mappings replace any mappings that this map had for any of the
-   * keys currently in the specified Map.
-   *
-   * @param t Mappings to be stored in this map.
-   */
-
-  public void putAll(Map t) {
-    int n = t.size();
-    if (n == 0)
-      return;
-
-    // Expand enough to hold at least n elements without resizing.
-    // We can only resize table by factor of two at a time.
-    // It is faster to rehash with fewer elements, so do it now.
-    for(;;) {
-      Entry[] tab;
-      int max;
-      synchronized(segments[0]) { // must synch on some segment. pick 0.
-        tab = table;
-        max = threshold * CONCURRENCY_LEVEL;
-      }
-      if (n < max)
-        break;
-      resize(0, tab);
-    }
-
-    for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
-      Map.Entry entry = (Map.Entry) it.next();
-      put(entry.getKey(), entry.getValue());
-    }
-  }
-
-  /**
-   * Removes all mappings from this map.
-   */
-
-  public void clear() {
-    // We don't need all locks at once so long as locks
-    //   are obtained in low to high order
-    for (int s = 0; s < segments.length; ++s) {
-      Segment seg = segments[s];
-      synchronized(seg) {
-        Entry[] tab = table;
-        for (int i = s; i < tab.length; i+= segments.length) {
-          for (Entry e = tab[i]; e != null; e = e.next) 
-            e.value = null; 
-          tab[i] = null;
-          seg.count = 0;
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns a shallow copy of this 
-   * <tt>ConcurrentHashMap</tt> instance: the keys and
-   * values themselves are not cloned.
-   *
-   * @return a shallow copy of this map.
-   */
-
-  public Object clone() {
-    // We cannot call super.clone, since it would share final segments array,
-    // and there's no way to reassign finals.
-    return new ConcurrentHashMap(this);
-  }
-
-  // Views
-
-  protected transient Set keySet = null;
-  protected transient Set entrySet = null;
-  protected transient Collection values = null;
-
-  /**
-   * Returns a set view of the keys contained in this map.  The set is
-   * backed by the map, so changes to the map are reflected in the set, and
-   * vice-versa.  The set supports element removal, which removes the
-   * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
-   * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
-   * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
-   * <tt>addAll</tt> operations.
-   *
-   * @return a set view of the keys contained in this map.
-   */
-  
-  public Set keySet() {
-    Set ks = keySet;
-    return (ks != null)? ks : (keySet = new KeySet());
-  }
-  
-  private class KeySet extends AbstractSet {
-    public Iterator iterator() {
-      return new KeyIterator();
-    }
-    public int size() {
-      return ConcurrentHashMap.this.size();
-    }
-    public boolean contains(Object o) {
-      return ConcurrentHashMap.this.containsKey(o);
-    }
-    public boolean remove(Object o) {
-      return ConcurrentHashMap.this.remove(o) != null;
-    }
-    public void clear() {
-      ConcurrentHashMap.this.clear();
-    }
-  }
-
-  /**
-   * Returns a collection view of the values contained in this map.  The
-   * collection is backed by the map, so changes to the map are reflected in
-   * the collection, and vice-versa.  The collection supports element
-   * removal, which removes the corresponding mapping from this map, via the
-   * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
-   * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
-   * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
-   *
-   * @return a collection view of the values contained in this map.
-   */
-  
-  public Collection values() {
-    Collection vs = values;
-    return (vs != null)? vs : (values = new Values());
-  }
-  
-  private class Values extends AbstractCollection {
-    public Iterator iterator() {
-      return new ValueIterator();
-    }
-    public int size() {
-      return ConcurrentHashMap.this.size();
-    }
-    public boolean contains(Object o) {
-      return ConcurrentHashMap.this.containsValue(o);
-    }
-    public void clear() {
-      ConcurrentHashMap.this.clear();
-    }
-  }
-
-  /**
-   * Returns a collection view of the mappings contained in this map.  Each
-   * element in the returned collection is a <tt>Map.Entry</tt>.  The
-   * collection is backed by the map, so changes to the map are reflected in
-   * the collection, and vice-versa.  The collection supports element
-   * removal, which removes the corresponding mapping from the map, via the
-   * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
-   * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
-   * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
-   *
-   * @return a collection view of the mappings contained in this map.
-   */
-  
-  public Set entrySet() {
-    Set es = entrySet;
-    return (es != null) ? es : (entrySet = new EntrySet());
-  }
-
-  private class EntrySet extends AbstractSet {
-    public Iterator iterator() {
-      return new HashIterator();
-    }
-    public boolean contains(Object o) {
-      if (!(o instanceof Map.Entry))
-        return false;
-      Map.Entry entry = (Map.Entry)o;
-      Object v = ConcurrentHashMap.this.get(entry.getKey());
-      return v != null && v.equals(entry.getValue());
-    }
-    public boolean remove(Object o) {
-      if (!(o instanceof Map.Entry))
-        return false;
-      Map.Entry e = (Map.Entry)o;
-      return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) != null;
-    }
-    public int size() {
-      return ConcurrentHashMap.this.size();
-    }
-    public void clear() {
-      ConcurrentHashMap.this.clear();
-    }
-  }
-
-  /**
-   * Returns an enumeration of the keys in this table.
-   *
-   * @return  an enumeration of the keys in this table.
-   * @see     Enumeration
-   * @see     #elements()
-   * @see      #keySet()
-   * @see      Map
-   */
-  public Enumeration keys() {
-    return new KeyIterator();
-  }
-
-  /**
-   * Returns an enumeration of the values in this table.
-   * Use the Enumeration methods on the returned object to fetch the elements
-   * sequentially.
-   *
-   * @return  an enumeration of the values in this table.
-   * @see     java.util.Enumeration
-   * @see     #keys()
-   * @see      #values()
-   * @see      Map
-   */
-  
-  public Enumeration elements() {
-    return new ValueIterator();
-  }
-
-  /**
-   * ConcurrentHashMap collision list entry.
-   */
-
-  protected static class Entry implements Map.Entry {
-    /* 
-       The use of volatile for value field ensures that
-       we can detect status changes without synchronization.
-       The other fields are never changed, and are
-       marked as final. 
-    */
-
-    protected final Object key;
-    protected volatile Object value;
-    protected final int hash;
-    protected final Entry next;
-
-    Entry(int hash, Object key, Object value, Entry next) {
-      this.value = value;
-      this.hash = hash;
-      this.key = key;
-      this.next = next;
-    }
-
-    // Map.Entry Ops 
-
-    public Object getKey() {
-      return key;
-    }
-
-    /**
-     * Get the value.  Note: In an entrySet or entrySet.iterator,
-     * unless you can guarantee lack of concurrent modification,
-     * <tt>getValue</tt> <em>might</em> return null, reflecting the
-     * fact that the entry has been concurrently removed. However,
-     * there are no assurances that concurrent removals will be
-     * reflected using this method.
-     * 
-     * @return     the current value, or null if the entry has been 
-     * detectably removed.
-     **/
-    public Object getValue() {
-      return value; 
-    }
-
-    /**
-     * Set the value of this entry.  Note: In an entrySet or
-     * entrySet.iterator), unless you can guarantee lack of concurrent
-     * modification, <tt>setValue</tt> is not strictly guaranteed to
-     * actually replace the value field obtained via the <tt>get</tt>
-     * operation of the underlying hash table in multithreaded
-     * applications.  If iterator-wide synchronization is not used,
-     * and any other concurrent <tt>put</tt> or <tt>remove</tt>
-     * operations occur, sometimes even to <em>other</em> entries,
-     * then this change is not guaranteed to be reflected in the hash
-     * table. (It might, or it might not. There are no assurances
-     * either way.)
-     *
-     * @param      value   the new value.
-     * @return     the previous value, or null if entry has been detectably
-     * removed.
-     * @exception  NullPointerException  if the value is <code>null</code>.
-     * 
-     **/
-
-    public Object setValue(Object value) {
-      if (value == null)
-        throw new NullPointerException();
-      Object oldValue = this.value;
-      this.value = value;
-      return oldValue;
-    }
-
-    public boolean equals(Object o) {
-      if (!(o instanceof Map.Entry))
-        return false;
-      Map.Entry e = (Map.Entry)o;
-      return (key.equals(e.getKey()) && value.equals(e.getValue()));
-    }
-    
-    public int hashCode() {
-      return  key.hashCode() ^ value.hashCode();
-    }
-    
-    public String toString() {
-      return key + "=" + value;
-    }
-
-  }
-
-  protected class HashIterator implements Iterator, Enumeration {
-    protected final Entry[] tab;           // snapshot of table
-    protected int index;                   // current slot 
-    protected Entry entry = null;          // current node of slot
-    protected Object currentKey;           // key for current node
-    protected Object currentValue;         // value for current node
-    protected Entry lastReturned = null;   // last node returned by next
-
-    protected HashIterator() {
-      // force all segments to synch
-      synchronized(segments[0]) { tab = table; }
-      for (int i = 1; i < segments.length; ++i) segments[i].synch();
-      index = tab.length - 1;
-    }
-
-    public boolean hasMoreElements() { return hasNext(); }
-    public Object nextElement() { return next(); }
-
-    public boolean hasNext() {
-      /*
-        currentkey and currentValue are set here to ensure that next()
-        returns normally if hasNext() returns true. This avoids
-        surprises especially when final element is removed during
-        traversal -- instead, we just ignore the removal during
-        current traversal.  
-      */
-
-      for (;;) {
-        if (entry != null) {
-          Object v = entry.value;
-          if (v != null) {
-            currentKey = entry.key;
-            currentValue = v;
-            return true;
-          }
-          else
-            entry = entry.next;
-        }
-
-        while (entry == null && index >= 0)
-          entry = tab[index--];
-
-        if (entry == null) {
-          currentKey = currentValue = null;
-          return false;
-        }
-      }
-    }
-
-    protected Object returnValueOfNext() { return entry; }
-
-    public Object next() {
-      if (currentKey == null && !hasNext())
-        throw new NoSuchElementException();
-
-      Object result = returnValueOfNext();
-      lastReturned = entry;
-      currentKey = currentValue = null;
-      entry = entry.next;
-      return result;
-    }
-
-    public void remove() {
-      if (lastReturned == null)
-        throw new IllegalStateException();
-      ConcurrentHashMap.this.remove(lastReturned.key);
-      lastReturned = null;
-    }
-
-  }
-
-  protected class KeyIterator extends HashIterator {
-    protected Object returnValueOfNext() { return currentKey; }
-  }
-  
-  protected class ValueIterator extends HashIterator {
-    protected Object returnValueOfNext() { return currentValue; }
-  }
-  
-  /**
-   * Save the state of the <tt>ConcurrentHashMap</tt> 
-   * instance to a stream (i.e.,
-   * serialize it).
-   *
-   * @serialData  
-   * An estimate of the table size, followed by
-   * the key (Object) and value (Object)
-   * for each key-value mapping, followed by a null pair.
-   * The key-value mappings are emitted in no particular order.
-   */
-
-  private void writeObject(java.io.ObjectOutputStream s)
-    throws IOException  {
-    // Write out the loadfactor, and any hidden stuff
-    s.defaultWriteObject();
-
-    // Write out capacity estimate. It is OK if this
-    // changes during the write, since it is only used by
-    // readObject to set initial capacity, to avoid needless resizings.
-
-    int cap;
-    synchronized(segments[0]) { cap = table.length; }
-    s.writeInt(cap);
-
-    // Write out keys and values (alternating)
-    for (int k = 0; k < segments.length; ++k) {
-      Segment seg = segments[k];
-      Entry[] tab;
-      synchronized(seg) { tab = table; }
-      for (int i = k; i < tab.length; i+= segments.length) {
-        for (Entry e = tab[i]; e != null; e = e.next) {
-          s.writeObject(e.key);
-          s.writeObject(e.value);
-        }
-      }
-    }
-
-    s.writeObject(null);
-    s.writeObject(null);
-  }
-
-  /**
-   * Reconstitute the <tt>ConcurrentHashMap</tt> 
-   * instance from a stream (i.e.,
-   * deserialize it).
-   */
-  private void readObject(java.io.ObjectInputStream s)
-    throws IOException, ClassNotFoundException  {
-    // Read in the threshold, loadfactor, and any hidden stuff
-    s.defaultReadObject();
-
-    int cap = s.readInt();
-    table = newTable(cap);
-    for (int i = 0; i < segments.length; ++i) 
-      segments[i] = new Segment();
-
-
-    // Read the keys and values, and put the mappings in the table
-    for (;;) {
-      Object key = s.readObject();
-      Object value = s.readObject();
-      if (key == null)
-        break;
-      put(key, value);
-    }
-  }
-}
diff --git a/benchmark/concurrent-hashmap/Makefile b/benchmark/concurrent-hashmap/Makefile
deleted file mode 100644 (file)
index 6b615a1..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-include ../benchmarks.mk
-
-BENCH := hashmap
-TESTS := main testcase1
-
-all: $(TESTS)
-
-$(BENCH).o : $(BENCH).h
-       $(CXX) -o $@ $< $(CXXFLAGS) -c $(LDFLAGS)
-
-$(TESTS): % : %.cc $(BENCH).o
-       $(CXX) -o $@ $< $(CXXFLAGS) $(LDFLAGS)
-
-clean:
-       rm -f *.o *.d $(TESTS)
diff --git a/benchmark/concurrent-hashmap/hashmap.h b/benchmark/concurrent-hashmap/hashmap.h
deleted file mode 100644 (file)
index 99973e3..0000000
+++ /dev/null
@@ -1,296 +0,0 @@
-#ifndef _HASHMAP_H
-#define _HASHMAP_H
-
-#include <iostream>
-#include <atomic>
-#include "stdio.h" 
-#include <stdlib.h>
-#include <mutex>
-
-#include <spec_lib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h" 
-
-
-#include "common.h"
-
-#define relaxed memory_order_relaxed
-#define release memory_order_release
-#define acquire memory_order_acquire
-#define acq_rel memory_order_acq_rel
-#define seq_cst memory_order_seq_cst
-
-using namespace std;
-
-/**
-       For the sake of simplicity, we map Integer -> Integer.
-*/
-
-class Entry {
-       public:
-       int key;
-       atomic_int value;
-       int hash;
-       atomic<Entry*> next;
-
-       Entry(int h, int k, int v, Entry *n) {
-               this->hash = h;
-               this->key = k;
-               this->value.store(v, relaxed);
-               this->next.store(n, relaxed);
-       }
-};
-
-class Segment {
-       public:
-       int count;
-       mutex segMutex;
-
-       void lock() {
-               segMutex.lock();
-       }
-
-       void unlock() {
-               segMutex.unlock();
-       }
-
-       Segment() {
-               this->count = 0;
-       }
-};
-
-/**
-    @Begin
-       @Class_begin
-       @End
-*/
-class HashMap {
-       public:
-
-       /**
-               @Begin
-               @Options:
-                       LANG = CPP;
-                       CLASS = HashMap;
-               @Global_define:
-                       @DeclareVar:
-                               IntegerMap *__map;
-                       @InitVar:
-                               __map = createIntegerMap();
-                       @Finalize:
-                               if (__map)
-                                       destroyIntegerMap(__map);
-                               return true;
-               @Happens_before: Put -> Get 
-               @Commutativity: Put <-> Put: _Method1.key != _Method2.key 
-               @Commutativity: Put <-> Get: _Method1.key != _Method2.key
-               @Commutativity: Get <-> Get: true 
-               @End
-       */
-
-       atomic<Entry*> *table;
-
-       int capacity;
-       int size;
-
-       static const int CONCURRENCY_LEVEL = 4;
-
-       static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
-
-       Segment *segments[CONCURRENCY_LEVEL];
-
-       static const int DEFAULT_INITIAL_CAPACITY = 16;
-
-       // Not gonna consider resize now...
-       
-       HashMap() {
-               /**
-                       @Begin
-                       @Entry_point
-                       @End
-               */
-               this->size = 0;
-               this->capacity = DEFAULT_INITIAL_CAPACITY;
-               this->table = new atomic<Entry*>[capacity];
-               for (int i = 0; i < capacity; i++) {
-                       atomic_init(&table[i], NULL);
-               }
-               for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
-                       segments[i] = new Segment;
-               }
-       }
-
-       int hashKey(int key) {
-               return key;
-       }
-       
-
-       /**
-               @Begin
-               @Interface: Get 
-               @Commit_point_set: GetReadValue1 | GetReadValue2 | GetReadNothing
-               @ID: __RET__ 
-               @Action:
-                       int res = getIntegerMap(__map, key);
-                       //model_print("Get: key=%d, res=%d\n", key, res);
-
-               @Post_check: __RET__ ? res == __RET__: true
-               @End
-       */
-       int get(int key) {
-               ASSERT (key);
-               int hash = hashKey(key);
-
-               // Try first without locking...
-               atomic<Entry*> *tab = table;
-               int index = hash & (capacity - 1);
-               atomic<Entry*> *first = &tab[index];
-               Entry *e;
-               int res = 0;
-
-               // Should be a load acquire
-               // This load action here makes it problematic for the SC analysis, what
-               // we need to do is as follows: if the get() method ever acquires the
-               // lock, we ignore this operation for the SC analysis, and otherwise we
-               // take it into consideration
-               
-               /**** UL (main.cc) ****/
-               Entry *firstPtr = first->load(acquire);
-
-               e = firstPtr;
-               while (e != NULL) {
-                       if (key, e->key) {
-                               /**** inadmissible (testcase1.cc) ****/
-                               res = e->value.load(seq_cst);
-                               /**
-                                       @Begin
-                                       @Commit_point_define_check: res != 0
-                                       @Label: GetReadValue1
-                                       @End
-                               */
-                               if (res != 0)
-                                       return res;
-                               else
-                                       break;
-                       }
-                       // Loading the next entry, this can be relaxed because the
-                       // synchronization has been established by the load of head
-                       e = e->next.load(relaxed);
-               }
-       
-               // Recheck under synch if key apparently not there or interference
-               Segment *seg = segments[hash & SEGMENT_MASK];
-               seg->lock(); // Critical region begins
-               // Not considering resize now, so ignore the reload of table...
-
-               // Synchronized by locking, no need to be load acquire
-               Entry *newFirstPtr = first->load(relaxed);
-               if (e != NULL || firstPtr != newFirstPtr) {
-                       e = newFirstPtr;
-                       while (e != NULL) {
-                               if (key == e->key) {
-                                       // Protected by lock, no need to be SC
-                                       res = e->value.load(relaxed);
-                                       /**
-                                               @Begin
-                                               @Commit_point_define_check: true 
-                                               @Label: GetReadValue2
-                                               @End
-                                       */
-                                       seg->unlock(); // Critical region ends
-                                       return res;
-                               }
-                               // Synchronized by locking
-                               e = e->next.load(relaxed);
-                       }
-               }
-               seg->unlock(); // Critical region ends
-               /**
-                       @Begin
-                       @Commit_point_define_check: true 
-                       @Label: GetReadNothing
-                       @End
-               */
-               return 0;
-       }
-
-       /**
-               @Begin
-               @Interface: Put 
-               @Commit_point_set: PutUpdateValue | PutInsertValue
-               @ID: value
-               @Action:
-                       putIntegerMap(__map, key, value);
-                       //model_print("Put: key=%d, val=%d\n", key, value);
-               @End
-       */
-       int put(int key, int value) {
-               ASSERT (key && value);
-               int hash = hashKey(key);
-               Segment *seg = segments[hash & SEGMENT_MASK];
-               atomic<Entry*> *tab;
-
-               seg->lock(); // Critical region begins
-               tab = table;
-               int index = hash & (capacity - 1);
-
-               atomic<Entry*> *first = &tab[index];
-               Entry *e;
-               int oldValue = 0;
-       
-               // The written of the entry is synchronized by locking
-               Entry *firstPtr = first->load(relaxed);
-               e = firstPtr;
-               while (e != NULL) {
-                       if (key == e->key) {
-                               // This could be a relaxed (because locking synchronize
-                               // with the previous put()),  no need to be acquire
-                               oldValue = e->value.load(relaxed);
-                               
-                               /**** inadmissible (testcase1.cc) ****/
-                               e->value.store(value, seq_cst);
-                               /**
-                                       @Begin
-                                       @Commit_point_define_check: true 
-                                       @Label: PutUpdateValue
-                                       @End
-                               */
-                               seg->unlock(); // Don't forget to unlock before return
-                               return oldValue;
-                       }
-                       // Synchronized by locking
-                       e = e->next.load(relaxed);
-               }
-
-               // Add to front of list
-               Entry *newEntry = new Entry(hash, key, value, firstPtr);
-               
-               /**** UL (main.cc) ****/
-               // Publish the newEntry to others
-               first->store(newEntry, release);
-               /**
-                       @Begin
-                       @Commit_point_clear: true 
-                       @Label: PutClear
-                       @End
-               */
-
-               /**
-                       @Begin
-                       @Commit_point_define_check: true 
-                       @Label: PutInsertValue
-                       @End
-               */
-               seg->unlock(); // Critical region ends
-               return 0;
-       }
-};
-/**
-    @Begin
-       @Class_end
-       @End
-*/
-
-#endif
diff --git a/benchmark/concurrent-hashmap/main.cc b/benchmark/concurrent-hashmap/main.cc
deleted file mode 100644 (file)
index 4190075..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#include <iostream>
-#include <threads.h>
-#include "hashmap.h"
-
-HashMap *table;
-
-void threadA(void *arg) {
-       table->put(1, 1);
-       printf("Thrd A: Put %d -> %d\n", 1, 1);
-       int r1 = table->get(2);
-       printf("Thrd A: Get %d\n", r1);
-}
-
-void threadB(void *arg) {
-       table->put(2, 2);
-       printf("Thrd B: Put %d -> %d\n", 2, 2);
-       int r2 = table->get(1);
-       printf("Thrd B: Get %d\n", r2);
-}
-
-int user_main(int argc, char *argv[]) {
-       thrd_t t1, t2;
-
-       table = new HashMap;
-
-       thrd_create(&t1, threadA, NULL);
-       thrd_create(&t2, threadB, NULL);
-       thrd_join(t1);
-       thrd_join(t2);
-       
-       return 0;
-}
-
-
diff --git a/benchmark/concurrent-hashmap/note.txt b/benchmark/concurrent-hashmap/note.txt
deleted file mode 100644 (file)
index f080a48..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#Non-SC:
-The following case can be non-SC.
-
-Thrd1                                          Thrd2
-put(k1, v1); // a                      put(k2, v2); // c
-get(k2); // b                          get(k1); // d
-
-When b and d both read the old head of the list (and they later grab the lock,
-making it the interface SC), it's non-SC because neither reads the updated
-value.
-
-Run testcase1 to make the store and load of value slot to be seq_cst.
-
-Then run testcase2 with "-o annotation" to get store and load of key slot to be
-release/acquire.
-
-0m0.015s + 0m0.000 = 0m0.015s 
diff --git a/benchmark/concurrent-hashmap/testcase1.cc b/benchmark/concurrent-hashmap/testcase1.cc
deleted file mode 100644 (file)
index 8a69d4a..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#include <iostream>
-#include <threads.h>
-#include "hashmap.h"
-
-HashMap *table;
-
-void threadA(void *arg) {
-       table->put(1, 11);
-       printf("Thrd A: Put %d -> %d\n", 1, 11);
-       int r1 = table->get(2);
-       printf("Thrd A: Get %d\n", r1);
-}
-
-void threadB(void *arg) {
-       table->put(2, 22);
-       printf("Thrd B: Put %d -> %d\n", 2, 22);
-       int r2 = table->get(1);
-       printf("Thrd B: Get %d\n", r2);
-}
-
-int user_main(int argc, char *argv[]) {
-       thrd_t t1, t2;
-
-       table = new HashMap;
-       table->put(1, 1);
-       table->put(2, 2);
-
-       thrd_create(&t1, threadA, NULL);
-       thrd_create(&t2, threadB, NULL);
-       thrd_join(t1);
-       thrd_join(t2);
-       
-       return 0;
-}
-
-
diff --git a/benchmark/include/unrelacy.h b/benchmark/include/unrelacy.h
deleted file mode 100644 (file)
index 729d76f..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef __UNRELACY_H__
-#define __UNRELACY_H__
-
-#include <stdatomic.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <mutex>
-#include <condition_variable>
-
-#include <model-assert.h>
-#include <librace.h>
-
-#define $
-
-#define ASSERT(expr) MODEL_ASSERT(expr)
-#define RL_ASSERT(expr) MODEL_ASSERT(expr)
-
-#define RL_NEW new
-#define RL_DELETE(expr) delete expr
-
-#define mo_seqcst memory_order_relaxed
-#define mo_release memory_order_release
-#define mo_acquire memory_order_acquire
-#define mo_acq_rel memory_order_acq_rel
-#define mo_relaxed memory_order_relaxed
-
-namespace rl {
-
-       /* This 'useless' struct is declared just so we can use partial template
-        * specialization in our store and load functions. */
-       template <typename T, size_t n>
-       struct useless {
-               static void store(void *addr, T val);
-               static T load(const void *addr);
-       };
-
-       template <typename T>
-       struct useless<T, 1> {
-               static void store(void *addr, T val) { store_8(addr, (uint8_t)val); }
-               static T load(const void *addr) { return (T)load_8(addr); }
-       };
-
-       template <typename T>
-       struct useless<T, 2> {
-               static void store(void *addr, T val) { store_16(addr, (uint16_t)val); }
-               static T load(const void *addr) { return (T)load_16(addr); }
-       };
-
-       template <typename T>
-       struct useless<T, 4> {
-               static void store(void *addr, T val) { store_32(addr, (uint32_t)val); }
-               static T load(const void *addr) { return (T)load_32(addr); }
-       };
-
-       template <typename T>
-       struct useless<T, 8> {
-               static void store(void *addr, T val) { store_64(addr, (uint64_t)val); }
-               static T load(const void *addr) { return (T)load_64(addr); }
-       };
-
-       template <typename T>
-       struct var {
-               var() { useless<T, sizeof(T)>::store(&value, 0); }
-               var(T v) { useless<T, sizeof(T)>::store(&value, v); }
-               var(var const& r) {
-                       value = r.value;
-               }
-               ~var() { }
-
-               void operator = (T v) { useless<T, sizeof(T)>::store(&value, v); }
-               T operator () () { return useless<T, sizeof(T)>::load(&value); }
-               void operator += (T v) {
-                       useless<T, sizeof(T)>::store(&value,
-                                       useless<T, sizeof(T)>::load(&value) + v);
-               }
-               bool operator == (const struct var<T> v) const { return useless<T, sizeof(T)>::load(&value) == useless<T, sizeof(T)>::load(&v.value); }
-
-               T value;
-       };
-
-       class backoff_t
-       {
-        public:
-               typedef int debug_info_param;
-               void yield(debug_info_param info) { }
-               void yield() { }
-       };
-
-
-       typedef backoff_t backoff;
-       typedef backoff_t linear_backoff;
-       typedef backoff_t exp_backoff;
-
-}
-
-#endif /* __UNRELACY_H__ */
diff --git a/benchmark/linuxrwlocks/.gitignore b/benchmark/linuxrwlocks/.gitignore
deleted file mode 100644 (file)
index 2fdb632..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/linuxrwlocks
diff --git a/benchmark/linuxrwlocks/Makefile b/benchmark/linuxrwlocks/Makefile
deleted file mode 100644 (file)
index c62b36a..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-include ../benchmarks.mk
-
-TESTNAME = linuxrwlocks testcase1 testcase2
-
-all: $(TESTNAME)
-
-$(TESTNAME): % : %.c
-       $(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
-
-clean:
-       rm -f $(TESTNAME) *.o
diff --git a/benchmark/linuxrwlocks/linuxrwlocks.c b/benchmark/linuxrwlocks/linuxrwlocks.c
deleted file mode 100644 (file)
index 03a292b..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h"
-
-#include "librace.h"
-
-#define RW_LOCK_BIAS            0x00100000
-#define WRITE_LOCK_CMP          RW_LOCK_BIAS
-
-typedef union {
-       atomic_int lock;
-} rwlock_t;
-
-/** Example implementation of linux rw lock along with 2 thread test
- *  driver... */
-
-/**
-       Properties to check:
-       1. At most 1 thread can acquire the write lock, and at the same time, no
-               other threads can acquire any lock (including read/write lock).
-       2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
-       3. A read_unlock release 1 read lock, and a write_unlock release the write
-       lock. They can not release a lock that they don't acquire.
-       ###
-       4. Read_lock and write_lock can not be grabbed at the same time.
-       5. Happpens-before relationship should be checked and guaranteed, which
-       should be as the following:
-               a. read_unlock hb-> write_lock
-               b. write_unlock hb-> write_lock
-               c. write_unlock hb-> read_lock
-*/
-
-/**
-       Interesting point for locks:
-       a. If the users unlock() before any lock(), then the model checker will fail.
-       For this case, we can not say that the data structure is buggy, how can we
-       tell them from a real data structure bug???
-       b. We should specify that for a specific thread, successful locks and
-       unlocks should always come in pairs. We could make this check as an
-       auxiliary check since it is an extra rule for how the interfaces should called.
-*/
-
-/** @DeclareState:
-               bool writer_lock_acquired;
-               int reader_lock_cnt;
-       @Initial: writer_lock_acquired = false;
-               reader_lock_cnt = 0;
-       @Commutativity: Read_Lock <-> Read_Lock: true
-       @Commutativity: Read_Lock <-> Read_Unlock: true
-       @Commutativity: Read_Lock <-> Read_Trylock: true
-       @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
-       
-       @Commutativity: Read_Unlock <-> Read_Unlock: true
-       @Commutativity: Read_Unlock <-> Read_Trylock: true
-       @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
-
-       @Commutativity: Read_Trylock <-> Read_Trylock: true
-       @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
-       @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
-
-       @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
-       @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
-*/
-
-static inline int read_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
-}
-
-static inline int write_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
-}
-
-
-/**
-       @Begin
-       @Interface: Read_Lock
-       @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
-       @Check: !writer_lock_acquired
-       @Action: reader_lock_cnt++;
-       @End
-*/
-static inline void read_lock(rwlock_t *rw)
-{
-       
-       /**********  Admissibility  **********/
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue > 0
-               @Label:Read_Lock_Success_1
-               @End
-       */
-       while (priorvalue <= 0) {
-               atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
-                       thrd_yield();
-               }
-               /**********  Admissibility  **********/
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue > 0
-                       @Label:Read_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-
-/**
-       @Begin
-       @Interface: Write_Lock
-       @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
-       @Check: !writer_lock_acquired && reader_lock_cnt == 0
-       @Action: writer_lock_acquired = true;
-       @End
-*/
-static inline void write_lock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-               @Label: Write_Lock_Success_1
-               @End
-       */
-       while (priorvalue != RW_LOCK_BIAS) {
-               atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
-                       thrd_yield();
-               }
-               /**********  Admissibility  **********/
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-                       @Label: Write_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-/**
-       @Begin
-       @Interface: Read_Trylock
-       @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
-       //@Condition: writer_lock_acquired == false
-       @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__)
-                       reader_lock_cnt++;
-       @Post_check: __RET__ == !writer_lock_acquired || !__RET__
-       @End
-*/
-static inline int read_trylock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Read_Trylock_Point
-               @End
-       */
-       if (priorvalue > 0) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Read_Trylock_Point
-                       @Label:  Read_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Read_Trylock_Point
-               @Label:  Read_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Write_Trylock
-       @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
-       @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__ == 1)
-                       writer_lock_acquired = true;
-       @End
-*/
-static inline int write_trylock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       //model_print("write_trylock: priorvalue: %d\n", priorvalue);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Write_Trylock_Point
-               @End
-       */
-       if (priorvalue == RW_LOCK_BIAS) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Write_Trylock_Point
-                       @Label: Write_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Write_Trylock_Point
-               @Label: Write_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Read_Unlock
-       @Commit_point_set: Read_Unlock_Point
-       @Check: reader_lock_cnt > 0 && !writer_lock_acquired
-       @Action: reader_lock_cnt--;
-       @End
-*/
-static inline void read_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Read_Unlock_Point
-               @End
-       */
-}
-
-/**
-       @Begin
-       @Interface: Write_Unlock
-       @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
-       @Check: reader_lock_cnt == 0 && writer_lock_acquired
-       @Action: writer_lock_acquired = false;
-       @End
-*/
-static inline void write_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Write_Unlock_Point
-               @End
-       */
-
-       /**
-               //@Begin
-               @Commit_point_clear: true
-               @Label: Write_Unlock_Clear
-               @End
-       */
-}
-
-rwlock_t mylock;
-int shareddata;
-
-static void a(void *obj)
-{
-       read_lock(&mylock);
-       //load_32(&shareddata);
-       //printf("%d\n", shareddata);
-       read_unlock(&mylock);
-       
-       write_lock(&mylock);
-       //store_32(&shareddata,(unsigned int)i);
-       shareddata = 47;
-       write_unlock(&mylock);
-}
-
-static void b(void *obj)
-{
-       if (read_trylock(&mylock) == 1) {
-               //printf("%d\n", shareddata);
-               read_unlock(&mylock);
-       }
-       
-       if (write_trylock(&mylock) == 1) {
-               //shareddata = 47;
-               write_unlock(&mylock);
-       }
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t1, t2;
-       atomic_init(&mylock.lock, RW_LOCK_BIAS);
-
-       thrd_create(&t1, (thrd_start_t)&a, NULL);
-       thrd_create(&t2, (thrd_start_t)&b, NULL);
-
-       thrd_join(t1);
-       thrd_join(t2);
-
-       return 0;
-}
diff --git a/benchmark/linuxrwlocks/testcase1.c b/benchmark/linuxrwlocks/testcase1.c
deleted file mode 100644 (file)
index 9faad2a..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h"
-
-#include "librace.h"
-
-#define RW_LOCK_BIAS            0x00100000
-#define WRITE_LOCK_CMP          RW_LOCK_BIAS
-
-typedef union {
-       atomic_int lock;
-} rwlock_t;
-
-/** Example implementation of linux rw lock along with 2 thread test
- *  driver... */
-
-/**
-       Properties to check:
-       1. At most 1 thread can acquire the write lock, and at the same time, no
-               other threads can acquire any lock (including read/write lock).
-       2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
-       3. A read_unlock release 1 read lock, and a write_unlock release the write
-       lock. They can not release a lock that they don't acquire.
-       ###
-       4. Read_lock and write_lock can not be grabbed at the same time.
-       5. Happpens-before relationship should be checked and guaranteed, which
-       should be as the following:
-               a. read_unlock hb-> write_lock
-               b. write_unlock hb-> write_lock
-               c. write_unlock hb-> read_lock
-*/
-
-/**
-       Interesting point for locks:
-       a. If the users unlock() before any lock(), then the model checker will fail.
-       For this case, we can not say that the data structure is buggy, how can we
-       tell them from a real data structure bug???
-       b. We should specify that for a specific thread, successful locks and
-       unlocks should always come in pairs. We could make this check as an
-       auxiliary check since it is an extra rule for how the interfaces should called.
-*/
-
-/**
-       @Begin
-       @Options:
-               LANG = C;
-       @Global_define:
-               @DeclareVar:
-                       bool writer_lock_acquired;
-                       int reader_lock_cnt;
-               @InitVar:
-                       writer_lock_acquired = false;
-                       reader_lock_cnt = 0;
-       @Happens_before:
-               # Since commit_point_set has no ID attached, A -> B means that for any B,
-               # the previous A happens before B.
-               #Read_Unlock -> Read_Lock
-               Read_Unlock -> Write_Lock
-               Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
-               #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
-
-               Write_Unlock -> Write_Lock
-               Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
-
-               Write_Unlock -> Read_Lock
-               Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
-       
-       @Commutativity: Read_Lock <-> Read_Lock: true
-       @Commutativity: Read_Lock <-> Read_Unlock: true
-       @Commutativity: Read_Lock <-> Read_Trylock: true
-       @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
-       
-       @Commutativity: Read_Unlock <-> Read_Unlock: true
-       @Commutativity: Read_Unlock <-> Read_Trylock: true
-       @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
-
-       @Commutativity: Read_Trylock <-> Read_Trylock: true
-       @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
-       @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
-
-       @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
-       @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
-       @End
-*/
-
-static inline int read_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
-}
-
-static inline int write_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
-}
-
-
-/**
-       @Begin
-       @Interface: Read_Lock
-       @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
-       @Check: !writer_lock_acquired
-       @Action: reader_lock_cnt++;
-       @End
-*/
-static inline void read_lock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue > 0
-               @Label:Read_Lock_Success_1
-               @End
-       */
-       while (priorvalue <= 0) {
-               atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
-                       thrd_yield();
-               }
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue > 0
-                       @Label:Read_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-
-/**
-       @Begin
-       @Interface: Write_Lock
-       @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
-       @Check: !writer_lock_acquired && reader_lock_cnt == 0
-       @Action: writer_lock_acquired = true;
-       @End
-*/
-static inline void write_lock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-               @Label: Write_Lock_Success_1
-               @End
-       */
-       while (priorvalue != RW_LOCK_BIAS) {
-               atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
-                       thrd_yield();
-               }
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-                       @Label: Write_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-/**
-       @Begin
-       @Interface: Read_Trylock
-       @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
-       //@Condition: writer_lock_acquired == false
-       @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__)
-                       reader_lock_cnt++;
-       @Post_check: __RET__ == !writer_lock_acquired || !__RET__
-       @End
-*/
-static inline int read_trylock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Read_Trylock_Point
-               @End
-       */
-       if (priorvalue > 0) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Read_Trylock_Point
-                       @Label:  Read_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Read_Trylock_Point
-               @Label:  Read_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Write_Trylock
-       @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
-       @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__ == 1)
-                       writer_lock_acquired = true;
-       @End
-*/
-static inline int write_trylock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       //model_print("write_trylock: priorvalue: %d\n", priorvalue);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Write_Trylock_Point
-               @End
-       */
-       if (priorvalue == RW_LOCK_BIAS) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Write_Trylock_Point
-                       @Label: Write_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Write_Trylock_Point
-               @Label: Write_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Read_Unlock
-       @Commit_point_set: Read_Unlock_Point
-       @Check: reader_lock_cnt > 0 && !writer_lock_acquired
-       @Action: reader_lock_cnt--;
-       @End
-*/
-static inline void read_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Read_Unlock_Point
-               @End
-       */
-}
-
-/**
-       @Begin
-       @Interface: Write_Unlock
-       @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
-       @Check: reader_lock_cnt == 0 && writer_lock_acquired
-       @Action: writer_lock_acquired = false;
-       @End
-*/
-static inline void write_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Write_Unlock_Point
-               @End
-       */
-
-       /**
-               //@Begin
-               @Commit_point_clear: true
-               @Label: Write_Unlock_Clear
-               @End
-       */
-}
-
-rwlock_t mylock;
-int shareddata;
-
-static void a(void *obj)
-{
-       write_lock(&mylock);
-       //store_32(&shareddata,(unsigned int)i);
-       shareddata = 47;
-       write_unlock(&mylock);
-}
-
-static void b(void *obj)
-{
-       if (read_trylock(&mylock)) {
-               //load_32(&shareddata);
-               //printf("%d\n", shareddata);
-               read_unlock(&mylock);
-       }
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t1, t2;
-       atomic_init(&mylock.lock, RW_LOCK_BIAS);
-
-       thrd_create(&t1, (thrd_start_t)&a, NULL);
-       thrd_create(&t2, (thrd_start_t)&b, NULL);
-
-       thrd_join(t1);
-       thrd_join(t2);
-
-       return 0;
-}
diff --git a/benchmark/linuxrwlocks/testcase2.c b/benchmark/linuxrwlocks/testcase2.c
deleted file mode 100644 (file)
index f5f0820..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-#include <stdio.h>
-#include <threads.h>
-#include <stdatomic.h>
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h"
-
-#include "librace.h"
-
-#define RW_LOCK_BIAS            0x00100000
-#define WRITE_LOCK_CMP          RW_LOCK_BIAS
-
-typedef union {
-       atomic_int lock;
-} rwlock_t;
-
-/** Example implementation of linux rw lock along with 2 thread test
- *  driver... */
-
-/**
-       Properties to check:
-       1. At most 1 thread can acquire the write lock, and at the same time, no
-               other threads can acquire any lock (including read/write lock).
-       2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
-       3. A read_unlock release 1 read lock, and a write_unlock release the write
-       lock. They can not release a lock that they don't acquire.
-       ###
-       4. Read_lock and write_lock can not be grabbed at the same time.
-       5. Happpens-before relationship should be checked and guaranteed, which
-       should be as the following:
-               a. read_unlock hb-> write_lock
-               b. write_unlock hb-> write_lock
-               c. write_unlock hb-> read_lock
-*/
-
-/**
-       Interesting point for locks:
-       a. If the users unlock() before any lock(), then the model checker will fail.
-       For this case, we can not say that the data structure is buggy, how can we
-       tell them from a real data structure bug???
-       b. We should specify that for a specific thread, successful locks and
-       unlocks should always come in pairs. We could make this check as an
-       auxiliary check since it is an extra rule for how the interfaces should called.
-*/
-
-/**
-       @Begin
-       @Options:
-               LANG = C;
-       @Global_define:
-               @DeclareVar:
-                       bool writer_lock_acquired;
-                       int reader_lock_cnt;
-               @InitVar:
-                       writer_lock_acquired = false;
-                       reader_lock_cnt = 0;
-       @Happens_before:
-               # Since commit_point_set has no ID attached, A -> B means that for any B,
-               # the previous A happens before B.
-               #Read_Unlock -> Read_Lock
-               Read_Unlock -> Write_Lock
-               Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
-               #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
-
-               Write_Unlock -> Write_Lock
-               Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
-
-               Write_Unlock -> Read_Lock
-               Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
-       
-       @Commutativity: Read_Lock <-> Read_Lock: true
-       @Commutativity: Read_Lock <-> Read_Unlock: true
-       @Commutativity: Read_Lock <-> Read_Trylock: true
-       @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
-       
-       @Commutativity: Read_Unlock <-> Read_Unlock: true
-       @Commutativity: Read_Unlock <-> Read_Trylock: true
-       @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
-
-       @Commutativity: Read_Trylock <-> Read_Trylock: true
-       @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
-       @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
-
-       @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
-       @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
-       @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
-       @End
-*/
-
-static inline int read_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
-}
-
-static inline int write_can_lock(rwlock_t *lock)
-{
-       return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
-}
-
-
-/**
-       @Begin
-       @Interface: Read_Lock
-       @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
-       @Check: !writer_lock_acquired
-       @Action: reader_lock_cnt++;
-       @End
-*/
-static inline void read_lock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue > 0
-               @Label:Read_Lock_Success_1
-               @End
-       */
-       while (priorvalue <= 0) {
-               atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
-                       thrd_yield();
-               }
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue > 0
-                       @Label:Read_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-
-/**
-       @Begin
-       @Interface: Write_Lock
-       @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
-       @Check: !writer_lock_acquired && reader_lock_cnt == 0
-       @Action: writer_lock_acquired = true;
-       @End
-*/
-static inline void write_lock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       /**
-               @Begin
-               @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-               @Label: Write_Lock_Success_1
-               @End
-       */
-       while (priorvalue != RW_LOCK_BIAS) {
-               atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-               while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
-                       thrd_yield();
-               }
-               priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-               /**
-                       @Begin
-                       @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
-                       @Label: Write_Lock_Success_2
-                       @End
-               */
-       }
-}
-
-/**
-       @Begin
-       @Interface: Read_Trylock
-       @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
-       //@Condition: writer_lock_acquired == false
-       @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__)
-                       reader_lock_cnt++;
-       @Post_check: __RET__ == !writer_lock_acquired || !__RET__
-       @End
-*/
-static inline int read_trylock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Read_Trylock_Point
-               @End
-       */
-       if (priorvalue > 0) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Read_Trylock_Point
-                       @Label:  Read_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Read_Trylock_Point
-               @Label:  Read_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Write_Trylock
-       @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
-       @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
-       @Action:
-               if (__RET__ == 1)
-                       writer_lock_acquired = true;
-       @End
-*/
-static inline int write_trylock(rwlock_t *rw)
-{
-       int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-       //model_print("write_trylock: priorvalue: %d\n", priorvalue);
-       /**
-               @Begin
-               @Potential_commit_point_define: true
-               @Label: Potential_Write_Trylock_Point
-               @End
-       */
-       if (priorvalue == RW_LOCK_BIAS) {
-               /**
-                       @Begin
-                       @Commit_point_define: true
-                       @Potential_commit_point_label: Potential_Write_Trylock_Point
-                       @Label: Write_Trylock_Succ_Point
-                       @End
-               */
-               return 1;
-       }
-       /**
-               @Begin
-               @Commit_point_define: true
-               @Potential_commit_point_label: Potential_Write_Trylock_Point
-               @Label: Write_Trylock_Fail_Point
-               @End
-       */
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
-       return 0;
-}
-
-/**
-       @Begin
-       @Interface: Read_Unlock
-       @Commit_point_set: Read_Unlock_Point
-       @Check: reader_lock_cnt > 0 && !writer_lock_acquired
-       @Action: reader_lock_cnt--;
-       @End
-*/
-static inline void read_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Read_Unlock_Point
-               @End
-       */
-}
-
-/**
-       @Begin
-       @Interface: Write_Unlock
-       @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
-       @Check: reader_lock_cnt == 0 && writer_lock_acquired
-       @Action: writer_lock_acquired = false;
-       @End
-*/
-static inline void write_unlock(rwlock_t *rw)
-{
-       /**********  Admissibility  **********/
-       atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
-       /**
-               @Begin
-               @Commit_point_define_check: true
-               @Label: Write_Unlock_Point
-               @End
-       */
-
-       /**
-               //@Begin
-               @Commit_point_clear: true
-               @Label: Write_Unlock_Clear
-               @End
-       */
-}
-
-rwlock_t mylock;
-int shareddata;
-
-static void a(void *obj)
-{
-       write_lock(&mylock);
-       //store_32(&shareddata,(unsigned int)i);
-       shareddata = 47;
-       write_unlock(&mylock);
-}
-
-static void b(void *obj)
-{
-       if (write_trylock(&mylock)) {
-               //store_32(&shareddata,(unsigned int)i);
-               shareddata = 47;
-               write_unlock(&mylock);
-       }
-
-       read_lock(&mylock);
-       //load_32(&shareddata);
-       //printf("%d\n", shareddata);
-       read_unlock(&mylock);
-}
-
-int user_main(int argc, char **argv)
-{
-       /**
-               @Begin
-               @Entry_point
-               @End
-       */
-       thrd_t t1, t2;
-       atomic_init(&mylock.lock, RW_LOCK_BIAS);
-
-       thrd_create(&t1, (thrd_start_t)&a, NULL);
-       thrd_create(&t2, (thrd_start_t)&b, NULL);
-
-       thrd_join(t1);
-       thrd_join(t2);
-
-       return 0;
-}
diff --git a/benchmark/mcs-lock/.gitignore b/benchmark/mcs-lock/.gitignore
deleted file mode 100644 (file)
index aef308c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/mcs-lock
diff --git a/benchmark/mcs-lock/Makefile b/benchmark/mcs-lock/Makefile
deleted file mode 100644 (file)
index 5a311b3..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-include ../benchmarks.mk
-
-TESTNAME = mcs-lock
-
-all: $(TESTNAME)
-
-$(TESTNAME): $(TESTNAME).cc $(TESTNAME).h
-       $(CXX) -o $@ $< $(CXXFLAGS) $(LDFLAGS)
-
-clean:
-       rm -f $(TESTNAME) *.o
diff --git a/benchmark/mcs-lock/mcs-lock.cc b/benchmark/mcs-lock/mcs-lock.cc
deleted file mode 100644 (file)
index 43435b4..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <stdio.h>
-#include <threads.h>
-
-#include "mcs-lock.h"
-
-/* For data race instrumentation */
-#include "librace.h"
-
-struct mcs_mutex *mutex;
-static uint32_t shared;
-
-void threadA(void *arg)
-{
-       mcs_mutex::guard g(mutex);
-       //printf("store: %d\n", 17);
-       //store_32(&shared, 17);
-       shared = 17;
-       mutex->unlock(&g);
-       mutex->lock(&g);
-       //printf("load: %u\n", load_32(&shared));
-       //printf("load: %u\n", shared);
-}
-
-void threadB(void *arg)
-{
-       mcs_mutex::guard g(mutex);
-       //printf("load: %u\n", load_32(&shared));
-       //printf("load: %u\n", shared);
-       mutex->unlock(&g);
-       mutex->lock(&g);
-       //printf("store: %d\n", 17);
-       shared = 17;
-       //store_32(&shared, 17);
-}
-
-int user_main(int argc, char **argv)
-{
-       thrd_t A, B;
-
-       mutex = new mcs_mutex();
-
-       thrd_create(&A, &threadA, NULL);
-       thrd_create(&B, &threadB, NULL);
-       thrd_join(A);
-       thrd_join(B);
-       return 0;
-}
diff --git a/benchmark/mcs-lock/mcs-lock.h b/benchmark/mcs-lock/mcs-lock.h
deleted file mode 100644 (file)
index 9f47812..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-// mcs on stack
-
-#include <stdatomic.h>
-#include <unrelacy.h>
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-#include "common.h" 
-
-struct mcs_node {
-       std::atomic<mcs_node *> next;
-       std::atomic<int> gate;
-
-       mcs_node() {
-               next.store(0);
-               gate.store(0);
-       }
-};
-
-
-struct mcs_mutex {
-public:
-       // tail is null when lock is not held
-       std::atomic<mcs_node *> m_tail;
-
-       mcs_mutex() {
-               /**
-                       @Begin
-               @Entry_point
-                       @End
-               */
-               m_tail.store( NULL );
-       }
-       ~mcs_mutex() {
-               //ASSERT( m_tail.load() == NULL );
-       }
-
-       // Each thread will have their own guard.
-       class guard {
-       public:
-               mcs_mutex * m_t;
-               mcs_node    m_node; // node held on the stack
-
-               guard(mcs_mutex * t) : m_t(t) { t->lock(this); }
-               ~guard() { m_t->unlock(this); }
-       };
-
-       /**
-               @Begin
-               @Options:
-                       LANG = CPP;
-                       CLASS = mcs_mutex;
-               @Global_define:
-                       @DeclareVar: bool _lock_acquired;
-                       @InitVar: _lock_acquired = false;
-               @Happens_before: Unlock -> Lock
-               @End
-       */
-
-       /**
-               @Begin
-               @Interface: Lock
-               @Commit_point_set: Lock_Enqueue_Point1 | Lock_Enqueue_Point2
-               @Check: _lock_acquired == false;
-               @Action: _lock_acquired = true;
-               @End
-       */
-       void lock(guard * I) {
-               mcs_node * me = &(I->m_node);
-
-               // set up my node :
-               // not published yet so relaxed :
-               me->next.store(NULL, std::mo_relaxed );
-               me->gate.store(1, std::mo_relaxed );
-
-               /**********  Inadmissible  **********/
-               /** Run this in the -Y mode to expose the HB bug */
-               // publish my node as the new tail :
-               mcs_node * pred = m_tail.exchange(me, std::mo_acq_rel);
-               /**
-                       @Begin
-                       @Commit_point_define_check: pred == NULL
-                       @Label: Lock_Enqueue_Point1
-                       @End
-               */
-               if ( pred != NULL ) {
-                       // (*1) race here
-                       // unlock of pred can see me in the tail before I fill next
-
-                       // publish me to previous lock-holder :
-                       // FIXME: detection miss, execution never ends
-                       // If this is relaxed, the store 0 to gate will be read before and
-                       // that lock will never ends.
-                       pred->next.store(me, std::mo_release );
-                       //printf("lock_miss1\n");
-
-                       // (*2) pred not touched any more       
-
-                       // now this is the spin -
-                       // wait on predecessor setting my flag -
-                       rl::linear_backoff bo;
-                       int my_gate = 1;
-                       while (my_gate ) {
-                               /**********  Inadmissibility  *********/
-                               my_gate = me->gate.load(std::mo_acquire);
-                               //if (my_gate == 0)
-                                       //printf("lock at gate!\n");
-                               /**
-                                       @Begin
-                                       @Commit_point_define_check: my_gate == 0
-                                       @Label: Lock_Enqueue_Point2
-                                       @End
-                               */
-                               thrd_yield();
-                       }
-               }
-       }
-
-       /**
-               @Begin
-               @Interface: Unlock
-               @Commit_point_set: Unlock_Point_Success_1 | Unlock_Point_Success_2
-               @Check: _lock_acquired == true
-               @Action: _lock_acquired = false;
-               @End
-       */
-       void unlock(guard * I) {
-               mcs_node * me = &(I->m_node);
-
-               // FIXME: detection miss, execution never ends
-               mcs_node * next = me->next.load(std::mo_acquire);
-               //printf("unlock_miss2\n");
-               if ( next == NULL )
-               {
-                       mcs_node * tail_was_me = me;
-                       bool success;
-
-                       /**********  Inadmissible  **********/
-                       success = m_tail.compare_exchange_strong(
-                               tail_was_me,NULL,std::mo_acq_rel);
-                       /**
-                               @Begin
-                               @Commit_point_define_check: success == true
-                               @Label: Unlock_Point_Success_1
-                               @End
-                       */
-                       if (success) {
-                               
-                               // got null in tail, mutex is unlocked
-                               return;
-                       }
-
-                       // (*1) catch the race :
-                       rl::linear_backoff bo;
-                       for(;;) {
-                               // FIXME: detection miss, execution never ends
-                               next = me->next.load(std::mo_acquire);
-                               //printf("unlock_miss3\n");
-                               if ( next != NULL )
-                                       break;
-                               thrd_yield();
-                       }
-               }
-
-               // (*2) - store to next must be done,
-               //  so no locker can be viewing my node any more        
-
-               /**********  Inadmissible  **********/
-               // let next guy in :
-               next->gate.store( 0, std::mo_release );
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: Unlock_Point_Success_2
-                       @End
-               */
-       }
-};
-/**
-       @Begin
-       @Class_end
-       @End
-*/
diff --git a/benchmark/mcs-lock/note.txt b/benchmark/mcs-lock/note.txt
deleted file mode 100644 (file)
index 93a5387..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#1: The bug that we can't expose in mcs-lock is basically because of the
-following:
-
-the release/acquire are essential in building the hb relationship between the
-next field, such that it guarantees that the next lock() can see the update of
-gate by the previous unlock(). If no hb has been established, the lock() can
-never read the updated gate and will try forever until it gets the out-of-memory
-assertion bug. Thus our analysis cannot even have a valid execution to check!
diff --git a/benchmark/mpmc-queue/.gitignore b/benchmark/mpmc-queue/.gitignore
deleted file mode 100644 (file)
index 4b20d5b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-/mpmc-queue
-/mpmc-1r2w
-/mpmc-2r1w
-/mpmc-queue-noinit
-/mpmc-1r2w-noinit
-/mpmc-2r1w-noinit
-/mpmc-queue-rdwr
-/mpmc-rdwr-noinit
diff --git a/benchmark/mpmc-queue/Makefile b/benchmark/mpmc-queue/Makefile
deleted file mode 100644 (file)
index 7c20177..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-include ../benchmarks.mk
-
-TESTS = mpmc-queue testcase1 testcase2 testcase3
-
-all: $(TESTS)
-
-$(TESTS): % : %.cc mpmc-queue.h
-       $(CXX) -o $@ $< $(CXXFLAGS) $(LDFLAGS)
-
-clean:
-       rm -f $(TESTS) *.o
diff --git a/benchmark/mpmc-queue/mpmc-queue.cc b/benchmark/mpmc-queue/mpmc-queue.cc
deleted file mode 100644 (file)
index 39c2c0a..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-#include <inttypes.h>
-#include <threads.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#include <librace.h>
-
-#include "mpmc-queue.h"
-
-void threadA(struct mpmc_boundq_1_alt<int32_t, sizeof(int32_t)> *queue)
-{
-       int32_t *bin = queue->write_prepare();
-       //store_32(bin, 1);
-       *bin = 1;
-       printf("write_bin %d, val %d\n", bin, 1);
-       queue->write_publish(bin);
-}
-
-void threadB(struct mpmc_boundq_1_alt<int32_t, sizeof(int32_t)> *queue)
-{
-       int32_t *bin;
-       while (bin = queue->read_fetch()) {
-               //printf("Read: %d\n", load_32(bin));
-               //printf("read_bin %d, val %d\n", bin, load_32(bin));
-               printf("Read: %d\n", *bin);
-               queue->read_consume(bin);
-       }
-}
-
-void threadC(struct mpmc_boundq_1_alt<int32_t, sizeof(int32_t)> *queue)
-{
-       int32_t *bin = queue->write_prepare();
-       //store_32(bin, 1);
-       *bin = 1;
-       queue->write_publish(bin);
-
-       while (bin = queue->read_fetch()) {
-               //printf("Read: %d\n", load_32(bin));
-               printf("Read: %d\n", *bin);
-               queue->read_consume(bin);
-       }
-}
-
-#define MAXREADERS 3
-#define MAXWRITERS 3
-#define MAXRDWR 3
-
-#ifdef CONFIG_MPMC_READERS
-#define DEFAULT_READERS (CONFIG_MPMC_READERS)
-#else
-#define DEFAULT_READERS 2
-#endif
-
-#ifdef CONFIG_MPMC_WRITERS
-#define DEFAULT_WRITERS (CONFIG_MPMC_WRITERS)
-#else
-#define DEFAULT_WRITERS 2
-#endif
-
-#ifdef CONFIG_MPMC_RDWR
-#define DEFAULT_RDWR (CONFIG_MPMC_RDWR)
-#else
-#define DEFAULT_RDWR 0
-#endif
-
-int readers = DEFAULT_READERS, writers = DEFAULT_WRITERS, rdwr = DEFAULT_RDWR;
-
-void print_usage()
-{
-       printf("Error: use the following options\n"
-               " -r <num>              Choose number of reader threads\n"
-               " -w <num>              Choose number of writer threads\n");
-       exit(EXIT_FAILURE);
-}
-
-void process_params(int argc, char **argv)
-{
-       const char *shortopts = "hr:w:";
-       int opt;
-       bool error = false;
-
-       while (!error && (opt = getopt(argc, argv, shortopts)) != -1) {
-               switch (opt) {
-               case 'h':
-                       print_usage();
-                       break;
-               case 'r':
-                       readers = atoi(optarg);
-                       break;
-               case 'w':
-                       writers = atoi(optarg);
-                       break;
-               default: /* '?' */
-                       error = true;
-                       break;
-               }
-       }
-
-       if (writers < 1 || writers > MAXWRITERS)
-               error = true;
-       if (readers < 1 || readers > MAXREADERS)
-               error = true;
-
-       if (error)
-               print_usage();
-}
-
-int user_main(int argc, char **argv)
-{
-       struct mpmc_boundq_1_alt<int32_t, sizeof(int32_t)> queue;
-       thrd_t A[MAXWRITERS], B[MAXREADERS], C[MAXRDWR];
-
-       /* Note: optarg() / optind is broken in model-checker - workaround is
-        * to just copy&paste this test a few times */
-       //process_params(argc, argv);
-       printf("%d reader(s), %d writer(s)\n", readers, writers);
-
-#ifndef CONFIG_MPMC_NO_INITIAL_ELEMENT
-       printf("Adding initial element\n");
-       int32_t *bin = queue.write_prepare();
-       //store_32(bin, 17);
-       *bin, 17;
-       printf("init_write_bin %d, val %d\n", bin, 17);
-       queue.write_publish(bin);
-#endif
-
-       printf("Start threads\n");
-
-       for (int i = 0; i < writers; i++)
-               thrd_create(&A[i], (thrd_start_t)&threadA, &queue);
-       for (int i = 0; i < readers; i++)
-               thrd_create(&B[i], (thrd_start_t)&threadB, &queue);
-
-       for (int i = 0; i < rdwr; i++)
-               thrd_create(&C[i], (thrd_start_t)&threadC, &queue);
-
-       for (int i = 0; i < writers; i++)
-               thrd_join(A[i]);
-       for (int i = 0; i < readers; i++)
-               thrd_join(B[i]);
-       for (int i = 0; i < rdwr; i++)
-               thrd_join(C[i]);
-
-       printf("Threads complete\n");
-
-       return 0;
-}
diff --git a/benchmark/mpmc-queue/mpmc-queue.h b/benchmark/mpmc-queue/mpmc-queue.h
deleted file mode 100644 (file)
index 046a248..0000000
+++ /dev/null
@@ -1,310 +0,0 @@
-#include <stdatomic.h>
-#include <unrelacy.h>
-#include <common.h>
-
-#include <spec_lib.h>
-#include <stdlib.h>
-#include <cdsannotate.h>
-#include <specannotation.h>
-#include <model_memory.h>
-
-/**
-       @Begin
-       @Class_begin
-       @End
-*/
-template <typename t_element, size_t t_size>
-struct mpmc_boundq_1_alt
-{
-private:
-       
-       unsigned int MASK;
-       
-       atomic<t_element*> arr;
-       
-       // elements should generally be cache-line-size padded :
-       t_element               m_array[t_size];
-
-       // rdwr counts the reads & writes that have started
-       atomic<unsigned int>    m_rdwr;
-       // "read" and "written" count the number completed
-       atomic<unsigned int>    m_read;
-       atomic<unsigned int>    m_written;
-
-public:
-
-       mpmc_boundq_1_alt(int _MASK = 0xFFFF)
-       {
-       /**
-               @Begin
-                       @Entry_point
-                       @End
-               */
-               m_rdwr = 0;
-               m_read = 0;
-               m_written = 0;
-               // For this we want MASK = 1; MASK wrap around
-               MASK = _MASK; //0x11;
-               arr = m_array;
-       }
-       
-
-       /**
-               @Begin
-               @Options:
-                       LANG = CPP;
-                       CLASS = mpmc_boundq_1_alt;
-               @Global_define:
-               @Happens_before:
-                       Publish -> Fetch
-                       Consume -> Prepare
-               @Commutativity: Prepare <-> Prepare: _Method1.__RET__ !=
-               _Method2.__RET__ || !_Method1.__RET__ || !_Method2.__RET__
-               @Commutativity: Prepare <-> Publish: _Method1.__RET__ != _Method2.bin ||
-               !_Method1.__RET__
-               @Commutativity: Prepare <-> Fetch: _Method1.__RET__ != _Method2.__RET__
-               || !_Method1.__RET__ || !_Method2.__RET__
-               @Commutativity: Prepare <-> Consume : _Method1.__RET__ != _Method2.bin || !_Method1.__RET__
-
-               @Commutativity: Publish <-> Publish: _Method1.bin != _Method2.bin
-               @Commutativity: Publish <-> Fetch: _Method1.bin != _Method2.__RET__ ||
-               !_Method2.__RET__
-               @Commutativity: Publish <-> Consume : _Method1.bin != _Method2.bin
-
-               @Commutativity: Fetch <-> Fetch: _Method1.__RET__ != _Method2.__RET__ ||
-               !_Method1.__RET__ || !_Method2.__RET__
-               @Commutativity: Fetch <-> Consume : _Method1.__RET__ != _Method2.bin || !_Method1.__RET__
-
-               @Commutativity: Consume <-> Consume : _Method1.bin != _Method2.bin
-
-       @End
-       */
-
-       //-----------------------------------------------------
-
-       /**
-               @Begin
-               @Interface: Fetch
-               @Commit_point_set: FetchReadRW1 | FetchReadRW2 | FetchReadPointer
-               @ID: (call_id_t) __RET__
-               //@Action: model_print("Fetch: %d\n", __RET__);
-               @End
-       */
-       t_element * read_fetch() {
-               // Since we have a lool to CAS the value of m_rdwr, this can be relaxed
-               unsigned int rdwr = m_rdwr.load(mo_acquire);
-               //unsigned int rdwr = m_rdwr.load(mo_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: FetchReadRW1
-                       @End
-               */
-               unsigned int rd,wr;
-               for(;;) {
-                       rd = (rdwr>>16) & MASK;
-                       wr = rdwr & MASK;
-
-                       if ( wr == rd ) { // empty
-                               return false;
-                       }
-                       /**** Inadmissibility (testcase2.cc, MASK = 1, size = 1) ****/
-                       bool succ =
-                               m_rdwr.compare_exchange_weak(rdwr,rdwr+(1<<16),mo_acq_rel);
-                       if (succ) {
-                               break;
-                       } else {
-                               /**
-                                       @Begin
-                                       @Commit_point_clear: true
-                                       @Label: FetchClear1
-                                       @End
-                               */
-
-                               /**
-                                       @Begin
-                                       @Commit_point_define_check: true
-                                       @Label: FetchReadRW2
-                                       @End
-                               */
-                               thrd_yield();
-                       }
-               }
-
-               // (*1)
-               rl::backoff bo;
-               while (true) {
-                       /**** Inadmissibility (testcase2.c) ****/
-                       int written = m_written.load(mo_acquire);
-                       if ((written & MASK) != wr) {
-                               thrd_yield();
-                       } else {
-                               printf("Fetch: m_written=%d\n", written);
-                               break;
-                       }
-               }
-               t_element * p = & ( m_array[ rd % t_size ] );
-
-               // This is just a hack to tell the CDSChecker that we have a memory
-               // operation here
-               arr.load(memory_order_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_clear: true
-                       @Label: FetchClear2
-                       @End
-               */
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: FetchReadPointer
-                       @End
-               */
-               
-               return p;
-       }
-
-       /**
-               @Begin
-               @Interface: Consume
-               @Commit_point_set: ConsumeFetchAdd
-               @ID: (call_id_t) bin 
-               //@Action: model_print("Consume: %d\n", bin);
-               @End
-       */
-       void read_consume(t_element *bin) {
-               /**** Inadmissibility (testcase1.c) ****/
-               m_read.fetch_add(1,mo_release);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: ConsumeFetchAdd
-                       @End
-               */
-       }
-
-       //-----------------------------------------------------
-
-       /**
-               @Begin
-               @Interface: Prepare 
-               @Commit_point_set: PrepareReadRW1 | PrepareReadRW2 | PrepareReadPointer
-               @ID: (call_id_t) __RET__
-               //@Action: model_print("Prepare: %d\n", __RET__);
-               @End
-       */
-       t_element * write_prepare() {
-               // Try weaker semantics
-               // Since we have a lool to CAS the value of m_rdwr, this can be relaxed
-               unsigned int rdwr = m_rdwr.load(mo_acquire);
-               //unsigned int rdwr = m_rdwr.load(mo_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: PrepareReadRW1
-                       @End
-               */
-               unsigned int rd,wr;
-               for(;;) {
-                       rd = (rdwr>>16) & MASK;
-                       wr = rdwr & MASK;
-                       //printf("write_prepare: rd=%d, wr=%d\n", rd, wr);
-
-                       if ( wr == ((rd + t_size)&MASK) ) { // full
-                               return NULL;
-                       }
-                       
-                       // FIXME: Theoretically we have a bug here, it is not so likely to
-                       // happen!!! The following acq_rel can not guarantee that the load
-                       // acquire on m_read to read the most recent value. However, it can
-                       // decrease the chance of the load acquire of m_read to read a too
-                       // old value. Same as the case in read_fetch(). Maybe making things
-                       // SC can fix it. We can run testcase3.c to expose this bug (set the
-                       // array size  = 1, the MASK = 1).
-                       
-                       /**** Inadmissibility (testcase3.cc, MASK = 1, size = 1) ****/
-                       bool succ = m_rdwr.compare_exchange_weak(rdwr,(rd<<16) |
-                               ((wr+1)&MASK),mo_acq_rel);
-                       //printf("wr=%d\n", (wr+1)&MASK);
-                       if (succ) {
-                               break;
-                       } else {
-                               /**
-                                       @Begin
-                                       @Commit_point_clear: true
-                                       @Label: PrepareClear1
-                                       @End
-                               */
-
-                               /**
-                                       @Begin
-                                       @Commit_point_define_check: true
-                                       @Label: PrepareReadRW2
-                                       @End
-                               */
-                               thrd_yield();
-                       }
-               }
-
-               // (*1)
-               rl::backoff bo;
-               while (true) {
-                       /**** Inadmissibility (testcase1.c) ****/
-                       int read = m_read.load(mo_acquire);
-                       if ((read & MASK) != rd)
-                               thrd_yield();
-                       else
-                               break;
-               }
-
-               t_element * p = & ( m_array[ wr % t_size ] );
-
-               // This is also just a hack to tell the CDSChecker that we have a memory
-               // operation here
-               arr.load(memory_order_relaxed);
-               /**
-                       @Begin
-                       @Commit_point_clear: true
-                       @Label: PrepareClear2
-                       @End
-               */
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: PrepareReadPointer
-                       @End
-               */
-
-               return p;
-       }
-
-       /**
-               @Begin
-               @Interface: Publish 
-               @Commit_point_set: PublishFetchAdd
-               @ID: (call_id_t) bin 
-               //@Action: model_print("Publish: %d\n", bin);
-               @End
-       */
-       void write_publish(t_element *bin)
-       {
-               /**** Inadmissibility (testcase2.c) ****/
-               int tmp = m_written.fetch_add(1,mo_release);
-               /**
-                       @Begin
-                       @Commit_point_define_check: true
-                       @Label: PublishFetchAdd
-                       @End
-               */
-               printf("publish: m_written=%d\n", tmp + 1);
-       }
-
-       //-----------------------------------------------------
-
-
-};
-/**
-       @Begin
-       @Class_end
-       @End
-*/
diff --git a/benchmark/mpmc-queue/testcase1.cc b/benchmark/mpmc-queue/testcase1.cc
deleted file mode 100644 (file)
index 6a5e21c..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <inttypes.h>
-#include <threads.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#include <librace.h>
-
-#include "mpmc-queue.h"
-
-void threadA(struct mpmc_boundq_1_alt<int32_t, 1> *queue)
-{
-       int32_t *bin;
-       bin = queue->write_prepare();
-       if (bin) {
-               *bin = 1;
-               printf("write_bin %d, val %d\n", bin, 1);
-               queue->write_publish(bin);
-       } else {
-               printf("write failed\n");
-       }
-}
-
-void threadB(struct mpmc_boundq_1_alt<int32_t, 1> *queue)
-{
-       int32_t *bin;
-       bin = queue->read_fetch();
-       if (bin) {
-               printf("read_bin: %d, val %d\n", bin, *bin);
-               queue->read_consume(bin);
-       } else {
-               printf("Read failed\n");
-       }
-}
-
-int user_main(int argc, char **argv)
-{
-       struct mpmc_boundq_1_alt<int32_t, 1> queue(0xFFFF);
-       thrd_t A, B;
-
-       printf("Adding initial element\n");
-       int32_t *bin;
-       bin = queue.write_prepare();
-       *bin = 17;
-       printf("init_write_bin %d, val %d\n", bin, 17);
-       queue.write_publish(bin);
-
-/*
-       bin = queue.write_prepare();
-       *bin = 27;
-       printf("init_write_bin %d, val %d\n", bin, 27);
-       queue.write_publish(bin);
-*/
-
-       printf("Start threads\n");
-
-       thrd_create(&A, (thrd_start_t)&threadA, &queue);
-       thrd_create(&B, (thrd_start_t)&threadB, &queue);
-
-       thrd_join(A);
-       thrd_join(B);
-       printf("Threads complete\n");
-
-       return 0;
-}
diff --git a/benchmark/mpmc-queue/testcase2.cc b/benchmark/mpmc-queue/testcase2.cc
deleted file mode 100644 (file)
index f03ea41..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <inttypes.h>
-#include <threads.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#include <librace.h>
-
-#include "mpmc-queue.h"
-
-void threadA(struct mpmc_boundq_1_alt<int32_t, 1> *queue)
-{
-       int32_t *bin;
-       /*
-       bin = queue->write_prepare();
-       if (bin) {
-               *bin = 1;
-               printf("write_bin %d, val %d\n", bin, 1);
-               queue->write_publish(bin);
-       } else {
-               printf("write failed\n");
-       }
-       */
-
-       for (int i = 0; i < 1; i++) {
-               bin = queue->write_prepare();
-               if (bin) {
-                       *bin = 1;
-                       queue->write_publish(bin);
-                       printf("write_bin %d, val %d\n", bin, 1);
-               } else {
-                       printf("write failed\n");
-               }
-
-               bin = queue->read_fetch();
-               if (bin) {
-                       printf("read_bin: %d, val %d\n", bin, *bin);
-                       queue->read_consume(bin);
-               } else {
-                       printf("read failed\n");
-               }
-       }
-
-}
-
-void threadB(struct mpmc_boundq_1_alt<int32_t, 1> *queue)
-{
-       int32_t *bin;
-       for (int i = 0; i < 1; i++) {
-               bin = queue->read_fetch();
-               if (bin) {
-                       printf("read_bin: %d, val %d\n", bin, *bin);
-                       queue->read_consume(bin);
-               } else {
-                       printf("read failed\n");
-               }
-       }
-
-
-}
-
-int user_main(int argc, char **argv)
-{
-       struct mpmc_boundq_1_alt<int32_t, 1> queue(0x1);
-       thrd_t A, A1, B;
-
-       printf("Adding initial element\n");
-       int32_t *bin;
-       for (int i = 0; i < 1; i++) {
-               printf("#%d, \n", i);
-               bin = queue.write_prepare();
-               *bin = 17;
-               printf("init_write_bin %d, val %d\n", bin, 17);
-               queue.write_publish(bin);
-
-               bin = queue.read_fetch();
-               if (bin) {
-                       printf("init_read: %d, val %d\n", bin, *bin);
-                       queue.read_consume(bin);
-               }
-       }
-       
-       for (int i = 0; i < 3; i++) {
-               
-       }
-
-       printf("Start threads\n");
-
-       thrd_create(&A, (thrd_start_t)&threadA, &queue);
-       thrd_create(&A1, (thrd_start_t)&threadA, &queue);
-       thrd_create(&B, (thrd_start_t)&threadB, &queue);
-
-       thrd_join(A);
-       thrd_join(A1);
-       thrd_join(B);
-       printf("Threads complete\n");
-
-       return 0;
-}
diff --git a/benchmark/mpmc-queue/testcase3.cc b/benchmark/mpmc-queue/testcase3.cc
deleted file mode 100644 (file)
index a0a857e..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-#include <inttypes.h>
-#include <threads.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#include <librace.h>
-
-#include "mpmc-queue.h"
-
-void threadA(struct mpmc_boundq_1_alt<int32_t, 1> *queue)
-{
-       int32_t *bin;
-       /*
-       bin = queue->write_prepare();
-       if (bin) {
-               *bin = 1;
-               printf("write_bin %d, val %d\n", bin, 1);
-               queue->write_publish(bin);
-       } else {
-               printf("write failed\n");
-       }
-       */
-
-       for (int i = 0; i < 1; i++) {
-               bin = queue->write_prepare();
-               if (bin) {
-                       *bin = 1;
-                       queue->write_publish(bin);
-                       printf("write_bin %d, val %d\n", bin, 1);
-               } else {
-                       printf("write failed\n");
-               }
-
-    &nb