// Shorthand aliases for the std::memory_order enumerators so atomic calls
// below can be written e.g. load(acquire) instead of load(memory_order_acquire).
// NOTE(review): #define-ing common identifiers like `relaxed` / `release`
// pollutes the global namespace and will silently rewrite any other use of
// those words; the rest of this file depends on these names, so they are
// kept, but `constexpr auto relaxed = memory_order_relaxed;` would be safer.
11 #define relaxed memory_order_relaxed
12 #define release memory_order_release
13 #define acquire memory_order_acquire
14 #define acq_rel memory_order_acq_rel
15 #define seq_cst memory_order_seq_cst
20 For the sake of simplicity, we map Integer -> Integer.
// Bucket node constructor: each hash bucket is a singly linked list of Entry
// objects. (Lines 33-34 are missing from this excerpt; they presumably store
// the hash `h` and key `k` fields -- TODO confirm against the full file.)
32 Entry(int h, int k, int v, Entry *n) {
// Relaxed stores suffice here: the new entry is not yet visible to other
// threads. It is published later by a release store of the bucket head
// (see put(): first->store(newEntry, release)), which orders these
// initializing stores before the entry becomes reachable.
35 this->value.store(v, relaxed);
36 this->next.store(n, relaxed);
// Bucket array: table[i] is the head pointer of bucket i's entry list.
// Heads are atomic so get() can traverse without holding the segment lock.
64 atomic<Entry*> *table;
// Number of lock segments (stripes). Must be a power of two so that
// SEGMENT_MASK below turns `hash & SEGMENT_MASK` into a cheap modulo.
69 static const int CONCURRENCY_LEVEL = 4;
71 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
// One lock per segment; a key's segment is selected by hash & SEGMENT_MASK,
// so keys in different segments can be updated concurrently.
73 Segment *segments[CONCURRENCY_LEVEL];
// Must also be a power of two: bucket selection uses hash & (capacity - 1).
75 static const int DEFAULT_INITIAL_CAPACITY = 16;
77 // Not gonna consider resize now...
// Constructor body (signature not visible in this excerpt): allocate the
// bucket table, zero every head pointer, and create the segment locks.
81 this->capacity = DEFAULT_INITIAL_CAPACITY;
82 this->table = new atomic<Entry*>[capacity];
83 for (int i = 0; i < capacity; i++) {
// atomic_init: plain (non-atomic) initialization is fine here because the
// map is not yet shared with other threads.
84 atomic_init(&table[i], NULL);
86 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
87 segments[i] = new Segment;
// Spread the key's bits into a hash (body omitted in this excerpt).
91 int hashKey(int key) {
// get(key): optimistic lock-free fast path; falls back to a locked recheck
// if the key appears absent or the bucket head changed under us.
97 int hash = hashKey(key);
99 // Try first without locking...
100 atomic<Entry*> *tab = table;
// capacity is a power of two, so `hash & (capacity - 1)` == hash % capacity.
101 int index = hash & (capacity - 1);
102 atomic<Entry*> *first = &tab[index];
106 // Should be a load acquire
107 // This load action here makes it problematic for the SC analysis, what
108 // we need to do is as follows: if the get() method ever acquires the
109 // lock, we ignore this operation for the SC analysis, and otherwise we
110 // take it into consideration
// Acquire-load of the bucket head synchronizes with put()'s release store,
// making the entry's fields (written before publication) visible here.
112 Entry *firstPtr = first->load(acquire);
// seq_cst read of the value pairs with the seq_cst value store in put();
// (excerpt gap: the key-comparison loop around this read is not visible).
117 res = e->value.load(seq_cst);
123 // Loading the next entry, this can be relaxed because the
124 // synchronization has been established by the load of head
125 e = e->next.load(relaxed);
128 // Recheck under synch if key apparently not there or interference
129 Segment *seg = segments[hash & SEGMENT_MASK];
130 seg->lock(); // Critical region begins
131 // Not considering resize now, so ignore the reload of table...
133 // Synchronized by locking, no need to be load acquire
134 Entry *newFirstPtr = first->load(relaxed);
// Re-traverse only if the fast path ended mid-list or the head moved
// (i.e. another thread inserted while we were scanning).
135 if (e != NULL || firstPtr != newFirstPtr) {
139 // Protected by lock, no need to be SC
140 res = e->value.load(relaxed);
141 seg->unlock(); // Critical region ends
144 // Synchronized by locking
145 e = e->next.load(relaxed);
148 seg->unlock(); // Critical region ends
// put(key, value): insert or overwrite under the key's segment lock.
// Returns the previous value (excerpt gap: the return statements themselves
// are not visible here).
152 int put(int key, int value) {
// Zero is rejected for both key and value -- presumably because 0 serves as
// the "not found" sentinel elsewhere; TODO confirm against get()'s return.
153 ASSERT (key && value);
154 int hash = hashKey(key);
155 Segment *seg = segments[hash & SEGMENT_MASK];
158 seg->lock(); // Critical region begins
160 int index = hash & (capacity - 1);
// (excerpt gap: `tab` is assigned from `table` on a line not shown here)
162 atomic<Entry*> *first = &tab[index];
166 // The written of the entry is synchronized by locking
167 Entry *firstPtr = first->load(relaxed);
// Key already present: overwrite in place.
171 // This could be a relaxed (because locking synchronize
172 // with the previous put()), no need to be acquire
173 oldValue = e->value.load(relaxed);
// seq_cst store pairs with the seq_cst value load on get()'s lock-free
// fast path, which runs without holding this segment's lock.
174 e->value.store(value, seq_cst);
175 seg->unlock(); // Don't forget to unlock before return
178 // Synchronized by locking
179 e = e->next.load(relaxed);
// Key absent: prepend a new entry to the bucket list.
182 // Add to front of list
183 Entry *newEntry = new Entry(hash, key, value, firstPtr);
184 // Publish the newEntry to others
// Release store: orders the Entry constructor's relaxed field stores
// before the entry becomes reachable from the bucket head.
185 first->store(newEntry, release);
186 seg->unlock(); // Critical region ends