11 //#include "sc_annotation.h"
13 #define relaxed memory_order_relaxed
14 #define release memory_order_release
15 #define acquire memory_order_acquire
16 #define acq_rel memory_order_acq_rel
17 #define seq_cst memory_order_seq_cst
22 For the sake of simplicity, we do not use templates but some toy structs to
23 represent the Key and Value.
// NOTE(review): this file is an elided excerpt — enclosing struct/class
// bodies and some statements are not visible. Comments describe only the
// visible lines.
26 // Probably represents a coordinate (x, y, z)
// hashCode(): combines the three int fields with the classic
// multiply-by-31 scheme (x + 31*y + 31*31*z).
32 return x + 31 * y + 31 * 31 * z;
// Field-wise equality; dereferences `other` unconditionally, so callers
// must pass a non-NULL Key.
35 bool equals(Key *other) {
38 return x == other->x && y == other->y && z == other->z;
// Constructor — member-initializer list continues on elided lines.
41 Key(int x, int y, int z) :
51 // Probably represents a speed vector (vX, vY, vZ)
// Constructor — member-initializer list continues on elided lines.
56 Value(int vX, int vY, int vZ) :
// Field-wise equality; dereferences `other` unconditionally, so callers
// must pass a non-NULL Value.
64 bool equals(Value *other) {
67 return vX == other->vX && vY == other->vY && vZ == other->vZ;
// Entry: a bucket-chain node (hash, key, value, next). The relaxed stores
// in the constructor are sufficient because the node is only made visible
// to other threads later via a release store of the bucket head (see the
// `first->store(newEntry, release)` in put()).
78 Entry(int h, Key *k, Value *v, Entry *n) {
81 this->value.store(v, relaxed);
82 this->next.store(n, relaxed);
// Bucket array: each slot is the atomic head pointer of an Entry chain.
106 atomic<Entry*> *table;
// Number of lock stripes; must stay a power of two so SEGMENT_MASK
// (CONCURRENCY_LEVEL - 1) selects a segment by bitwise AND.
111 static const int CONCURRENCY_LEVEL = 16;
113 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
// One lock (Segment) per stripe; a key's stripe is hash & SEGMENT_MASK.
115 Segment *segments[CONCURRENCY_LEVEL];
// Also assumed to be a power of two: index is hash & (capacity - 1).
117 static const int DEFAULT_INITIAL_CAPACITY = 16;
119 // Not gonna consider resize now...
// Constructor body (elided header): allocate the bucket array, initialize
// every head pointer to NULL, then create one Segment per stripe.
123 this->capacity = DEFAULT_INITIAL_CAPACITY;
124 this->table = new atomic<Entry*>[capacity];
125 for (int i = 0; i < capacity; i++) {
126 atomic_init(&table[i], NULL);
128 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
129 segments[i] = new Segment;
// Supplemental hash: spreads hashCode() bits so the low bits used for
// bucket indexing are well mixed (same shape as the JDK ConcurrentHashMap
// spreader: (h << 7) - h + (h >>> 9) + (h >>> 17)).
133 int hashKey(Key *x) {
134 int h = x->hashCode();
135 // Use logical right shift via an unsigned copy of h
136 unsigned int tmp = (unsigned int) h;
// NOTE(review): `h << 7` is undefined behavior in C++ when h is negative;
// shifting `tmp` instead (then converting back) would be well-defined.
137 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
// Key equality: pointer-identity fast path, then deep equals().
// Dereferences `x` unconditionally when the pointers differ.
140 bool eq(Key *x, Key *y) {
141 return x == y || x->equals(y);
// Lookup. Fast path: traverse the bucket chain without locking — the
// acquire load of the bucket head synchronizes with put()'s release store,
// publishing the chain. If the key appears absent, or the head changed
// while scanning, fall back to a retry under the segment lock.
// (Loop headers, returns, and closing braces are elided in this excerpt.)
144 Value* get(Key *key) {
145 //MODEL_ASSERT (key);
146 int hash = hashKey(key);
148 // Try first without locking...
149 atomic<Entry*> *tab = table;
150 int index = hash & (capacity - 1);
151 atomic<Entry*> *first = &tab[index];
155 // Should be a load acquire
156 // This load action here makes it problematic for the SC analysis, what
157 // we need to do is as follows: if the get() method ever acquires the
158 // lock, we ignore this operation for the SC analysis, and otherwise we
159 // take it into consideration
162 Entry *firstPtr = first->load(acquire);
// Lock-free chain scan (the loop header is elided from this excerpt).
167 if (e->hash == hash && eq(key, e->key)) {
// seq_cst load of the value; remove() stores NULL here to force readers
// that observe it onto the locked slow path.
168 res = e->value.load(seq_cst);
174 // Loading the next entry, this can be relaxed because the
175 // synchronization has been established by the load of head
176 e = e->next.load(relaxed);
179 // Recheck under synch if key apparently not there or interference
180 Segment *seg = segments[hash & SEGMENT_MASK];
181 seg->lock(); // Critical region begins
182 // Not considering resize now, so ignore the reload of table...
184 // Synchronized by locking, no need to be load acquire
185 Entry *newFirstPtr = first->load(relaxed);
// Retry only when the fast path terminated mid-chain or the head moved.
186 if (e != NULL || firstPtr != newFirstPtr) {
189 if (e->hash == hash && eq(key, e->key)) {
190 // Protected by lock, no need to be SC
191 res = e->value.load(relaxed);
192 seg->unlock(); // Critical region ends
195 // Synchronized by locking
196 e = e->next.load(relaxed);
199 seg->unlock(); // Critical region ends
// Insert or update, entirely under the segment lock. If the key exists,
// swap in the new value and return the old one; otherwise prepend a new
// Entry and publish it with a release store of the bucket head (this is
// what get()'s acquire load of the head synchronizes with).
// (Loop headers, returns, and closing braces are elided in this excerpt.)
203 Value* put(Key *key, Value *value) {
204 // Don't allow NULL key or value
205 //MODEL_ASSERT (key && value);
207 int hash = hashKey(key);
208 Segment *seg = segments[hash & SEGMENT_MASK];
211 seg->lock(); // Critical region begins
213 int index = hash & (capacity - 1);
215 atomic<Entry*> *first = &tab[index];
217 Value *oldValue = NULL;
219 // The write of the entry is synchronized by locking
220 Entry *firstPtr = first->load(relaxed);
// Chain scan for an existing key (loop header elided).
223 if (e->hash == hash && eq(key, e->key)) {
224 // FIXME: This could be a relaxed (because locking synchronizes
225 // with the previous put())?? no need to be acquire
226 oldValue = e->value.load(relaxed);
// seq_cst store pairs with the seq_cst value load on get()'s fast path.
227 e->value.store(value, seq_cst);
228 seg->unlock(); // Don't forget to unlock before return
231 // Synchronized by locking
232 e = e->next.load(relaxed);
235 // Add to front of list
236 Entry *newEntry = new Entry(hash, key, value, firstPtr);
237 // Publish the newEntry to others (release pairs with get()'s acquire)
238 first->store(newEntry, release);
239 seg->unlock(); // Critical region ends
// Remove under the segment lock. The chain nodes are never edited in
// place for readers: the removed entry's value is NULLed (pushing
// concurrent get()s onto the locked path), then a fresh copy of the chain
// prefix is built in front of the surviving suffix and release-published
// as the new bucket head.
// (Loop headers, returns, and closing braces are elided in this excerpt.)
243 Value* remove(Key *key, Value *value) {
244 //MODEL_ASSERT (key);
245 int hash = hashKey(key);
246 Segment *seg = segments[hash & SEGMENT_MASK];
249 seg->lock(); // Critical region begins
251 int index = hash & (capacity - 1);
253 atomic<Entry*> *first = &tab[index];
255 Value *oldValue = NULL;
257 // The write of the entry is synchronized by locking
258 Entry *firstPtr = first->load(relaxed);
// Key-not-found exit (surrounding control flow elided).
263 seg->unlock(); // Don't forget to unlock
// Chain scan for the key (loop header elided).
266 if (e->hash == hash && eq(key, e->key))
268 // Synchronized by locking
269 e = e->next.load(relaxed);
272 // FIXME: This could be a relaxed (because locking synchronizes
273 // with the previous put())?? No need to be acquire
274 oldValue = e->value.load(relaxed);
275 // If the value parameter is NULL, we will remove the entry anyway
276 if (value != NULL && value->equals(oldValue)) {
281 // Force the get() to grab the lock and retry
282 e->value.store(NULL, relaxed);
284 // The strategy to remove the entry is to keep the entries after the
285 // removed one and copy the ones before it
286 Entry *head = e->next.load(relaxed);
// Walk from the old head up to the removed node, cloning each prefix
// entry in front of `head` (loop header elided).
288 p = first->load(relaxed);
290 head = new Entry(p->hash, p->key, p->value.load(relaxed), head);
291 p = p->next.load(relaxed);
294 // Publish the new head to readers (release pairs with get()'s acquire)
295 first->store(head, release);
296 seg->unlock(); // Critical region ends