10 //#include "sc_annotation.h"
12 #define relaxed memory_order_relaxed
13 #define release memory_order_release
14 #define acquire memory_order_acquire
15 #define acq_rel memory_order_acq_rel
16 #define seq_cst memory_order_seq_cst
21 For the sake of simplicity, we do not use templates but some toy structs to
22 represent the Key and Value.
25 // Probably represents a coordinate (x, y, z)
// NOTE(review): this listing is gap-sampled; the class header, field
// declarations, and closing braces are on lines not shown here.
// Tail of hashCode(): Java-style base-31 combining of the three coordinates.
31 return x + 31 * y + 31 * 31 * z;
// Structural equality: all three coordinates must match. The pointer-identity
// fast path lives in the caller (see eq()).
34 bool equals(Key *other) {
37 return x == other->x && y == other->y && z == other->z;
// Constructor; the member-initializer list continues on lines not shown here.
40 Key(int x, int y, int z) :
50 // Probably represents a speed vector (vX, vY, vZ)
// Constructor; the member-initializer list continues on lines not shown here.
55 Value(int vX, int vY, int vZ) :
// Structural equality: all three components must match.
63 bool equals(Value *other) {
66 return vX == other->vX && vY == other->vY && vZ == other->vZ;
// Entry: one node in a bucket's singly linked chain (hash, key, value, next).
77 Entry(int h, Key *k, Value *v, Entry *n) {
// Relaxed stores suffice during construction: the entry only becomes visible
// to lock-free readers via the release store of the bucket head in
// put()/remove(), which publishes these fields.
80 this->value.store(v, relaxed);
81 this->next.store(n, relaxed);
// Bucket array: each slot is an atomic pointer to the head of a chain.
105 atomic<Entry*> *table;
// Lock striping: 16 segments, each serializing writers for a subset of hashes.
110 static const int CONCURRENCY_LEVEL = 16;
// Valid as a bitmask only because CONCURRENCY_LEVEL is a power of two.
112 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
114 Segment *segments[CONCURRENCY_LEVEL];
116 static const int DEFAULT_INITIAL_CAPACITY = 16;
118 // Not gonna consider resize now...
// Constructor body: allocate a fixed-capacity table of empty buckets.
122 this->capacity = DEFAULT_INITIAL_CAPACITY;
123 this->table = new atomic<Entry*>[capacity];
// atomic_init: non-atomic initialization is fine before the table is shared.
124 for (int i = 0; i < capacity; i++) {
125 atomic_init(&table[i], NULL);
127 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
128 segments[i] = new Segment;
// Supplemental hash: spreads the raw hashCode's bits so that masking with
// (capacity - 1) does not depend only on the low-order bits.
132 int hashKey(Key *x) {
133 int h = x->hashCode();
134 // Use logical right shift
// Cast to unsigned so >> shifts in zero bits regardless of h's sign.
135 unsigned int tmp = (unsigned int) h;
// NOTE(review): (h << 7) left-shifts a possibly negative int, which is UB
// before C++20 — consider shifting tmp instead; confirm against full source.
136 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
// Key comparison: pointer-identity fast path, then structural equals().
139 bool eq(Key *x, Key *y) {
140 return x == y || x->equals(y);
// Lookup: lock-free fast path over the bucket chain, falling back to a
// locked recheck when the key seems absent or a reader/writer race is seen.
143 Value* get(Key *key) {
144 //MODEL_ASSERT (key);
145 int hash = hashKey(key);
// Fast path: probe the bucket without taking the segment lock.
147 // Try first without locking...
148 atomic<Entry*> *tab = table;
// capacity is a power of two, so (capacity - 1) masks the hash into range.
149 int index = hash & (capacity - 1);
150 atomic<Entry*> *first = &tab[index];
154 // Should be a load acquire
155 // This load action here makes it problematic for the SC analysis, what
156 // we need to do is as follows: if the get() method ever acquires the
157 // lock, we ignore this operation for the SC analysis, and otherwise we
158 // take it into consideration
// Acquire-load of the head synchronizes with the release store in
// put()/remove(), making the relaxed-initialized Entry fields visible.
161 Entry *firstPtr = first->load(acquire);
166 if (e->hash == hash && eq(key, e->key)) {
// seq_cst: remove() stores NULL into a removed entry's value, and a NULL
// result here sends the reader to the locked slow path.
167 res = e->value.load(seq_cst);
173 // Loading the next entry, this can be relaxed because the
174 // synchronization has been established by the load of head
175 e = e->next.load(relaxed);
// Slow path: retry the lookup while holding this hash's segment lock.
178 // Recheck under synch if key apparently not there or interference
179 Segment *seg = segments[hash & SEGMENT_MASK];
180 seg->lock(); // Critical region begins
181 // Not considering resize now, so ignore the reload of table...
183 // Synchronized by locking, no need to be load acquire
184 Entry *newFirstPtr = first->load(relaxed);
// NOTE(review): this condition admits e == NULL when only the head changed;
// the dereference below presumably sits inside a NULL-guarded loop on lines
// not shown in this view — confirm against the full source.
185 if (e != NULL || firstPtr != newFirstPtr) {
188 if (e->hash == hash && eq(key, e->key)) {
189 // Protected by lock, no need to be SC
190 res = e->value.load(relaxed);
191 seg->unlock(); // Critical region ends
194 // Synchronized by locking
195 e = e->next.load(relaxed);
198 seg->unlock(); // Critical region ends
// Insert or update: writers for a bucket serialize on the segment lock;
// a brand-new entry is prepended and published with a release store.
202 Value* put(Key *key, Value *value) {
203 // Don't allow NULL key or value
204 //MODEL_ASSERT (key && value);
206 int hash = hashKey(key);
207 Segment *seg = segments[hash & SEGMENT_MASK];
210 seg->lock(); // Critical region begins
212 int index = hash & (capacity - 1);
214 atomic<Entry*> *first = &tab[index];
// Holds the previous mapping for this key, if any (NULL otherwise).
216 Value *oldValue = NULL;
218 // Writes to the entry are synchronized by locking
219 Entry *firstPtr = first->load(relaxed);
// Scan the chain for an existing mapping of this key.
222 if (e->hash == hash && eq(key, e->key)) {
223 // FIXME: This could be a relaxed (because locking synchronizes
224 // with the previous put())?? no need to be acquire
225 oldValue = e->value.load(relaxed);
// seq_cst store pairs with the seq_cst value load in get()'s lock-free path.
226 e->value.store(value, seq_cst);
227 seg->unlock(); // Don't forget to unlock before return
230 // Synchronized by locking
231 e = e->next.load(relaxed);
234 // Add to front of list
235 Entry *newEntry = new Entry(hash, key, value, firstPtr);
236 // Publish the newEntry to others
// Release store: makes the relaxed-initialized Entry fields visible to the
// acquire load of the head in get().
237 first->store(newEntry, release);
238 seg->unlock(); // Critical region ends
// Removal under the segment lock. The chain is never mutated in place:
// entries in front of the removed node are copied onto the retained suffix
// and the new head is published with a release store, so concurrent
// lock-free readers always see a consistent chain.
242 Value* remove(Key *key, Value *value) {
243 //MODEL_ASSERT (key);
244 int hash = hashKey(key);
245 Segment *seg = segments[hash & SEGMENT_MASK];
248 seg->lock(); // Critical region begins
250 int index = hash & (capacity - 1);
252 atomic<Entry*> *first = &tab[index];
254 Value *oldValue = NULL;
256 // Writes to the entry are synchronized by locking
257 Entry *firstPtr = first->load(relaxed);
// Key not found: release the lock before the early return.
262 seg->unlock(); // Don't forget to unlock
// Walk the chain until the matching entry is found.
265 if (e->hash == hash && eq(key, e->key))
267 // Synchronized by locking
268 e = e->next.load(relaxed);
271 // FIXME: This could be a relaxed (because locking synchronizes
272 // with the previous put())?? No need to be acquire
273 oldValue = e->value.load(relaxed);
274 // If the value parameter is NULL, we will remove the entry anyway
// NOTE(review): the comment and this condition look inconsistent — as
// written the branch runs when value is non-NULL and matches. Verify
// against the full source whether a `!` (abort-on-mismatch) was intended.
275 if (value != NULL && value->equals(oldValue)) {
280 // Force the get() to grab the lock and retry
// A NULL value is the tombstone get()'s seq_cst load checks for.
281 e->value.store(NULL, relaxed);
283 // The strategy to remove the entry is to keep the entries after the
284 // removed one and copy the ones before it
285 Entry *head = e->next.load(relaxed);
// Copy-on-remove: clone each predecessor of e onto the retained suffix.
287 p = first->load(relaxed);
289 head = new Entry(p->hash, p->key, p->value.load(relaxed), head);
290 p = p->next.load(relaxed);
293 // Publish the new head to readers
294 first->store(head, release);
295 seg->unlock(); // Critical region ends