simplified hashtable
[cdsspec-compiler.git] / benchmark / cliffc-hashtable / cliffc_hashtable.h
1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
11 /**
12         This header file declares and defines a simplified version of Cliff Click's
13         NonblockingHashMap. It contains all the necessary structures and main
14         functions. In simplified_cliffc_hashtable.cc file, it has the definition for
15         the static fields.
16 */
17
18 template<typename TypeK, typename TypeV>
19 class cliffc_hashtable;
20
21 /**
22         Corresponding to the Object[] array in Cliff Click's Java implementation.
23         It keeps the first two slots for CHM (Hashtable control unit) and the hash
24         records (an array of hash used for fast negative key-equality check).
25 */
/**
	Corresponds to the Object[] array in Cliff Click's Java implementation.
	Layout of _data: slot 0 holds the CHM control block, slot 1 holds the
	cached-hash array, and slots [2, 2*_size+2) hold interleaved key/value
	pairs (key at 2*i+2, value at 2*i+3).
*/
struct kvs_data {
	int _size;
	atomic<void*> *_data;
	
	kvs_data(int sz) {
		_size = sz;
		// Two header slots (CHM + hash array) plus a key slot and a value
		// slot per pair.  FIX: the original used
		// sizeof(atomic<void*>) * 2 + 2, which equals 2*sz+2 only by
		// coincidence for sz == 8 on 64-bit and mis-sizes every other table.
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block (slot 0) should be initialized in resize()
		// Init the hash record array; 0 means "hash not yet recorded"
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		// Init the data to Null slot
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
		// Publish the hash array with release so acquiring readers see the
		// zero-initialized contents
		_data[1].store(hashes, memory_order_release);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		// FIX: hashes was allocated with new[], so it must be released
		// with delete[] (plain delete was undefined behavior)
		delete[] hashes;
		delete[] _data;
	}
};
54
/**
	A boxed pointer.  _ptr carries the user's key or value object; _prime
	marks a value that is in the middle of being copied to a newer table.
*/
struct slot {
	bool _prime;
	void *_ptr;

	// Box 'ptr', optionally marked as prime
	slot(bool prime, void *ptr) : _prime(prime), _ptr(ptr) {}
};
64
65
66 /**
67         TypeK must have defined function "int hashCode()" which returns the hash
68         code for its object, and "int equals(TypeK anotherKey)" which is
69         used to judge equality.
70         TypeK and TypeV should define their own copy constructor.
71 */
72 template<typename TypeK, typename TypeV>
73 class cliffc_hashtable {
74         /**
75                 # The synchronization we have for the hashtable gives us the property of
76                 # serializability, so we should have a sequential hashtable when we check the
77                 # correctness. The key thing is to identify all the commit point.
78         
79                 @Begin
80                 @Options:
81                         LANG = C;
82                         CLASS = cliffc_hashtable;
83                 @Global_define:
84                         @DeclareVar:
85                         spec_hashtable<TypeK, TypeV*> map;
86                         spec_hashtable<TypeK, Tag> id_map;
87                         Tag tag;
88                         @InitVar:
89                                 map = spec_hashtable<TypeK, TypeV*>();
90                                 id_map = spec_hashtable<TypeK, TypeV*>();
91                                 tag = Tag();
92                         @DefineFunc:
93                         bool equals_val(TypeV *ptr1, TypeV *ptr2) {
94                                 // ...
95                         }
96                         
97                         @DefineFunc:
98                         # Update the tag for the current key slot if the corresponding tag
99                         # is NULL, otherwise just return that tag. It will update the next
100                         # available tag too if it requires a new tag for that key slot.
101                         Tag getKeyTag(TypeK &key) {
102                                 if (id_map.get(key) == NULL) {
103                                         Tag cur_tag = tag.current();
104                                         id_map.put(key, cur_tag);
105                                         tag.next();
106                                         return cur_tag;
107                                 } else {
108                                         return id_map.get(key);
109                                 }
110                         }
111                 
112                 @Interface_cluster:
113                         Read_interface = {
114                                 Get,
115                                 PutIfAbsent,
116                                 RemoveAny,
117                                 RemoveIfMatch,
118                                 ReplaceAny,
119                                 ReplaceIfMatch
120                         }
121                         
122                         Write_interface = {
123                                 Put,
124                                 PutIfAbsent(COND_PutIfAbsentSucc),
125                                 RemoveAny,
126                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
127                                 ReplaceAny,
128                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
129                         }
130                 @Happens_before:
131                         Write_interface -> Read_interface
132                 @End
133         */
134
135 friend class CHM;
136         /**
137                 The control structure for the hashtable
138         */
139         private:
140         class CHM {
141                 friend class cliffc_hashtable;
142                 private:
143                 atomic<kvs_data*> _newkvs;
144                 
145                 // Size of active K,V pairs
146                 atomic_int _size;
147         
148                 // Count of used slots
149                 atomic_int _slots;
150                 
151                 // The next part of the table to copy
152                 atomic_int _copy_idx;
153                 
154                 // Work-done reporting
155                 atomic_int _copy_done;
156         
157                 public:
158                 CHM(int size) {
159                         _size.store(size, memory_order_relaxed);
160                         _slots.store(0, memory_order_relaxed);
161         
162                         _copy_idx.store(0, memory_order_relaxed);
163                         _copy_done.store(0, memory_order_release);
164                 }
165         
166                 ~CHM() {}
167                 
168                 private:
169                         
170                 // Heuristic to decide if the table is too full
171                 bool table_full(int reprobe_cnt, int len) {
172                         return
173                                 reprobe_cnt >= REPROBE_LIMIT &&
174                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
175                 }
176         
177                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
178                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
179                         if (newkvs != NULL)
180                                 return newkvs;
181         
182                         // No copy in-progress, start one; Only double the table size
183                         int oldlen = kvs->_size;
184                         int sz = _size.load(memory_order_relaxed);
185                         int newsz = sz;
186                         
187                         // Just follow Cliff Click's heuristic to decide the new size
188                         if (sz >= (oldlen >> 2)) { // If we are 25% full
189                                 newsz = oldlen << 1; // Double size
190                                 if (sz >= (oldlen >> 1))
191                                         newsz = oldlen << 2; // Double double size
192                         }
193         
194                         // We do not record the record timestamp
195                         if (newsz <= oldlen) newsz = oldlen << 1;
196                         // Do not shrink ever
197                         if (newsz < oldlen) newsz = oldlen;
198         
199                         // Last check cause the 'new' below is expensive
200                         newkvs = _newkvs.load(memory_order_acquire);
201                         if (newkvs != NULL) return newkvs;
202         
203                         newkvs = new kvs_data(newsz);
204                         void *chm = (void*) new CHM(sz);
205                         newkvs->_data[0].store(chm, memory_order_relaxed);
206         
207                         kvs_data *cur_newkvs; 
208                         // Another check after the slow allocation
209                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
210                                 return cur_newkvs;
211                         // CAS the _newkvs to the allocated table
212                         kvs_data *desired = (kvs_data*) NULL;
213                         kvs_data *expected = (kvs_data*) newkvs; 
214                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
215                                         memory_order_release)) {
216                                 // Should clean the allocated area
217                                 delete newkvs;
218                                 newkvs = _newkvs.load(memory_order_acquire);
219                         }
220                         return newkvs;
221                 }
222         
223                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
224                         bool copy_all) {
225                         assert (get_chm(oldkvs) == this);
226                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
227                         int oldlen = oldkvs->_size;
228                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
229                 
230                         // Just follow Cliff Click's code here
231                         int panic_start = -1;
232                         int copyidx;
233                         while (_copy_done.load(memory_order_acquire) < oldlen) {
234                                 copyidx = _copy_idx.load(memory_order_acquire);
235                                 if (panic_start == -1) { // No painc
236                                         copyidx = _copy_idx.load(memory_order_acquire);
237                                         while (copyidx < (oldlen << 1) &&
238                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
239                                                         min_copy_work, memory_order_release, memory_order_release))
240                                                 copyidx = _copy_idx.load(memory_order_relaxed);
241                                         if (!(copyidx < (oldlen << 1)))
242                                                 panic_start = copyidx;
243                                 }
244         
245                                 // Now copy the chunk of work we claimed
246                                 int workdone = 0;
247                                 for (int i = 0; i < min_copy_work; i++)
248                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
249                                                 newkvs))
250                                                 workdone++;
251                                 if (workdone > 0)
252                                         copy_check_and_promote(topmap, oldkvs, workdone);
253         
254                                 copyidx += min_copy_work;
255                                 if (!copy_all && panic_start == -1)
256                                         return; // We are done with the work we claim
257                         }
258                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
259                 }
260         
261                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
262                         *oldkvs, int idx, void *should_help) {
263                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
264                         // We're only here cause the caller saw a Prime
265                         if (copy_slot(topmap, idx, oldkvs, newkvs))
266                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
267                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
268                 }
269         
270                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
271                         oldkvs, int workdone) {
272                         int oldlen = oldkvs->_size;
273                         int copyDone = _copy_done.load(memory_order_relaxed);
274                         if (workdone > 0) {
275                                 while (true) {
276                                         copyDone = _copy_done.load(memory_order_relaxed);
277                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
278                                                 workdone, memory_order_relaxed, memory_order_relaxed))
279                                                 break;
280                                 }
281                         }
282         
283                         // Promote the new table to the current table
284                         if (copyDone + workdone == oldlen &&
285                                 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
286                                 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
287                                 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
288                                         memory_order_release);
289                         }
290                 }
291         
292                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
293                         kvs_data *newkvs) {
294                         slot *key_slot;
295                         while ((key_slot = key(oldkvs, idx)) == NULL)
296                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
297         
298                         // First CAS old to Prime
299                         slot *oldval = val(oldkvs, idx);
300                         while (!is_prime(oldval)) {
301                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
302                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
303                                 if (CAS_val(oldkvs, idx, oldval, box)) {
304                                         if (box == TOMBPRIME)
305                                                 return 1; // Copy done
306                                         // Otherwise we CAS'd the box
307                                         oldval = box; // Record updated oldval
308                                         break;
309                                 }
310                                 oldval = val(oldkvs, idx); // Else re-try
311                         }
312         
313                         if (oldval == TOMBPRIME) return false; // Copy already completed here
314         
315                         slot *old_unboxed = new slot(false, oldval->_ptr);
316                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
317                                 NULL) == NULL);
318         
319                         // Old value is exposed in the new table
320                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
321                                 oldval = val(oldkvs, idx);
322         
323                         return copied_into_new;
324                 }
325         };
326
327         
328
	private:
	static const int Default_Init_Size = 8; // Initial table size

	// Sentinel slots (per the header comment, defined in the separate .cc
	// file for static fields):
	//   MATCH_ANY    - expected-value wildcard: replace whatever is there
	//   NO_MATCH_OLD - skip the expected-value check entirely
	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	// TOMBSTONE marks a deleted value; TOMBPRIME is its primed counterpart
	// used while a slot is being copied to a new table
	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	// The current key/value table; swapped (promoted) during resize
	atomic<kvs_data*> _kvs;
341
342         public:
343         cliffc_hashtable() {
344                 // Should initialize the CHM for the construction of the table
345                 // For other CHM in kvs_data, they should be initialzed in resize()
346                 // because the size is determined dynamically
347                 kvs_data *kvs = new kvs_data(Default_Init_Size);
348                 void *chm = (void*) new CHM(0);
349                 kvs->_data[0].store(chm, memory_order_relaxed);
350                 _kvs.store(kvs, memory_order_release);
351         }
352
353         cliffc_hashtable(int init_size) {
354                 // Should initialize the CHM for the construction of the table
355                 // For other CHM in kvs_data, they should be initialzed in resize()
356                 // because the size is determined dynamically
357                 kvs_data *kvs = new kvs_data(init_size);
358                 void *chm = (void*) new CHM(0);
359                 kvs->_data[0].store(chm, memory_order_relaxed);
360                 _kvs.store(kvs, memory_order_release);
361         }
362
363         /**
364                 @Begin
365                 @Interface: Get
366                 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
367                 @ID: __sequential.getKeyTag(key)
368                 @Action:
369                         TypeV *_Old_Val = __sequential.map.get(key)
370                 @Post_check:
371                         __sequential.equals_val(_Old_Val, __RET__)
372                 @End
373         */
374         TypeV* get(TypeK& key) {
375                 void *key_ptr = (void*) new TypeK(key);
376                 slot *key_slot = new slot(false, key_ptr);
377                 int fullhash = hash(key_slot);
378                 kvs_data *kvs = _kvs.load(memory_order_acquire);
379                 slot *V = get_impl(this, kvs, key_slot, fullhash);
380                 if (V == NULL) return NULL;
381                 assert (!is_prime(V));
382                 return (TypeV*) V->_ptr;
383         }
384
385         /**
386                 @Begin
387                 @Interface: Put
388                 @Commit_point_set: Write_Val_Point
389                 @ID: __sequential.getKeyTag(key)
390                 @Action:
391                         # Remember this old value at checking point
392                         TypeV *_Old_Val = __sequential.map.get(key)
393                         __sequential.map.put(key, &val);
394                 @Post_check:
395                         __sequential.equals_val(__RET__, _Old_Val)
396                 @End
397         */
	// Unconditionally map key -> val (NO_MATCH_OLD skips the expected-value
	// check); returns the previous value, or NULL if the key was absent
	TypeV* put(TypeK& key, TypeV& val) {
		return putIfMatch(key, val, NO_MATCH_OLD);
	}
401
402         /**
403                 @Begin
404                 @Interface: PutIfAbsent
405                 @Commit_point_set:
406                         Write_Val_Point | PutIfAbsent_Fail_Point
407                 @Condition: __sequential.map.get(key) == NULL
408                 @HB_condition:
409                         COND_PutIfAbsentSucc :: __RET__ == NULL
410                 @ID: __sequential.getKeyTag(key)
411                 @Action:
412                         TypeV *_Old_Val = __sequential.map.get(key)
413                         if (__COND_SAT__)
414                                 __sequential.map.put(key, &value);
415                 @Post_check:
416                         __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__) 
417                 @End
418         */
419         TypeV* putIfAbsent(TypeK& key, TypeV& value) {
420                 return putIfMatch(key, val, TOMBSTONE);
421         }
422
423         /**
424                 @Begin
425                 @Interface: RemoveAny
426                 @Commit_point_set: Write_Val_Point
427                 @ID: __sequential.getKeyTag(key)
428                 @Action:
429                         TypeV *_Old_Val = __sequential.map.get(key)
430                         __sequential.map.put(key, NULL);
431                 @Post_check:
432                         __sequential.equals_val(__RET__, _Old_Val)
433                 @End
434         */
435         TypeV* remove(TypeK& key) {
436                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
437         }
438
439         /**
440                 @Begin
441                 @Interface: RemoveIfMatch
442                 @Commit_point_set:
443                         Write_Val_Point | RemoveIfMatch_Fail_Point
444                 @Condition:
445                         __sequential.equals_val(__sequential.map.get(key), &val)
446                 @HB_condition:
447                         COND_RemoveIfMatchSucc :: __RET__ == true
448                 @ID: __sequential.getKeyTag(key)
449                 @Action:
450                         if (__COND_SAT__)
451                                 __sequential.map.put(key, NULL);
452                 @Post_check:
453                         __COND_SAT__ ? __RET__ : !__RET__
454                 @End
455         */
456         bool remove(TypeK& key, TypeV& val) {
457                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
458                 return putIfMatch(key, TOMBSTONE, val) == val;
459
460         }
461
462         /**
463                 @Begin
464                 @Interface: ReplaceAny
465                 @Commit_point_set:
466                         Write_Val_Point
467                 @ID: __sequential.getKeyTag(key)
468                 @Action:
469                         TypeV *_Old_Val = __sequential.map.get(key)
470                 @Post_check:
471                         __sequential.equals_val(__RET__, _Old_Val)
472                 @End
473         */
	// Replace the value for key only if some mapping already exists
	// (MATCH_ANY wildcard); returns the previous value, or NULL if the key
	// was absent (in which case nothing is inserted)
	TypeV* replace(TypeK& key, TypeV& val) {
		return putIfMatch(key, val, MATCH_ANY);
	}
477
478         /**
479                 @Begin
480                 @Interface: ReplaceIfMatch
481                 @Commit_point_set:
482                         Write_Val_Point | ReplaceIfMatch_Fail_Point
483                 @Condition:
484                         __sequential.equals_val(__sequential.map.get(key), &oldval)
485                 @HB_condition:
486                         COND_ReplaceIfMatchSucc :: __RET__ == true
487                 @ID: __sequential.getKeyTag(key)
488                 @Action:
489                         if (__COND_SAT__)
490                                 __sequential.map.put(key, &newval);
491                 @Post_check:
492                         __COND_SAT__ ? __RET__ : !__RET__
493                 @End
494         */
495         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
496                 return putIfMatch(key, newval, oldval) == oldval;
497         }
498
499         private:
500         static CHM* get_chm(kvs_data* kvs) {
501                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
502         }
503
504         static int* get_hashes(kvs_data *kvs) {
505                 return (int *) kvs->_data[1].load(memory_order_relaxed);
506         }
507         
508         // Preserve happens-before semantics on newly inserted keys
509         static inline slot* key(kvs_data *kvs, int idx) {
510                 assert (idx >= 0 && idx < kvs->_size);
511                 // Corresponding to the volatile read in get_impl() and putIfMatch in
512                 // Cliff Click's Java implementation
513                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
514         }
515
	/**
		The atomic operation in val() function is a "potential" commit point,
		which means in some case it is a real commit point while it is not for
		some other cases. This so happens because the val() function is such a
		fundamental function that many internal operation will call. Our
		strategy is that we label any potential commit points and check if they
		really are the commit points later.
	*/
	// Preserve happens-before semantics on newly inserted values
	static inline slot* val(kvs_data *kvs, int idx) {
		assert (idx >= 0 && idx < kvs->_size);
		// Values sit right after their key (offset 2*idx+3); the acquire
		// load corresponds to the volatile read in get_impl() and
		// putIfMatch in Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many many functions are
			# calling val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}
542
	// Compute the spread hash for a boxed key.  Requires TypeK to provide
	// hashCode(); the bit-mixing sequence follows Cliff Click's code.
	// NOTE(review): the shift-and-add steps can overflow a signed int,
	// which is UB in standard C++ -- consider unsigned arithmetic.
	static int hash(slot *key_slot) {
		assert(key_slot != NULL && key_slot->_ptr != NULL);
		TypeK* key = (TypeK*) key_slot->_ptr;
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}
555         
	// Heuristic to decide if reprobed too many times. 
	// Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
	// put it triggers a table resize. Several places MUST have exact agreement.
	static int reprobe_limit(int len) {
		// Larger tables tolerate more reprobes: REPROBE_LIMIT + len/4
		return REPROBE_LIMIT + (len >> 2);
	}
562         
563         static inline bool is_prime(slot *val) {
564                 return (val != NULL) && val->_prime;
565         }
566
	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call.
	// NOTE(review): 'hash' here is the probe index, so hashes[hash] reads
	// the cached hash for that bucket (0 means "not recorded yet").
	// K->_ptr is handed to TypeK::equals() as a void* -- presumably
	// equals() accepts that; verify against TypeK's declared contract.
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		assert (K != NULL);
		TypeK* key_ptr = (TypeK*) key_slot->_ptr;
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}
580
581         static bool valeq(slot *val_slot1, slot *val_slot2) {
582                 assert (val_slot1 != NULL);
583                 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
584                 if (val_slot2 == NULL || ptr1 == NULL) return false;
585                 return ptr1->equals(val_slot2->_ptr);
586         }
587         
588         // Together with key() preserve the happens-before relationship on newly
589         // inserted keys
590         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
591                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
592                         desired, memory_order_release, memory_order_release);
593         }
594
595         /**
596                 Same as the val() function, we only label the CAS operation as the
597                 potential commit point.
598         */
599         // Together with val() preserve the happens-before relationship on newly
600         // inserted values
601         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
602                 *desired) {
603                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
604                         desired, memory_order_release, memory_order_release);
605                 /**
606                         # If it is a successful put instead of a copy or any other internal
607                         # operantions, expected != NULL
608                         @Begin
609                         @Potential_commit_point_define: __ATOMIC_RET__ == true
610                         @Label: Write_Val_Point
611                         @End
612                 */
613                 return res;
614         }
615
	/**
		Probe the table for key_slot.  Returns the value slot on a hit and
		NULL on a miss; when a resize is in progress it finishes copying
		the probed slot and recurses into the newer table.
	*/
	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		// Tables are power-of-two sized, so masking picks the home bucket
		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (V == NULL) return NULL; // A miss
			
			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			if (++reprobe_cnt >= REPROBE_LIMIT ||
				key_slot == TOMBSTONE) {
				// Too many reprobes: retry in the new table if one exists,
				// otherwise treat it as a miss
				// Atomic read (acquire) can be here 
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}
672
673         // A wrapper of the essential function putIfMatch()
674         TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
675                 // TODO: Should throw an exception rather return NULL
676                 if (old_val == NULL) {
677                         return NULL;
678                 }
679                 void *key_ptr = (void*) new TypeK(key);
680                 slot *key_slot = new slot(false, key_ptr);
681
682                 void *val_ptr = (void*) new TypeV(value);
683                 slot *value_slot = new slot(false, val_ptr);
684                 kvs_data *kvs = _kvs.load(memory_order_acquire);
685                 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
686                 // Only when copy_slot() call putIfMatch() will it return NULL
687                 assert (res != NULL); 
688                 assert (!is_prime(res));
689                 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
690         }
691
	/**
		Put, Remove, PutIfAbsent, etc will call this function. Return the old
		value. If the returned value is equals to the expVal (or expVal is
		NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
		Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
		NULL if passed a NULL expVal.

		@param topmap   The enclosing hashtable (used for help_copy / resize).
		@param kvs      The table (possibly an old one mid-copy) to operate on.
		@param key_slot Boxed key; installed into a free key slot if absent.
		@param val_slot Boxed new value; TOMBSTONE means "remove".
		@param expVal   Expected old value (NO_MATCH_OLD = unconditional put,
		                MATCH_ANY = put only if a live value exists, NULL =
		                internal table-copy call).
		@return The previous value slot (TOMBSTONE stands in for "was empty"
		        on a successful insert), or the current value on a failed
		        match; NULL only for NULL expVal.
	*/
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		assert (val_slot != NULL);
		assert (!is_prime(val_slot));
		assert (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		// Power-of-two table: mask instead of modulo for the home index.
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;
		
		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx);
			if (K == NULL) { // Get a free slot
				// Removing a key that was never inserted is a no-op: do not
				// waste a slot on a dead key.
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				// Keys are never removed once installed, so the racing
				// writer must have left a non-NULL key here.
				assert (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it
			
			// Notice that the logic here should be consistent with that of get.
			// The first predicate means too many reprobes means nothing in the
			// old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				// Retry the whole operation in the new table.
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change
	
		// Here it tries to resize cause it doesn't want other threads to stop
		// its progress (eagerly try to resize soon)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start
		
		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);
		
		// Decided to update the existing table
		while (true) {
			assert (!is_prime(V));

			// Fail the put if the caller's expectation does not match the
			// current value V. Each conjunct carves out a case where the put
			// is still allowed: unconditional (NO_MATCH_OLD), pointer-equal
			// match, MATCH_ANY with a live value, TOMBSTONE-vs-NULL
			// equivalence, and deep value equality.
			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, should adjust size
					// Both normal put's and table-copy calls putIfMatch, but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				// A fresh insert reports TOMBSTONE (not NULL) so only the
				// NULL-expVal table-copy path can ever yield NULL.
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx);
			// A prime value means a copier beat us: finish copying this slot
			// and retry the put in the new table.
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}
828
829         // Help along an existing table-resize. This is a fast cut-out wrapper.
830         kvs_data* help_copy(kvs_data *helper) {
831                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
832                 CHM *topchm = get_chm(topkvs);
833                 // No cpy in progress
834                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
835                 topchm->help_copy_impl(this, topkvs, false);
836                 return helper;
837         }
838 };
839
840 #endif