#ifndef CLIFFC_HASHTABLE_H
#define CLIFFC_HASHTABLE_H

#include <iostream>
#include <atomic>
//#include <common.h>
#ifdef STANDALONE
#include <assert.h>
#define MODEL_ASSERT assert
#else
#include <model-assert.h>
#endif

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>

using namespace std;

/**
	This header file declares and defines a simplified version of Cliff Click's
	NonblockingHashMap. It contains all the necessary structures and the main
	functions. The definitions of the static fields are in the
	simplified_cliffc_hashtable.cc file.
*/

template<typename TypeK, typename TypeV>
class cliffc_hashtable;

/**
	Corresponds to the Object[] array in Cliff Click's Java implementation.
	It keeps the first two slots for the CHM (the hashtable control unit) and
	the hash records (an array of hashes used for fast negative key-equality
	checks).
*/
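// Layout of _data, as used by get_chm(), get_hashes(), key() and val() below:
//   _data[0]         -> CHM*  (control block, installed by the constructors/resize())
//   _data[1]         -> int*  (hash record array)
//   _data[2*i + 2]   -> key slot of entry i
//   _data[2*i + 3]   -> value slot of entry i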
struct kvs_data {
	int _size;
	atomic<void*> *_data;

	kvs_data(int sz) {
		_size = sz;
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		// Init the data to Null slot
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
		_data[1].store(hashes, memory_order_release);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		delete[] hashes; // hashes is an array, so use delete[]
		delete[] _data;
	}
};

struct slot {
	bool _prime;
	void *_ptr;

	slot(bool prime, void *ptr) {
		_prime = prime;
		_ptr = ptr;
	}
};


/**
	TypeK must define "int hashCode()", which returns the hash code for its
	object, and an "equals()" member used to test key equality (the table
	calls it with a void* pointing to the other key).
	TypeK and TypeV should define their own copy constructors.
*/
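/**
	A minimal sketch of a type that satisfies this contract. The
	CLIFFC_HASHTABLE_EXAMPLE guard and the IntWrapper name are hypothetical
	illustrations and not part of the benchmark; the guard keeps the sketch
	out of the build unless it is explicitly enabled.
*/
#ifdef CLIFFC_HASHTABLE_EXAMPLE
class IntWrapper {
	public:
	int _val;

	IntWrapper(int val) : _val(val) {}

	IntWrapper(const IntWrapper &copy) : _val(copy._val) {}

	// Used by hash() to compute the full hash of a key
	int hashCode() {
		return _val;
	}

	// Used by keyeq()/valeq(); the other object arrives as a void*
	bool equals(void *another) {
		if (another == NULL)
			return false;
		IntWrapper *ptr = (IntWrapper*) another;
		return ptr->_val == _val;
	}
};
#endif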
/**
	@Begin
	@Class_begin
	@End
*/
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
	/**
		# The synchronization we have for the hashtable gives us the property of
		# serializability, so we should have a sequential hashtable when we check
		# the correctness. The key thing is to identify all the commit points.

		@Begin
		@Options:
			LANG = CPP;
			CLASS = cliffc_hashtable;
		@Global_define:
			@DeclareVar:
			spec_table *map;
			spec_table *id_map;
			id_tag_t *tag;
			@InitVar:
				map = new_spec_table_default(equals_key);
				id_map = new_spec_table_default(equals_id);
				tag = new_id_tag();

			@DefineFunc:
			bool equals_key(void *ptr1, void *ptr2) {
				TypeK *key1 = (TypeK*) ptr1,
					*key2 = (TypeK*) ptr2;
				if (key1 == NULL || key2 == NULL)
					return false;
				return key1->equals(key2);
			}

			@DefineFunc:
			bool equals_val(void *ptr1, void *ptr2) {
				TypeV *val1 = (TypeV*) ptr1,
					*val2 = (TypeV*) ptr2;
				if (val1 == NULL || val2 == NULL)
					return false;
				return val1->equals(val2);
			}

			@DefineFunc:
			bool equals_id(void *ptr1, void *ptr2) {
				id_tag_t *id1 = (id_tag_t*) ptr1,
					*id2 = (id_tag_t*) ptr2;
				if (id1 == NULL || id2 == NULL)
					return false;
				return *id1 == *id2;
			}

			@DefineFunc:
			# Assign a fresh tag to the current key slot if it does not have
			# one yet, otherwise just return the existing tag. When a new tag
			# is assigned, the next available tag is advanced as well.
			id_tag_t getKeyTag(TypeK *key) {
				if (!spec_table_contains(id_map, key)) {
					id_tag_t *cur_tag = (id_tag_t*) MODEL_MALLOC(sizeof(id_tag_t));
					*cur_tag = current(tag);
					spec_table_put(id_map, key, cur_tag);
					next(tag);
					return *cur_tag;
				} else {
					id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
					return *res;
				}
			}

		@Interface_cluster:
			Read_interface = {
				Get,
				PutIfAbsent,
				RemoveAny,
				RemoveIfMatch,
				ReplaceAny,
				ReplaceIfMatch
			}

			Write_interface = {
				Put,
				PutIfAbsent(COND_PutIfAbsentSucc),
				RemoveAny,
				RemoveIfMatch(COND_RemoveIfMatchSucc),
				ReplaceAny,
				ReplaceIfMatch(COND_ReplaceIfMatchSucc)
			}
		@Happens_before:
			//Write_interface -> Read_interface
			Put->Get
			Put->Put
		@End
	*/

friend class CHM;
	/**
		The control structure for the hashtable
	*/
	private:
	class CHM {
		friend class cliffc_hashtable;
		private:
		atomic<kvs_data*> _newkvs;

		// Size of active K,V pairs
		atomic_int _size;

		// Count of used slots
		atomic_int _slots;

		// The next part of the table to copy
		atomic_int _copy_idx;

		// Work-done reporting
		atomic_int _copy_done;

		public:
		CHM(int size) {
			_newkvs.store(NULL, memory_order_relaxed);
			_size.store(size, memory_order_relaxed);
			_slots.store(0, memory_order_relaxed);

			_copy_idx.store(0, memory_order_relaxed);
			_copy_done.store(0, memory_order_release);
		}

		~CHM() {}

		private:

		// Heuristic to decide if the table is too full
		bool table_full(int reprobe_cnt, int len) {
			return
				reprobe_cnt >= REPROBE_LIMIT &&
				_slots.load(memory_order_relaxed) >= reprobe_limit(len);
		}

		kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
			//model_print("resizing...\n");
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL)
				return newkvs;

			// No copy in progress, so start one; compute the new table size
			int oldlen = kvs->_size;
			int sz = _size.load(memory_order_relaxed);
			int newsz = sz;

			// Just follow Cliff Click's heuristic to decide the new size
			if (sz >= (oldlen >> 2)) { // If we are 25% full
				newsz = oldlen << 1; // Double size
				if (sz >= (oldlen >> 1))
					newsz = oldlen << 2; // Double double size
			}
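			// Example: with oldlen = 4 and sz = 2 (50% full) both branches fire,
			// so newsz = oldlen << 2 = 16; with sz = 1 (25% full) only the first
			// branch fires and newsz = oldlen << 1 = 8.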

			// We do not record the resize timestamp
			if (newsz <= oldlen) newsz = oldlen << 1;
			// Do not shrink ever
			if (newsz < oldlen) newsz = oldlen;

			// Last check because the 'new' below is expensive
			newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL) return newkvs;

			newkvs = new kvs_data(newsz);
			void *chm = (void*) new CHM(sz);
			newkvs->_data[0].store(chm, memory_order_release);

			kvs_data *cur_newkvs;
			// Another check after the slow allocation
			if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
				return cur_newkvs;
			// CAS the _newkvs to the allocated table
			kvs_data *expected = (kvs_data*) NULL;
			kvs_data *desired = newkvs;
			if (!_newkvs.compare_exchange_strong(expected, desired, memory_order_release,
					memory_order_relaxed)) {
				// Lost the race: clean up the allocated table and use the winner's
				delete newkvs;
				newkvs = _newkvs.load(memory_order_acquire);
			}
			return newkvs;
		}

		void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
			bool copy_all) {
			MODEL_ASSERT (get_chm(oldkvs) == this);
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			int oldlen = oldkvs->_size;
			int min_copy_work = oldlen > 1024 ? 1024 : oldlen;

			// Just follow Cliff Click's code here
			int panic_start = -1;
			int copyidx;
			while (_copy_done.load(memory_order_acquire) < oldlen) {
				copyidx = _copy_idx.load(memory_order_acquire);
				if (panic_start == -1) { // No panic
					copyidx = _copy_idx.load(memory_order_acquire);
					while (copyidx < (oldlen << 1) &&
						!_copy_idx.compare_exchange_strong(copyidx, copyidx +
							min_copy_work, memory_order_release, memory_order_relaxed))
						copyidx = _copy_idx.load(memory_order_relaxed);
					if (!(copyidx < (oldlen << 1)))
						panic_start = copyidx;
				}

				// Now copy the chunk of work we claimed
				int workdone = 0;
				for (int i = 0; i < min_copy_work; i++)
					if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
						newkvs))
						workdone++;
				if (workdone > 0)
					copy_check_and_promote(topmap, oldkvs, workdone);

				copyidx += min_copy_work;
				if (!copy_all && panic_start == -1)
					return; // We are done with the work we claimed
			}
			copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
		}

		kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
			*oldkvs, int idx, void *should_help) {
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			// We're only here because the caller saw a Prime
			if (copy_slot(topmap, idx, oldkvs, newkvs))
				copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
			return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
		}

		void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
			oldkvs, int workdone) {
			int oldlen = oldkvs->_size;
			int copyDone = _copy_done.load(memory_order_relaxed);
			if (workdone > 0) {
				while (true) {
					copyDone = _copy_done.load(memory_order_relaxed);
					if (_copy_done.compare_exchange_weak(copyDone, copyDone +
						workdone, memory_order_relaxed, memory_order_relaxed))
						break;
				}
			}

			// Promote the new table to the current table
			if (copyDone + workdone == oldlen &&
				topmap->_kvs.load(memory_order_acquire) == oldkvs) {
				kvs_data *newkvs = _newkvs.load(memory_order_acquire);
				topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
					memory_order_relaxed);
			}
		}

		bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
			kvs_data *newkvs) {
			slot *key_slot;
			while ((key_slot = key(oldkvs, idx)) == NULL)
				CAS_key(oldkvs, idx, NULL, TOMBSTONE);

			// First CAS old to Prime
			slot *oldval = val(oldkvs, idx);
			while (!is_prime(oldval)) {
				slot *box = (oldval == NULL || oldval == TOMBSTONE)
					? TOMBPRIME : new slot(true, oldval->_ptr);
				if (CAS_val(oldkvs, idx, oldval, box)) {
					if (box == TOMBPRIME)
						return true; // Copy done
					// Otherwise we CAS'd the box
					oldval = box; // Record updated oldval
					break;
				}
				oldval = val(oldkvs, idx); // Else re-try
			}

			if (oldval == TOMBPRIME) return false; // Copy already completed here

			slot *old_unboxed = new slot(false, oldval->_ptr);
			int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
				NULL) == NULL);

			// The old value is now exposed in the new table; stamp the old slot
			while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
				oldval = val(oldkvs, idx);

			return copied_into_new;
		}
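
		// Per-slot copy protocol implemented by copy_slot() above:
		//   1. An empty key slot is stamped with TOMBSTONE so no new key can claim it.
		//   2. The old value is boxed into a Prime (or TOMBPRIME if already dead),
		//      which blocks further updates to the old table slot.
		//   3. The unboxed value is inserted into the new table via putIfMatch()
		//      with a NULL expVal.
		//   4. The old slot is finally stamped with TOMBPRIME to record completion.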
	};


	private:
	static const int Default_Init_Size = 4; // Initial table size

	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	atomic<kvs_data*> _kvs;

	public:
	cliffc_hashtable() {
		// Initialize the CHM for the construction of the table.
		// The CHMs of the other kvs_data tables are initialized in resize()
		// because their sizes are determined dynamically.
		kvs_data *kvs = new kvs_data(Default_Init_Size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	cliffc_hashtable(int init_size) {
		// Initialize the CHM for the construction of the table.
		// The CHMs of the other kvs_data tables are initialized in resize()
		// because their sizes are determined dynamically.
		kvs_data *kvs = new kvs_data(init_size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	/**
		@Begin
		@Interface: Get
		@Commit_point_set: Get_Success_Point_1 | Get_Success_Point_2 | Get_Success_Point_3
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* get(TypeK *key) {
		slot *key_slot = new slot(false, key);
		int fullhash = hash(key_slot);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *V = get_impl(this, kvs, key_slot, fullhash);
		if (V == NULL) return NULL;
		MODEL_ASSERT (!is_prime(V));
		return (TypeV*) V->_ptr;
	}

	/**
		@Begin
		@Interface: Put
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			# Remember this old value at checking point
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, val);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* put(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: PutIfAbsent
		@Commit_point_set:
			Write_Success_Point | PutIfAbsent_Fail_Point
		@Condition: !spec_table_contains(map, key)
		@HB_condition:
			COND_PutIfAbsentSucc :: __RET__ == NULL
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			if (__COND_SAT__)
				spec_table_put(map, key, value);
		@Post_check:
			__COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* putIfAbsent(TypeK *key, TypeV *value) {
		return putIfMatch(key, value, TOMBSTONE);
	}

	/**
//		@Begin
		@Interface: RemoveAny
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, NULL);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* remove(TypeK *key) {
		// TOMBSTONE marks a removal; the wrapper passes it through unboxed
		return putIfMatch(key, (TypeV*) TOMBSTONE, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: RemoveIfMatch
		@Commit_point_set:
			Write_Success_Point | RemoveIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), val)
		@HB_condition:
			COND_RemoveIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, NULL);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool remove(TypeK *key, TypeV *val) {
		slot *val_slot = val == NULL ? NULL : new slot(false, val);
		// Remove only if the current value matches the expected one
		return putIfMatch(key, (TypeV*) TOMBSTONE, val_slot) == val;
	}

	/**
//		@Begin
		@Interface: ReplaceAny
		@Commit_point_set:
			Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* replace(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, MATCH_ANY);
	}

	/**
//		@Begin
		@Interface: ReplaceIfMatch
		@Commit_point_set:
			Write_Success_Point | ReplaceIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), oldval)
		@HB_condition:
			COND_ReplaceIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, newval);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
		// Box the expected old value as a slot for the putIfMatch() wrapper
		slot *oldval_slot = oldval == NULL ? NULL : new slot(false, oldval);
		return putIfMatch(key, newval, oldval_slot) == oldval;
	}

	private:
	static CHM* get_chm(kvs_data* kvs) {
		CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
		return res;
	}

	static int* get_hashes(kvs_data *kvs) {
		return (int *) kvs->_data[1].load(memory_order_relaxed);
	}

	// Preserve happens-before semantics on newly inserted keys
	static inline slot* key(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponding to the volatile read in get_impl() and putIfMatch in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
		return res;
	}

	/**
		The atomic operation in the val() function is a "potential" commit
		point, which means that in some cases it is a real commit point while
		in other cases it is not. This happens because val() is such a
		fundamental function that many internal operations call it. Our
		strategy is to label every potential commit point and check later
		whether it really is a commit point.
	*/
	// Preserve happens-before semantics on newly inserted values
	static inline slot* val(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponding to the volatile read in get_impl() and putIfMatch in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many functions
			# call val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}

	static int hash(slot *key_slot) {
		MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
		TypeK* key = (TypeK*) key_slot->_ptr;
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}

	// Heuristic to decide if reprobed too many times.
	// Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
	// put it triggers a table resize. Several places MUST have exact agreement.
	static int reprobe_limit(int len) {
		return REPROBE_LIMIT + (len >> 2);
	}
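	// For example, with REPROBE_LIMIT = 10 a table of length 32 allows
	// 10 + (32 >> 2) = 18 reprobes before a get misses or a put resizes.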

	static inline bool is_prime(slot *val) {
		return (val != NULL) && val->_prime;
	}

	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		MODEL_ASSERT (K != NULL);
		TypeK* key_ptr = (TypeK*) key_slot->_ptr;
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}

	static bool valeq(slot *val_slot1, slot *val_slot2) {
		MODEL_ASSERT (val_slot1 != NULL);
		// Both the declared type and the cast should be TypeV here
		TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
		if (val_slot2 == NULL || ptr1 == NULL) return false;
		return ptr1->equals(val_slot2->_ptr);
	}

	// Together with key(), preserves the happens-before relationship on newly
	// inserted keys
	static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
		return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_relaxed);
	}

	/**
		As with the val() function, we only label the CAS operation as the
		potential commit point.
	*/
	// Together with val(), preserves the happens-before relationship on newly
	// inserted values
	static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
		*desired) {
		bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_relaxed);
		/**
			# If it is a successful put instead of a copy or any other internal
			# operation, expected != NULL
			@Begin
			@Potential_commit_point_define: res == true
			@Label: Write_Val_Point
			@End
		*/
		return res;
	}

	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (K == NULL) return NULL; // A miss

			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if a table-resize is in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Too many reprobes or a dead key
				// Retry in the new table
				// The acquire read here pairs with the release CAS on _newkvs in resize()
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}


	// A wrapper around the core putIfMatch() below
	TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
		// TODO: Should throw an exception rather than return NULL
		if (old_val == NULL) {
			return NULL;
		}
		slot *key_slot = new slot(false, key);

		// Pass TOMBSTONE (a removal) through unboxed so that the core
		// putIfMatch() recognizes it; box every other value in a fresh slot
		slot *value_slot = ((void*) value == (void*) TOMBSTONE) ?
			TOMBSTONE : new slot(false, value);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
		// Only when copy_slot() calls putIfMatch() can it return NULL
		MODEL_ASSERT (res != NULL);
		MODEL_ASSERT (!is_prime(res));
		return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
	}

	/**
		Put, Remove, PutIfAbsent, etc. will call this function. It returns the
		old value. If the old value is equal to expVal (or expVal is
		NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
		Only copy_slot() will pass a NULL expVal, and putIfMatch() only returns
		NULL if it was passed a NULL expVal.
	*/
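	// How the public operations above drive this function (expVal / val_slot):
	//   put()                  -> expVal = NO_MATCH_OLD (always overwrite)
	//   putIfAbsent()          -> expVal = TOMBSTONE    (only insert if currently absent)
	//   remove(key)            -> expVal = NO_MATCH_OLD, val_slot = TOMBSTONE
	//   remove(key, val)       -> expVal = boxed val,    val_slot = TOMBSTONE
	//   replace(key, val)      -> expVal = MATCH_ANY    (only replace an existing mapping)
	//   replace(key, old, new) -> expVal = boxed old value
	//   copy_slot()            -> expVal = NULL         (internal table-copy calls)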
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		MODEL_ASSERT (val_slot != NULL);
		MODEL_ASSERT (!is_prime(val_slot));
		MODEL_ASSERT (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;

		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx);
			if (K == NULL) { // Found a free slot
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				MODEL_ASSERT (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it

			// Notice that the logic here should be consistent with that of get.
			// The first predicate means that too many reprobes imply the key is
			// not in the old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change

		// Eagerly try to resize here so that other threads cannot stall this
		// thread's progress
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start

		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);

		// Decided to update the existing table
		while (true) {
			MODEL_ASSERT (!is_prime(V));

			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, so adjust the size.
					// Both normal puts and table-copy call putIfMatch(), but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx);
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}

	// Help along an existing table-resize. This is a fast cut-out wrapper.
	kvs_data* help_copy(kvs_data *helper) {
		kvs_data *topkvs = _kvs.load(memory_order_acquire);
		CHM *topchm = get_chm(topkvs);
		// No copy in progress
		if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
		topchm->help_copy_impl(this, topkvs, false);
		return helper;
	}
};
/**
	@Begin
	@Class_end
	@End
*/
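
/**
	A minimal usage sketch. The CLIFFC_HASHTABLE_EXAMPLE guard, the IntWrapper
	type defined near the top of this header, and the function below are
	hypothetical illustrations, not part of the benchmark; they only show how
	the public interface is meant to be driven. The static sentinels MATCH_ANY,
	NO_MATCH_OLD, TOMBSTONE and TOMBPRIME must still be defined in a .cc file,
	as the header comment above notes.
*/
#ifdef CLIFFC_HASHTABLE_EXAMPLE
inline void cliffc_hashtable_example() {
	cliffc_hashtable<IntWrapper, IntWrapper> table;
	IntWrapper *k1 = new IntWrapper(3);
	IntWrapper *v1 = new IntWrapper(1024);
	IntWrapper *v2 = new IntWrapper(2048);

	table.put(k1, v1);                // insert; returns the previous value (NULL here)
	IntWrapper *got = table.get(k1);  // lookup; returns v1, or NULL on a miss
	table.putIfAbsent(k1, v2);        // no-op, since k1 already maps to v1
	table.replace(k1, v1, v2);        // succeeds only if k1 still maps to v1
	table.remove(k1);                 // delete; returns the removed value
	(void) got;
}
#endif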

#endif