// benchmark/cliffc-hashtable/cliffc_hashtable.h (cdsspec-compiler)
#ifndef CLIFFC_HASHTABLE_H
#define CLIFFC_HASHTABLE_H

#include <iostream>
#include <atomic>
//#include <common.h>
#ifdef STANDALONE
#include <assert.h>
#define MODEL_ASSERT assert
#else
#include <model-assert.h>
#endif

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>
#include "common.h"

using namespace std;

/**
	This header file declares and defines a simplified version of Cliff Click's
	NonBlockingHashMap. It contains all the necessary structures and the main
	functions. The definitions of the static fields live in
	simplified_cliffc_hashtable.cc.
*/

template<typename TypeK, typename TypeV>
class cliffc_hashtable;

/**
	Corresponds to the Object[] array in Cliff Click's Java implementation.
	The first two slots are reserved for the CHM (the hashtable control unit)
	and the hash record array (an array of hashes used for a fast negative
	key-equality check).
*/
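// Layout of _data (see get_chm(), get_hashes(), key() and val() below):
//   _data[0]        -> CHM*            (control structure)
//   _data[1]        -> int* hashes     (full hash per key slot)
//   _data[2*i + 2]  -> key slot i      (slot*)
//   _data[2*i + 3]  -> value slot i    (slot*)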
struct kvs_data {
	int _size;
	atomic<void*> *_data;

	kvs_data(int sz) {
		_size = sz;
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		// Init the data to Null slot
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
		_data[1].store(hashes, memory_order_release);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		delete[] hashes;
		delete[] _data;
	}
};

struct slot {
	bool _prime;
	void *_ptr;

	slot(bool prime, void *ptr) {
		_prime = prime;
		_ptr = ptr;
	}
};


/**
	TypeK must define "int hashCode()", which returns the hash code of its
	object, and "int equals(TypeK anotherKey)", which is used to check key
	equality (internally, keyeq() and valeq() pass the other object as a
	void*). TypeK and TypeV should define their own copy constructors.
	A minimal example key/value pair is sketched below.
*/
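
/**
	A minimal sketch of a key/value pair that satisfies the contract above.
	The names ExampleKey and ExampleValue are illustrative only and are not
	used by the hashtable itself; equals() takes a void* here because the
	internal callers (keyeq() and valeq() below) pass the raw slot payload.
*/
struct ExampleKey {
	int v;

	ExampleKey(int v) : v(v) {}
	ExampleKey(const ExampleKey &rhs) : v(rhs.v) {}

	int hashCode() { return v; }

	int equals(void *other) {
		ExampleKey *k = (ExampleKey*) other;
		return k != NULL && k->v == v;
	}
};

struct ExampleValue {
	int v;

	ExampleValue(int v) : v(v) {}
	ExampleValue(const ExampleValue &rhs) : v(rhs.v) {}

	int equals(void *other) {
		ExampleValue *other_val = (ExampleValue*) other;
		return other_val != NULL && other_val->v == v;
	}
};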
/**
	@Begin
	@Class_begin
	@End
*/
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
	/**
		# The synchronization we have for the hashtable gives us the property of
		# serializability, so we should have a sequential hashtable when we check the
		# correctness. The key thing is to identify all the commit points.

		@Begin
		@Options:
			LANG = CPP;
			CLASS = cliffc_hashtable;
		@Global_define:
			@DeclareVar:
			spec_table *map;
			spec_table *id_map;
			id_tag_t *tag;
			@InitVar:
				map = new_spec_table_default(equals_key);
				id_map = new_spec_table_default(equals_id);
				tag = new_id_tag();

			@DefineFunc:
			bool equals_key(void *ptr1, void *ptr2) {
				TypeK *key1 = (TypeK*) ptr1,
					*key2 = (TypeK*) ptr2;
				if (key1 == NULL || key2 == NULL)
					return false;
				return key1->equals(key2);
			}

			@DefineFunc:
			bool equals_val(void *ptr1, void *ptr2) {
				TypeV *val1 = (TypeV*) ptr1,
					*val2 = (TypeV*) ptr2;
				if (val1 == NULL || val2 == NULL)
					return false;
				return val1->equals(val2);
			}

			@DefineFunc:
			bool equals_id(void *ptr1, void *ptr2) {
				id_tag_t *id1 = (id_tag_t*) ptr1,
					*id2 = (id_tag_t*) ptr2;
				if (id1 == NULL || id2 == NULL)
					return false;
				return (*id1).tag == (*id2).tag;
			}

			@DefineFunc:
			# Update the tag for the current key slot if the corresponding tag
			# is NULL, otherwise just return that tag. It also advances the
			# next available tag when a new tag is needed for that key slot.
			call_id_t getKeyTag(TypeK *key) {
				if (!spec_table_contains(id_map, key)) {
					call_id_t cur_id = current(tag);
					spec_table_put(id_map, key, (void*) cur_id);
					next(tag);
					return cur_id;
				} else {
					call_id_t res = (call_id_t) spec_table_get(id_map, key);
					return res;
				}
			}

		@Interface_cluster:
			Read_interface = {
				Get,
				PutIfAbsent,
				RemoveAny,
				RemoveIfMatch,
				ReplaceAny,
				ReplaceIfMatch
			}

			Write_interface = {
				Put,
				PutIfAbsent(COND_PutIfAbsentSucc),
				RemoveAny,
				RemoveIfMatch(COND_RemoveIfMatchSucc),
				ReplaceAny,
				ReplaceIfMatch(COND_ReplaceIfMatchSucc)
			}
		@Happens_before:
			//Write_interface -> Read_interface
			Put->Get
			Put->Put
		@End
	*/

friend class CHM;
	/**
		The control structure for the hashtable
	*/
	private:
	class CHM {
		friend class cliffc_hashtable;
		private:
		atomic<kvs_data*> _newkvs;

		// Size of active K,V pairs
		atomic_int _size;

		// Count of used slots
		atomic_int _slots;

		// The next part of the table to copy
		atomic_int _copy_idx;

		// Work-done reporting
		atomic_int _copy_done;

		public:
		CHM(int size) {
			_newkvs.store(NULL, memory_order_relaxed);
			_size.store(size, memory_order_relaxed);
			_slots.store(0, memory_order_relaxed);

			_copy_idx.store(0, memory_order_relaxed);
			_copy_done.store(0, memory_order_release);
		}

		~CHM() {}

		private:

		// Heuristic to decide if the table is too full
		bool table_full(int reprobe_cnt, int len) {
			return
				reprobe_cnt >= REPROBE_LIMIT &&
				_slots.load(memory_order_relaxed) >= reprobe_limit(len);
		}

		kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
			//model_print("resizing...\n");
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL)
				return newkvs;

			// No copy in progress, so start one; only ever grow the table
			int oldlen = kvs->_size;
			int sz = _size.load(memory_order_relaxed);
			int newsz = sz;

			// Just follow Cliff Click's heuristic to decide the new size
			if (sz >= (oldlen >> 2)) { // If we are 25% full
				newsz = oldlen << 1; // Double size
				if (sz >= (oldlen >> 1))
					newsz = oldlen << 2; // Double double size
			}
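			// e.g. with oldlen = 32: sz >= 8 doubles to newsz = 64, and
			// sz >= 16 quadruples to newsz = 128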

			// We do not record the resize timestamp
			if (newsz <= oldlen) newsz = oldlen << 1;
			// Do not shrink ever
			if (newsz < oldlen) newsz = oldlen;

			// Last check because the 'new' below is expensive
			newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL) return newkvs;

			newkvs = new kvs_data(newsz);
			void *chm = (void*) new CHM(sz);
			newkvs->_data[0].store(chm, memory_order_release);

			kvs_data *cur_newkvs;
			// Another check after the slow allocation
			if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
				return cur_newkvs;
			// CAS _newkvs from NULL to the newly allocated table
			kvs_data *expected = (kvs_data*) NULL;
			kvs_data *desired = newkvs;
			if (!_newkvs.compare_exchange_strong(expected, desired, memory_order_release,
					memory_order_release)) {
				// Should clean up the allocated area
				delete newkvs;
				newkvs = _newkvs.load(memory_order_acquire);
			}
			return newkvs;
		}

		void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
			bool copy_all) {
			MODEL_ASSERT (get_chm(oldkvs) == this);
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			int oldlen = oldkvs->_size;
			int min_copy_work = oldlen > 1024 ? 1024 : oldlen;

			// Just follow Cliff Click's code here
			int panic_start = -1;
			int copyidx;
			while (_copy_done.load(memory_order_acquire) < oldlen) {
				copyidx = _copy_idx.load(memory_order_acquire);
				if (panic_start == -1) { // No panic
					copyidx = _copy_idx.load(memory_order_acquire);
					while (copyidx < (oldlen << 1) &&
						!_copy_idx.compare_exchange_strong(copyidx, copyidx +
							min_copy_work, memory_order_release, memory_order_release))
						copyidx = _copy_idx.load(memory_order_relaxed);
					if (!(copyidx < (oldlen << 1)))
						panic_start = copyidx;
				}

				// Now copy the chunk of work we claimed
				int workdone = 0;
				for (int i = 0; i < min_copy_work; i++)
					if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
						newkvs))
						workdone++;
				if (workdone > 0)
					copy_check_and_promote(topmap, oldkvs, workdone);

				copyidx += min_copy_work;
				if (!copy_all && panic_start == -1)
					return; // We are done with the work we claimed
			}
			copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
		}

		kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
			*oldkvs, int idx, void *should_help) {
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			// We're only here because the caller saw a Prime
			if (copy_slot(topmap, idx, oldkvs, newkvs))
				copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
			return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
		}

		void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
			oldkvs, int workdone) {
			int oldlen = oldkvs->_size;
			int copyDone = _copy_done.load(memory_order_relaxed);
			if (workdone > 0) {
				while (true) {
					copyDone = _copy_done.load(memory_order_relaxed);
					if (_copy_done.compare_exchange_weak(copyDone, copyDone +
						workdone, memory_order_relaxed, memory_order_relaxed))
						break;
				}
			}

			// Promote the new table to the current table
			if (copyDone + workdone == oldlen &&
				topmap->_kvs.load(memory_order_acquire) == oldkvs) {
				kvs_data *newkvs = _newkvs.load(memory_order_acquire);
				topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
					memory_order_release);
			}
		}

		bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
			kvs_data *newkvs) {
			slot *key_slot;
			while ((key_slot = key(oldkvs, idx)) == NULL)
				CAS_key(oldkvs, idx, NULL, TOMBSTONE);

			// First CAS old to Prime
			slot *oldval = val(oldkvs, idx);
			while (!is_prime(oldval)) {
				slot *box = (oldval == NULL || oldval == TOMBSTONE)
					? TOMBPRIME : new slot(true, oldval->_ptr);
				if (CAS_val(oldkvs, idx, oldval, box)) {
					if (box == TOMBPRIME)
						return true; // Copy done
					// Otherwise we CAS'd the box
					oldval = box; // Record updated oldval
					break;
				}
				oldval = val(oldkvs, idx); // Else re-try
			}

			if (oldval == TOMBPRIME) return false; // Copy already completed here

			slot *old_unboxed = new slot(false, oldval->_ptr);
			int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
				NULL) == NULL);

			// Old value is exposed in the new table
			while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
				oldval = val(oldkvs, idx);

			return copied_into_new;
		}
	};



	private:
	static const int Default_Init_Size = 4; // Initial table size

	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	atomic<kvs_data*> _kvs;

	public:
	cliffc_hashtable() {
		// Should initialize the CHM for the construction of the table
		// Other CHMs in kvs_data are initialized in resize()
		// because the size is determined dynamically
		/**
			@Begin
			@Entry_point
			@End
		*/
		kvs_data *kvs = new kvs_data(Default_Init_Size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	cliffc_hashtable(int init_size) {
		// Should initialize the CHM for the construction of the table
		// Other CHMs in kvs_data are initialized in resize()
		// because the size is determined dynamically

		/**
			@Begin
			@Entry_point
			@End
		*/
		kvs_data *kvs = new kvs_data(init_size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	/**
		@Begin
		@Interface: Get
		@Commit_point_set: Get_Success_Point_1 | Get_Success_Point_2 | Get_Success_Point_3
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* get(TypeK *key) {
		slot *key_slot = new slot(false, key);
		int fullhash = hash(key_slot);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *V = get_impl(this, kvs, key_slot, fullhash);
		if (V == NULL) return NULL;
		MODEL_ASSERT (!is_prime(V));
		return (TypeV*) V->_ptr;
	}

	/**
		@Begin
		@Interface: Put
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			# Remember this old value at checking point
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, val);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* put(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: PutIfAbsent
		@Commit_point_set:
			Write_Success_Point | PutIfAbsent_Fail_Point
		@Condition: !spec_table_contains(map, key)
		@HB_condition:
			COND_PutIfAbsentSucc :: __RET__ == NULL
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			if (__COND_SAT__)
				spec_table_put(map, key, value);
		@Post_check:
			__COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* putIfAbsent(TypeK *key, TypeV *value) {
		return putIfMatch(key, value, TOMBSTONE);
	}

	/**
//		@Begin
		@Interface: RemoveAny
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, NULL);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* remove(TypeK *key) {
		return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: RemoveIfMatch
		@Commit_point_set:
			Write_Success_Point | RemoveIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), val)
		@HB_condition:
			COND_RemoveIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, NULL);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool remove(TypeK *key, TypeV *val) {
		slot *val_slot = val == NULL ? NULL : new slot(false, val);
		return putIfMatch(key, TOMBSTONE, val_slot) == val;
	}

	/**
//		@Begin
		@Interface: ReplaceAny
		@Commit_point_set:
			Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* replace(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, MATCH_ANY);
	}

	/**
//		@Begin
		@Interface: ReplaceIfMatch
		@Commit_point_set:
			Write_Success_Point | ReplaceIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), oldval)
		@HB_condition:
			COND_ReplaceIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, newval);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
		slot *old_val_slot = oldval == NULL ? NULL : new slot(false, oldval);
		return putIfMatch(key, newval, old_val_slot) == oldval;
	}

	private:
	static CHM* get_chm(kvs_data* kvs) {
		CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
		return res;
	}

	static int* get_hashes(kvs_data *kvs) {
		return (int *) kvs->_data[1].load(memory_order_relaxed);
	}

	// Preserve happens-before semantics on newly inserted keys
	static inline slot* key(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponds to the volatile read in get_impl() and putIfMatch() in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
		return res;
	}

	/**
		The atomic load in val() is a "potential" commit point: in some cases
		it is a real commit point, and in other cases it is not. This happens
		because val() is such a fundamental function that many internal
		operations call it. Our strategy is to label every potential commit
		point and check later whether it really is a commit point.
	*/
	// Preserve happens-before semantics on newly inserted values
	static inline slot* val(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponds to the volatile read in get_impl() and putIfMatch() in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many functions
			# call val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}

	static int hash(slot *key_slot) {
		MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
		TypeK* key = (TypeK*) key_slot->_ptr;
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}

	// Heuristic to decide if we have reprobed too many times.
	// Be careful here: running over the limit on a 'get' acts as a 'miss'; on a
	// put it triggers a table resize. Several places MUST have exact agreement.
	static int reprobe_limit(int len) {
		return REPROBE_LIMIT + (len >> 2);
	}
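	// Worked example: with REPROBE_LIMIT = 10 and len = 32, the limit is
	// 10 + (32 >> 2) = 18 probes before a get() misses or a put() resizes.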

	static inline bool is_prime(slot *val) {
		return (val != NULL) && val->_prime;
	}

	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		MODEL_ASSERT (K != NULL);
		TypeK* key_ptr = (TypeK*) key_slot->_ptr;
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}

	static bool valeq(slot *val_slot1, slot *val_slot2) {
		MODEL_ASSERT (val_slot1 != NULL);
		TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
		if (val_slot2 == NULL || ptr1 == NULL) return false;
		return ptr1->equals(val_slot2->_ptr);
	}

	// Together with key(), preserves the happens-before relationship on newly
	// inserted keys
	static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
		return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_release);
	}

	/**
		As with val(), we only label the CAS operation as a potential commit
		point.
	*/
	// Together with val(), preserves the happens-before relationship on newly
	// inserted values
	static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
		*desired) {
		bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_release);
		/**
			# If it is a successful put instead of a copy or any other internal
			# operation, expected != NULL
			@Begin
			@Potential_commit_point_define: res == true
			@Label: Write_Val_Point
			@End
		*/
		return res;
	}

	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (K == NULL) return NULL; // A miss

			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) {
				// Retry in new table
				// Atomic read (acquire) can be here
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}

	// A wrapper of the essential function putIfMatch()
	TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
		// TODO: Should throw an exception rather than return NULL
		if (old_val == NULL) {
			return NULL;
		}
		slot *key_slot = new slot(false, key);

		slot *value_slot = new slot(false, value);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
		// Only when copy_slot() calls putIfMatch() will it return NULL
		MODEL_ASSERT (res != NULL);
		MODEL_ASSERT (!is_prime(res));
		return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
	}

	/**
		Put, Remove, PutIfAbsent, etc. call this function. It returns the old
		value. If the returned value equals expVal (or expVal is
		NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
		Only copy_slot will pass a NULL expVal, and putIfMatch only returns
		NULL if passed a NULL expVal.
	*/
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		MODEL_ASSERT (val_slot != NULL);
		MODEL_ASSERT (!is_prime(val_slot));
		MODEL_ASSERT (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;

		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx);
			if (K == NULL) { // Got a free slot
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				MODEL_ASSERT (K != NULL);
			}

			// Key slot not null; there exists a key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it

			// Notice that the logic here should be consistent with that of get.
			// The first predicate means that too many reprobes imply the key is
			// not in the old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				model_print("resize1\n");
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change

		// Here it tries to resize because it doesn't want other threads to
		// stall its progress (eagerly try to start the resize early)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
			model_print("resize2\n");
			newkvs = chm->resize(topmap, kvs); // Force the copy to start
		}

		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);

		// Decided to update the existing table
		while (true) {
			MODEL_ASSERT (!is_prime(V));

			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, so adjust the size
					// Both normal puts and table-copy call putIfMatch(), but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx);
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}

	// Help along an existing table-resize. This is a fast cut-out wrapper.
	kvs_data* help_copy(kvs_data *helper) {
		kvs_data *topkvs = _kvs.load(memory_order_acquire);
		CHM *topchm = get_chm(topkvs);
		// No copy in progress
		if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
		topchm->help_copy_impl(this, topkvs, false);
		return helper;
	}
};
/**
	@Begin
	@Class_end
	@End
*/
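
/**
	A minimal usage sketch, not part of the benchmark itself. It assumes the
	illustrative ExampleKey/ExampleValue types sketched earlier in this header,
	and that the static slot constants (TOMBSTONE, MATCH_ANY, ...) are defined
	for this instantiation in the accompanying .cc file, as noted at the top of
	this header. The function name cliffc_hashtable_usage_sketch is
	illustrative as well.
*/
static inline void cliffc_hashtable_usage_sketch() {
	cliffc_hashtable<ExampleKey, ExampleValue> table;
	ExampleKey *k = new ExampleKey(1);
	ExampleValue *v = new ExampleValue(42);

	// put() returns the previous value mapped to the key, or NULL if none
	ExampleValue *prev = table.put(k, v);
	MODEL_ASSERT (prev == NULL);

	// get() returns the current value for the key, or NULL on a miss
	ExampleValue *cur = table.get(k);
	MODEL_ASSERT (cur != NULL && cur->equals(v));
}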

#endif