1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 //#include <common.h>
7 #ifdef STANDALONE
8 #include <assert.h>
9 #define MODEL_ASSERT assert 
10 #else
11 #include <model-assert.h>
12 #endif
13
14 #include <spec_lib.h>
15 #include <stdlib.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
19 #include "common.h" 
20
21 using namespace std;
22
23 /**
24         This header file declares and defines a simplified version of Cliff Click's
25         NonblockingHashMap. It contains all the necessary structures and main
26         functions. The definitions of the static fields are in the
27         simplified_cliffc_hashtable.cc file.
28 */
29
30 template<typename TypeK, typename TypeV>
31 class cliffc_hashtable;
32
33 /**
34         Corresponds to the Object[] array in Cliff Click's Java implementation.
35         The first two slots are reserved for the CHM (hashtable control unit) and
36         the hash records (an array of hashes used for fast negative key-equality checks).
37 */
38 struct kvs_data {
39         int _size;
40         atomic<void*> *_data;
41         
42         kvs_data(int sz) {
43                 _size = sz;
44                 int real_size = sz * 2 + 2;
45                 _data = new atomic<void*>[real_size];
46                 // The control block should be initialized in resize()
47                 // Init the hash record array
48                 int *hashes = new int[_size];
49                 int i;
50                 for (i = 0; i < _size; i++) {
51                         hashes[i] = 0;
52                 }
53                 // Init the data to Null slot
54                 for (i = 2; i < real_size; i++) {
55                         _data[i].store(NULL, memory_order_relaxed);
56                 }
57                 _data[1].store(hashes, memory_order_release);
58         }
59
60         ~kvs_data() {
61                 int *hashes = (int*) _data[1].load(memory_order_relaxed);
62                 delete[] hashes;
63                 delete[] _data;
64         }
65 };
66
67 struct slot {
68         bool _prime;
69         void *_ptr;
70
71         slot(bool prime, void *ptr) {
72                 _prime = prime;
73                 _ptr = ptr;
74         }
75 };
76
77
78 /**
79         TypeK must define a function "int hashCode()" which returns the hash
80         code for its object, and "int equals(TypeK anotherKey)" which is
81         used to test equality.
82         TypeK and TypeV should define their own copy constructor.
83 */
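/**
        Illustrative only: a minimal sketch of key/value types that would satisfy
        the requirements above. These example types are NOT part of the original
        benchmark. Note that the call sites keyeq() and valeq() below invoke
        equals() with a raw void* argument, so the sketch accepts void* and casts.
*/
struct example_key {
        int id;
        example_key(int i) : id(i) {}
        int hashCode() { return id; }
        bool equals(void *other) {
                example_key *k = (example_key*) other;
                return k != NULL && k->id == id;
        }
};

struct example_value {
        int data;
        example_value(int d) : data(d) {}
        bool equals(void *other) {
                example_value *v = (example_value*) other;
                return v != NULL && v->data == data;
        }
};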
84 /**
85         @Begin
86         @Class_begin
87         @End
88 */
89 template<typename TypeK, typename TypeV>
90 class cliffc_hashtable {
91         /**
92                 # The synchronization in the hashtable gives us serializability, so we
93                 # can check correctness against a sequential (specification) hashtable.
94                 # The key task is to identify all the commit points.
95         
96                 @Begin
97                 @Options:
98                         LANG = CPP;
99                         CLASS = cliffc_hashtable;
100                 @Global_define:
101                         @DeclareVar:
102                         spec_table *map;
103                         spec_table *id_map;
104                         id_tag_t *tag;
105                         @InitVar:
106                                 map = new_spec_table_default(equals_key);
107                                 id_map = new_spec_table_default(equals_id);
108                                 tag = new_id_tag();
109
110                         @DefineFunc:
111                         bool equals_key(void *ptr1, void *ptr2) {
112                                 TypeK *key1 = (TypeK*) ptr1,
113                                         *key2 = (TypeK*) ptr2;
114                                 if (key1 == NULL || key2 == NULL)
115                                         return false;
116                                 return key1->equals(key2);
117                         }
118                         
119                         @DefineFunc:
120                         bool equals_val(void *ptr1, void *ptr2) {
121                                 TypeV *val1 = (TypeV*) ptr1,
122                                         *val2 = (TypeV*) ptr2;
123                                 if (val1 == NULL || val2 == NULL)
124                                         return false;
125                                 return val1->equals(val2);
126                         }
127
128                         @DefineFunc:
129                         bool equals_id(void *ptr1, void *ptr2) {
130                                 id_tag_t *id1 = (id_tag_t*) ptr1,
131                                         *id2 = (id_tag_t*) ptr2;
132                                 if (id1 == NULL || id2 == NULL)
133                                         return false;
134                                 return (*id1).tag == (*id2).tag;
135                         }
136
137                         @DefineFunc:
138                         # If the key has no tag yet, allocate a fresh tag for it (and
139                         # advance the next available tag); otherwise just return the tag
140                         # already recorded for that key.
141                         id_tag_t getKeyTag(TypeK *key) {
142                                 if (!spec_table_contains(id_map, key)) {
143                                         id_tag_t *cur_tag = (id_tag_t*) MODEL_MALLOC(sizeof(id_tag_t));
144                                         *cur_tag = current(tag);
145                                         spec_table_put(id_map, key, cur_tag);
146                                         next(tag);
147                                         return *cur_tag;
148                                 } else {
149                                         id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
150                                         return *res;
151                                 }
152                         }
153                 
154                 @Interface_cluster:
155                         Read_interface = {
156                                 Get,
157                                 PutIfAbsent,
158                                 RemoveAny,
159                                 RemoveIfMatch,
160                                 ReplaceAny,
161                                 ReplaceIfMatch
162                         }
163                         
164                         Write_interface = {
165                                 Put,
166                                 PutIfAbsent(COND_PutIfAbsentSucc),
167                                 RemoveAny,
168                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
169                                 ReplaceAny,
170                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
171                         }
172                 @Happens_before:
173                         //Write_interface -> Read_interface
174                         Put->Get
175                         Put->Put
176                 @End
177         */
178
179 friend class CHM;
180         /**
181                 The control structure for the hashtable
182         */
183         private:
184         class CHM {
185                 friend class cliffc_hashtable;
186                 private:
187                 atomic<kvs_data*> _newkvs;
188                 
189                 // Size of active K,V pairs
190                 atomic_int _size;
191         
192                 // Count of used slots
193                 atomic_int _slots;
194                 
195                 // The next part of the table to copy
196                 atomic_int _copy_idx;
197                 
198                 // Work-done reporting
199                 atomic_int _copy_done;
200         
201                 public:
202                 CHM(int size) {
203                         _newkvs.store(NULL, memory_order_relaxed);
204                         _size.store(size, memory_order_relaxed);
205                         _slots.store(0, memory_order_relaxed);
206         
207                         _copy_idx.store(0, memory_order_relaxed);
208                         _copy_done.store(0, memory_order_release);
209                 }
210         
211                 ~CHM() {}
212                 
213                 private:
214                         
215                 // Heuristic to decide if the table is too full
216                 bool table_full(int reprobe_cnt, int len) {
217                         return
218                                 reprobe_cnt >= REPROBE_LIMIT &&
219                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
220                 }
221         
222                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
223                         //model_print("resizing...\n");
224                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
225                         if (newkvs != NULL)
226                                 return newkvs;
227         
228                         // No copy in-progress, start one; Only double the table size
229                         int oldlen = kvs->_size;
230                         int sz = _size.load(memory_order_relaxed);
231                         int newsz = sz;
232                         
233                         // Just follow Cliff Click's heuristic to decide the new size
234                         if (sz >= (oldlen >> 2)) { // If we are 25% full
235                                 newsz = oldlen << 1; // Double size
236                                 if (sz >= (oldlen >> 1))
237                                         newsz = oldlen << 2; // Double double size
238                         }
239         
240                         // We do not record the resize timestamp here
241                         if (newsz <= oldlen) newsz = oldlen << 1;
242                         // Do not shrink ever
243                         if (newsz < oldlen) newsz = oldlen;
244         
245                         // Last check because the 'new' below is expensive
246                         newkvs = _newkvs.load(memory_order_acquire);
247                         if (newkvs != NULL) return newkvs;
248         
249                         newkvs = new kvs_data(newsz);
250                         void *chm = (void*) new CHM(sz);
251                         newkvs->_data[0].store(chm, memory_order_release);
252         
253                         kvs_data *cur_newkvs; 
254                         // Another check after the slow allocation
255                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
256                                 return cur_newkvs;
257                         // CAS the _newkvs to the allocated table
258                         kvs_data *expected = (kvs_data*) NULL;
259                         kvs_data *desired = newkvs;
260                         if (!_newkvs.compare_exchange_strong(expected, desired, memory_order_release,
261                                         memory_order_release)) {
262                                 // Should clean the allocated area
263                                 delete newkvs;
264                                 newkvs = _newkvs.load(memory_order_acquire);
265                         }
266                         return newkvs;
267                 }
268         
269                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
270                         bool copy_all) {
271                         MODEL_ASSERT (get_chm(oldkvs) == this);
272                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
273                         int oldlen = oldkvs->_size;
274                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
275                 
276                         // Just follow Cliff Click's code here
277                         int panic_start = -1;
278                         int copyidx;
279                         while (_copy_done.load(memory_order_acquire) < oldlen) {
280                                 copyidx = _copy_idx.load(memory_order_acquire);
281                                 if (panic_start == -1) { // No panic
282                                         copyidx = _copy_idx.load(memory_order_acquire);
283                                         while (copyidx < (oldlen << 1) &&
284                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
285                                                         min_copy_work, memory_order_release, memory_order_release))
286                                                 copyidx = _copy_idx.load(memory_order_relaxed);
287                                         if (!(copyidx < (oldlen << 1)))
288                                                 panic_start = copyidx;
289                                 }
290         
291                                 // Now copy the chunk of work we claimed
292                                 int workdone = 0;
293                                 for (int i = 0; i < min_copy_work; i++)
294                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
295                                                 newkvs))
296                                                 workdone++;
297                                 if (workdone > 0)
298                                         copy_check_and_promote(topmap, oldkvs, workdone);
299         
300                                 copyidx += min_copy_work;
301                                 if (!copy_all && panic_start == -1)
302                                         return; // We are done with the work we claimed
303                         }
304                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
305                 }
306         
307                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
308                         *oldkvs, int idx, void *should_help) {
309                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
310                         // We're only here because the caller saw a Prime
311                         if (copy_slot(topmap, idx, oldkvs, newkvs))
312                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
313                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
314                 }
315         
316                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
317                         oldkvs, int workdone) {
318                         int oldlen = oldkvs->_size;
319                         int copyDone = _copy_done.load(memory_order_relaxed);
320                         if (workdone > 0) {
321                                 while (true) {
322                                         copyDone = _copy_done.load(memory_order_relaxed);
323                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
324                                                 workdone, memory_order_relaxed, memory_order_relaxed))
325                                                 break;
326                                 }
327                         }
328         
329                         // Promote the new table to the current table
330                         if (copyDone + workdone == oldlen &&
331                                 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
332                                 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
333                                 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
334                                         memory_order_release);
335                         }
336                 }
337         
338                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
339                         kvs_data *newkvs) {
340                         slot *key_slot;
341                         while ((key_slot = key(oldkvs, idx)) == NULL)
342                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
343         
344                         // First CAS old to Prime
345                         slot *oldval = val(oldkvs, idx);
346                         while (!is_prime(oldval)) {
347                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
348                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
349                                 if (CAS_val(oldkvs, idx, oldval, box)) {
350                                         if (box == TOMBPRIME)
351                                                 return 1; // Copy done
352                                         // Otherwise we CAS'd the box
353                                         oldval = box; // Record updated oldval
354                                         break;
355                                 }
356                                 oldval = val(oldkvs, idx); // Else re-try
357                         }
358         
359                         if (oldval == TOMBPRIME) return false; // Copy already completed here
360         
361                         slot *old_unboxed = new slot(false, oldval->_ptr);
362                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
363                                 NULL) == NULL);
364         
365                         // Old value is exposed in the new table
366                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
367                                 oldval = val(oldkvs, idx);
368         
369                         return copied_into_new;
370                 }
371         };
372
373         
374
375         private:
376         static const int Default_Init_Size = 4; // Initial table size
377
378         static slot* const MATCH_ANY;
379         static slot* const NO_MATCH_OLD;
380
381         static slot* const TOMBPRIME;
382         static slot* const TOMBSTONE;
383
384         static const int REPROBE_LIMIT = 10; // Forces a table-resize
385
386         atomic<kvs_data*> _kvs;
387
388         public:
389         cliffc_hashtable() {
390                 // Should initialize the CHM for the construction of the table.
391                 // Other CHMs in kvs_data are initialized in resize() because their
392                 // size is determined dynamically.
393                 kvs_data *kvs = new kvs_data(Default_Init_Size);
394 
395                 // Install the control block and publish the initial table, mirroring
396                 // cliffc_hashtable(int init_size) below
397                 void *chm = (void*) new CHM(0);
398                 kvs->_data[0].store(chm, memory_order_relaxed);
399                 _kvs.store(kvs, memory_order_release);
400         }
401
402         cliffc_hashtable(int init_size) {
403                 // Should initialize the CHM for the construction of the table.
404                 // Other CHMs in kvs_data are initialized in resize() because their
405                 // size is determined dynamically.
406                 /**
407                         @Begin
408                         @Entry_point
409                         @End
410                 */
411
412                 kvs_data *kvs = new kvs_data(init_size);
413                 void *chm = (void*) new CHM(0);
414                 kvs->_data[0].store(chm, memory_order_relaxed);
415                 _kvs.store(kvs, memory_order_release);
416         }
417
418         /**
419                 @Begin
420                 @Interface: Get
421                 @Commit_point_set: Get_Success_Point_1 | Get_Success_Point_2 | Get_Success_Point_3
422                 @ID: getKeyTag(key)
423                 @Action:
424                         void *_Old_Val = spec_table_get(map, key);
425                 @Post_check:
426                         equals_val(_Old_Val, __RET__)
427                 @End
428         */
429         TypeV* get(TypeK *key) {
430                 slot *key_slot = new slot(false, key);
431                 int fullhash = hash(key_slot);
432                 kvs_data *kvs = _kvs.load(memory_order_acquire);
433                 slot *V = get_impl(this, kvs, key_slot, fullhash);
434                 if (V == NULL) return NULL;
435                 MODEL_ASSERT (!is_prime(V));
436                 return (TypeV*) V->_ptr;
437         }
438
439         /**
440                 @Begin
441                 @Interface: Put
442                 @Commit_point_set: Write_Success_Point
443                 @ID: getKeyTag(key)
444                 @Action:
445                         # Remember this old value at checking point
446                         void *_Old_Val = spec_table_get(map, key);
447                         spec_table_put(map, key, val);
448                 @Post_check:
449                         equals_val(__RET__, _Old_Val)
450                 @End
451         */
452         TypeV* put(TypeK *key, TypeV *val) {
453                 return putIfMatch(key, val, NO_MATCH_OLD);
454         }
455
456         /**
457 //              @Begin
458                 @Interface: PutIfAbsent
459                 @Commit_point_set:
460                         Write_Success_Point | PutIfAbsent_Fail_Point
461                 @Condition: !spec_table_contains(map, key)
462                 @HB_condition:
463                         COND_PutIfAbsentSucc :: __RET__ == NULL
464                 @ID: getKeyTag(key)
465                 @Action:
466                         void *_Old_Val = spec_table_get(map, key);
467                         if (__COND_SAT__)
468                                 spec_table_put(map, key, value);
469                 @Post_check:
470                         __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__) 
471                 @End
472         */
473         TypeV* putIfAbsent(TypeK *key, TypeV *value) {
474                 return putIfMatch(key, value, TOMBSTONE);
475         }
476
477         /**
478 //              @Begin
479                 @Interface: RemoveAny
480                 @Commit_point_set: Write_Success_Point
481                 @ID: getKeyTag(key)
482                 @Action:
483                         void *_Old_Val = spec_table_get(map, key);
484                         spec_table_put(map, key, NULL);
485                 @Post_check:
486                         equals_val(__RET__, _Old_Val)
487                 @End
488         */
489         TypeV* remove(TypeK *key) {
490                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
491         }
492
493         /**
494 //              @Begin
495                 @Interface: RemoveIfMatch
496                 @Commit_point_set:
497                         Write_Success_Point | RemoveIfMatch_Fail_Point
498                 @Condition:
499                         equals_val(spec_table_get(map, key), val)
500                 @HB_condition:
501                         COND_RemoveIfMatchSucc :: __RET__ == true
502                 @ID: getKeyTag(key)
503                 @Action:
504                         if (__COND_SAT__)
505                                 spec_table_put(map, key, NULL);
506                 @Post_check:
507                         __COND_SAT__ ? __RET__ : !__RET__
508                 @End
509         */
510         bool remove(TypeK *key, TypeV *val) {
511                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
512                 return putIfMatch(key, TOMBSTONE, val_slot) == val;
513
514         }
515
516         /**
517 //              @Begin
518                 @Interface: ReplaceAny
519                 @Commit_point_set:
520                         Write_Success_Point
521                 @ID: getKeyTag(key)
522                 @Action:
523                         void *_Old_Val = spec_table_get(map, key);
524                 @Post_check:
525                         equals_val(__RET__, _Old_Val)
526                 @End
527         */
528         TypeV* replace(TypeK *key, TypeV *val) {
529                 return putIfMatch(key, val, MATCH_ANY);
530         }
531
532         /**
533 //              @Begin
534                 @Interface: ReplaceIfMatch
535                 @Commit_point_set:
536                         Write_Success_Point | ReplaceIfMatch_Fail_Point
537                 @Condition:
538                         equals_val(spec_table_get(map, key), oldval)
539                 @HB_condition:
540                         COND_ReplaceIfMatchSucc :: __RET__ == true
541                 @ID: getKeyTag(key)
542                 @Action:
543                         if (__COND_SAT__)
544                                 spec_table_put(map, key, newval);
545                 @Post_check:
546                         __COND_SAT__ ? __RET__ : !__RET__
547                 @End
548         */
549         bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
550                 return putIfMatch(key, newval, oldval) == oldval;
551         }
552
553         private:
554         static CHM* get_chm(kvs_data* kvs) {
555                 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
556                 return res;
557         }
558
559         static int* get_hashes(kvs_data *kvs) {
560                 return (int *) kvs->_data[1].load(memory_order_relaxed);
561         }
562         
563         // Preserve happens-before semantics on newly inserted keys
564         static inline slot* key(kvs_data *kvs, int idx) {
565                 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
566                 // Corresponding to the volatile read in get_impl() and putIfMatch in
567                 // Cliff Click's Java implementation
568                 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
569                 return res;
570         }
571
572         /**
573                 The atomic operation in the val() function is a "potential" commit
574                 point, meaning that in some cases it is a real commit point and in
575                 other cases it is not. This happens because val() is such a fundamental
576                 function that many internal operations call it. Our strategy is to
577                 label every potential commit point and check later whether it really
578                 is a commit point.
579         */
580         // Preserve happens-before semantics on newly inserted values
581         static inline slot* val(kvs_data *kvs, int idx) {
582                 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
583                 // Corresponding to the volatile read in get_impl() and putIfMatch in
584                 // Cliff Click's Java implementation
585                 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
586                 /**
587                         @Begin
588                         # This is a complicated potential commit point since many
589                         # functions call val().
590                         @Potential_commit_point_define: true
591                         @Label: Read_Val_Point
592                         @End
593                 */
594                 return res;
595
596
597         }
598
599         static int hash(slot *key_slot) {
600                 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
601                 TypeK* key = (TypeK*) key_slot->_ptr;
602                 int h = key->hashCode();
603                 // Spread bits according to Cliff Click's code
604                 h += (h << 15) ^ 0xffffcd7d;
605                 h ^= (h >> 10);
606                 h += (h << 3);
607                 h ^= (h >> 6);
608                 h += (h << 2) + (h << 14);
609                 return h ^ (h >> 16);
610         }
611         
612         // Heuristic to decide if reprobed too many times. 
613         // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
614         // put it triggers a table resize. Several places MUST have exact agreement.
615         static int reprobe_limit(int len) {
616                 return REPROBE_LIMIT + (len >> 2);
617         }
618         
619         static inline bool is_prime(slot *val) {
620                 return (val != NULL) && val->_prime;
621         }
622
623         // Check for key equality. Try direct pointer comparison first, then the
624         // hash check (fast negative test), and finally the full 'equals' call
625         static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
626                 int fullhash) {
627                 // Caller should've checked this.
628                 MODEL_ASSERT (K != NULL);
629                 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
630                 return
631                         K == key_slot ||
632                                 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
633                                 K != TOMBSTONE &&
634                                 key_ptr->equals(K->_ptr));
635         }
636
637         static bool valeq(slot *val_slot1, slot *val_slot2) {
638                 MODEL_ASSERT (val_slot1 != NULL);
639                 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
640                 if (val_slot2 == NULL || ptr1 == NULL) return false;
641                 return ptr1->equals(val_slot2->_ptr);
642         }
643         
644         // Together with key() preserve the happens-before relationship on newly
645         // inserted keys
646         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
647                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
648                         desired, memory_order_release, memory_order_release);
649         }
650
651         /**
652                 As with the val() function, we only label the CAS operation as the
653                 potential commit point.
654         */
655         // Together with val() preserve the happens-before relationship on newly
656         // inserted values
657         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
658                 *desired) {
659                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
660                         desired, memory_order_release, memory_order_release);
661                 /**
662                         # If it is a successful put instead of a copy or any other internal
663                         # operation, expected != NULL
664                         @Begin
665                         @Potential_commit_point_define: res == true
666                         @Label: Write_Val_Point
667                         @End
668                 */
669                 return res;
670         }
671
672         slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
673                 fullhash) {
674                 int len = kvs->_size;
675                 CHM *chm = get_chm(kvs);
676                 int *hashes = get_hashes(kvs);
677
678                 int idx = fullhash & (len - 1);
679                 int reprobe_cnt = 0;
680                 while (true) {
681                         slot *K = key(kvs, idx);
682                         slot *V = val(kvs, idx);
683                         /**
684                                 @Begin
685                                 @Commit_point_define: V == NULL
686                                 @Potential_commit_point_label: Read_Val_Point
687                                 @Label: Get_Success_Point_1
688                                 @End
689                         */
690
691                         if (K == NULL) return NULL; // A miss
692                         
693                         if (keyeq(K, key_slot, hashes, idx, fullhash)) {
694                                 // Key hit! Check if table-resize in progress
695                                 if (!is_prime(V)) {
696                                         /**
697                                                 @Begin
698                                                 @Commit_point_define: true
699                                                 @Potential_commit_point_label: Read_Val_Point
700                                                 @Label: Get_Success_Point_2
701                                                 @End
702                                         */
703                                         return (V == TOMBSTONE) ? NULL : V; // Return this value
704                                 }
705                                 // Otherwise, finish the copy & retry in the new table
706                                 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
707                                         idx, key_slot), key_slot, fullhash);
708                         }
709
710                         if (++reprobe_cnt >= REPROBE_LIMIT ||
711                                 key_slot == TOMBSTONE) {
712                                 // Retry in new table
713                                 // Atomic read (acquire) can be here 
714                                 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
715                                 /**
716                                         @Begin
717                                         @Commit_point_define_check: newkvs == NULL
718                                         @Label: Get_Success_Point_3
719                                         @End
720                                 */
721                                 return newkvs == NULL ? NULL : get_impl(topmap,
722                                         topmap->help_copy(newkvs), key_slot, fullhash);
723                         }
724
725                         idx = (idx + 1) & (len - 1); // Reprobe by 1
726                 }
727         }
728
729         // A wrapper of the essential function putIfMatch()
730         TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
731                 // TODO: Should throw an exception rather than return NULL
732                 if (old_val == NULL) {
733                         return NULL;
734                 }
735                 slot *key_slot = new slot(false, key);
736
737                 slot *value_slot = new slot(false, value);
738                 kvs_data *kvs = _kvs.load(memory_order_acquire);
739                 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
740                 // Only when copy_slot() calls putIfMatch() will it return NULL
741                 MODEL_ASSERT (res != NULL); 
742                 MODEL_ASSERT (!is_prime(res));
743                 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
744         }
745
746         /**
747                 Put, Remove, PutIfAbsent, etc will call this function. Return the old
748                 value. If the returned value is equal to the expVal (or expVal is
749                 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
750                 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
751                 NULL if passed a NULL expVal.
752         */
753         static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
754                 *key_slot, slot *val_slot, slot *expVal) {
755                 MODEL_ASSERT (val_slot != NULL);
756                 MODEL_ASSERT (!is_prime(val_slot));
757                 MODEL_ASSERT (!is_prime(expVal));
758
759                 int fullhash = hash(key_slot);
760                 int len = kvs->_size;
761                 CHM *chm = get_chm(kvs);
762                 int *hashes = get_hashes(kvs);
763                 int idx = fullhash & (len - 1);
764
765                 // Claim a key slot
766                 int reprobe_cnt = 0;
767                 slot *K;
768                 slot *V;
769                 kvs_data *newkvs;
770                 
771                 while (true) { // Spin till we get a key slot
772                         K = key(kvs, idx);
773                         V = val(kvs, idx);
774                         if (K == NULL) { // Get a free slot
775                                 if (val_slot == TOMBSTONE) return val_slot;
776                                 // Claim the null key-slot
777                                 if (CAS_key(kvs, idx, NULL, key_slot)) {
778                                         chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
779                                         hashes[idx] = fullhash; // Memorize full hash
780                                         break;
781                                 }
782                                 K = key(kvs, idx); // CAS failed, get updated value
783                                 MODEL_ASSERT (K != NULL);
784                         }
785
786                         // Key slot not null, there exists a Key here
787                         if (keyeq(K, key_slot, hashes, idx, fullhash))
788                                 break; // Got it
789                         
790                         // Notice that the logic here should be consistent with that of get():
791                         // the first predicate (too many reprobes) means the key is not in
792                         // the old table.
793                         if (++reprobe_cnt >= reprobe_limit(len) ||
794                                 K == TOMBSTONE) { // Found a Tombstone key, no more keys
795                                 newkvs = chm->resize(topmap, kvs);
796                                 // Help along an existing copy
797                                 if (expVal != NULL) topmap->help_copy(newkvs);
798                                 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
799                         }
800
801                         idx = (idx + 1) & (len - 1); // Reprobe
802                 } // End of spinning till we get a Key slot
803
804                 if (val_slot == V) return V; // Fast cutout for no-change
805         
806                 // Try to resize here because we do not want other threads to stall our
807                 // progress (eagerly start the copy soon)
808                 newkvs = chm->_newkvs.load(memory_order_acquire);
809                 if (newkvs == NULL &&
810                         ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
811                         newkvs = chm->resize(topmap, kvs); // Force the copy to start
812                 
813                 // Finish the copy and then put it in the new table
814                 if (newkvs != NULL)
815                         return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
816                                 expVal), key_slot, val_slot, expVal);
817                 
818                 // Decided to update the existing table
819                 while (true) {
820                         MODEL_ASSERT (!is_prime(V));
821
822                         if (expVal != NO_MATCH_OLD &&
823                                 V != expVal &&
824                                 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
825                                 !(V == NULL && expVal == TOMBSTONE) &&
826                                 (expVal == NULL || !valeq(expVal, V))) {
827                                 /**
828                                         @Begin
829                                         @Commit_point_define: expVal == TOMBSTONE
830                                         @Potential_commit_point_label: Read_Val_Point
831                                         @Label: PutIfAbsent_Fail_Point
832                                                 # This is a check for the PutIfAbsent() when the value
833                                                 # is not absent
834                                         @End
835                                 */
836                                 /**
837                                         @Begin
838                                         @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
839                                         @Potential_commit_point_label: Read_Val_Point
840                                         @Label: RemoveIfMatch_Fail_Point
841                                         @End
842                                 */
843                                 /**
844                                         @Begin
845                                         @Commit_point_define: !valeq(expVal, V)
846                                         @Potential_commit_point_label: Read_Val_Point
847                                         @Label: ReplaceIfMatch_Fail_Point
848                                         @End
849                                 */
850                                 return V; // Do not update!
851                         }
852
853                         if (CAS_val(kvs, idx, V, val_slot)) {
854                                 /**
855                                         @Begin
856                                         # The only point where a successful put happens
857                                         @Commit_point_define: true
858                                         @Potential_commit_point_label: Write_Val_Point
859                                         @Label: Write_Success_Point
860                                         @End
861                                 */
862                                 if (expVal != NULL) { // Not called by a table-copy
863                                         // CAS succeeded, should adjust size
864                                         // Both normal puts and table-copy call putIfMatch(), but
865                                         // table-copy does not increase the number of live K/V pairs
866                                         if ((V == NULL || V == TOMBSTONE) &&
867                                                 val_slot != TOMBSTONE)
868                                                 chm->_size.fetch_add(1, memory_order_relaxed);
869                                         if (!(V == NULL || V == TOMBSTONE) &&
870                                                 val_slot == TOMBSTONE)
871                                                 chm->_size.fetch_add(-1, memory_order_relaxed);
872                                 }
873                                 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
874                         }
875                         // Else CAS failed
876                         V = val(kvs, idx);
877                         if (is_prime(V))
878                                 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
879                                         idx, expVal), key_slot, val_slot, expVal);
880                 }
881         }
882
883         // Help along an existing table-resize. This is a fast cut-out wrapper.
884         kvs_data* help_copy(kvs_data *helper) {
885                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
886                 CHM *topchm = get_chm(topkvs);
887                 // No copy in progress
888                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
889                 topchm->help_copy_impl(this, topkvs, false);
890                 return helper;
891         }
892 };
893 /**
894         @Begin
895         @Class_end
896         @End
897 */
898
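/**
        Illustrative only: a hypothetical usage sketch showing how the table is
        driven with the example_key/example_value sketches defined near the top of
        this file. It is guarded by CLIFFC_HASHTABLE_USAGE_EXAMPLE, a made-up macro
        that the benchmark never defines, so it does not affect the normal build.
*/
#ifdef CLIFFC_HASHTABLE_USAGE_EXAMPLE
static void cliffc_hashtable_usage_sketch() {
        // Table sizes are assumed to be powers of two (indices are masked with len - 1)
        cliffc_hashtable<example_key, example_value> table(8);
        example_key *k = new example_key(1);
        example_value *v = new example_value(42);

        table.put(k, v);                     // Insert or overwrite the mapping for k
        example_value *cur = table.get(k);   // Lookup; returns NULL on a miss
        MODEL_ASSERT(cur != NULL && cur->equals(v));

        // putIfAbsent() only writes when k has no live mapping, so here it should
        // leave the table unchanged and return the value that is already stored
        example_value *other = new example_value(7);
        example_value *prev = table.putIfAbsent(k, other);
        MODEL_ASSERT(prev != NULL);
}
#endif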
899 #endif