#ifndef CLIFFC_HASHTABLE_H
#define CLIFFC_HASHTABLE_H

#include <iostream>
#include <atomic>
//#include <common.h>
#ifdef STANDALONE
#include <assert.h>
#define MODEL_ASSERT assert
#else
#include <model-assert.h>
#endif

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>
#include "common.h"

using namespace std;

/**
	This header file declares and defines a simplified version of Cliff Click's
	NonblockingHashMap. It contains all the necessary structures and the main
	functions. The definitions of the static fields are in the
	simplified_cliffc_hashtable.cc file.
*/

template<typename TypeK, typename TypeV>
class cliffc_hashtable;

/**
	Corresponds to the Object[] array in Cliff Click's Java implementation.
	It reserves the first two slots for the CHM (hashtable control unit) and the
	hash records (an array of hashes used for fast negative key-equality checks).
*/
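// Illustrative layout sketch (restating the indexing used by key()/val() below,
// nothing new): for a table of _size == N buckets, _data holds 2 * N + 2 slots:
//   _data[0]        -> CHM control block
//   _data[1]        -> int[] of cached full hashes
//   _data[2*i + 2]  -> key slot of bucket i
//   _data[2*i + 3]  -> value slot of bucket i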
struct kvs_data {
	int _size;
	atomic<void*> *_data;
	
	kvs_data(int sz) {
		_size = sz;
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		// Init the data to Null slot
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
		_data[1].store(hashes, memory_order_release);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		delete[] hashes; // Array delete to match the array new above
		delete[] _data;
	}
};

struct slot {
	bool _prime;
	void *_ptr;

	slot(bool prime, void *ptr) {
		_prime = prime;
		_ptr = ptr;
	}
};


/**
	TypeK must define a function "int hashCode()" which returns the hash
	code for its object, and "int equals(TypeK anotherKey)" which is used
	to judge equality.
	TypeK and TypeV should define their own copy constructors.
*/
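// A minimal sketch of a key type satisfying the contract above. It is purely
// illustrative: the class name ExampleKey and the guard macro are hypothetical
// and not part of the benchmark. Note that keyeq()/valeq() below hand the
// stored pointer to equals() as a void*, so the sketch accepts void*.
#ifdef CLIFFC_HASHTABLE_EXAMPLE
class ExampleKey {
	public:
	int _v;
	ExampleKey(int v) : _v(v) {}
	ExampleKey(const ExampleKey &rhs) : _v(rhs._v) {}
	int hashCode() { return _v; }
	bool equals(void *other) {
		ExampleKey *o = (ExampleKey*) other;
		return o != NULL && o->_v == _v;
	}
};
#endif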
/**
	@Begin
	@Class_begin
	@End
*/
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
	/**
		# The synchronization we have for the hashtable gives us the property of
		# serializability, so we should have a sequential hashtable when we check
		# correctness. The key thing is to identify all the commit points.

		@Begin
		@Options:
			LANG = CPP;
			CLASS = cliffc_hashtable;
		@Global_define:
			@DeclareVar:
			spec_table *map;
			spec_table *id_map;
			id_tag_t *tag;
			@InitVar:
				map = new_spec_table_default(equals_key);
				id_map = new_spec_table_default(equals_id);
				tag = new_id_tag();

			@DefineFunc:
			bool equals_key(void *ptr1, void *ptr2) {
				TypeK *key1 = (TypeK*) ptr1,
					*key2 = (TypeK*) ptr2;
				if (key1 == NULL || key2 == NULL)
					return false;
				return key1->equals(key2);
			}
			
			@DefineFunc:
			bool equals_val(void *ptr1, void *ptr2) {
				TypeV *val1 = (TypeV*) ptr1,
					*val2 = (TypeV*) ptr2;
				if (val1 == NULL || val2 == NULL)
					return false;
				return val1->equals(val2);
			}

			@DefineFunc:
			bool equals_id(void *ptr1, void *ptr2) {
				id_tag_t *id1 = (id_tag_t*) ptr1,
					*id2 = (id_tag_t*) ptr2;
				if (id1 == NULL || id2 == NULL)
					return false;
				return (*id1).tag == (*id2).tag;
			}

			@DefineFunc:
			# Create a new tag for the given key slot if there is no
			# corresponding tag yet, otherwise just return the existing tag.
			# It also advances the next available tag when a new tag is created.
			id_tag_t getKeyTag(TypeK *key) {
				if (!spec_table_contains(id_map, key)) {
					id_tag_t *cur_tag = (id_tag_t*) MODEL_MALLOC(sizeof(id_tag_t));
					*cur_tag = current(tag);
					spec_table_put(id_map, key, cur_tag);
					next(tag);
					return *cur_tag;
				} else {
					id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
					return *res;
				}
			}
		
		@Interface_cluster:
			Read_interface = {
				Get,
				PutIfAbsent,
				RemoveAny,
				RemoveIfMatch,
				ReplaceAny,
				ReplaceIfMatch
			}
			
			Write_interface = {
				Put,
				PutIfAbsent(COND_PutIfAbsentSucc),
				RemoveAny,
				RemoveIfMatch(COND_RemoveIfMatchSucc),
				ReplaceAny,
				ReplaceIfMatch(COND_ReplaceIfMatchSucc)
			}
		@Happens_before:
			//Write_interface -> Read_interface
			Put->Get
			Put->Put
		@End
	*/

friend class CHM;
	/**
		The control structure for the hashtable
	*/
	private:
	class CHM {
		friend class cliffc_hashtable;
		private:
		atomic<kvs_data*> _newkvs;

		// Size of active K,V pairs
		atomic_int _size;

		// Count of used slots
		atomic_int _slots;

		// The next part of the table to copy
		atomic_int _copy_idx;

		// Work-done reporting
		atomic_int _copy_done;

		public:
		CHM(int size) {
			_newkvs.store(NULL, memory_order_relaxed);
			_size.store(size, memory_order_relaxed);
			_slots.store(0, memory_order_relaxed);

			_copy_idx.store(0, memory_order_relaxed);
			_copy_done.store(0, memory_order_release);
		}

		~CHM() {}

		private:

		// Heuristic to decide if the table is too full
		bool table_full(int reprobe_cnt, int len) {
			return
				reprobe_cnt >= REPROBE_LIMIT &&
				_slots.load(memory_order_relaxed) >= reprobe_limit(len);
		}

		kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
			//model_print("resizing...\n");
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL)
				return newkvs;

			// No copy in-progress, start one; only double the table size
			int oldlen = kvs->_size;
			int sz = _size.load(memory_order_relaxed);
			int newsz = sz;

			// Just follow Cliff Click's heuristic to decide the new size
			if (sz >= (oldlen >> 2)) { // If we are 25% full
				newsz = oldlen << 1; // Double size
				if (sz >= (oldlen >> 1))
					newsz = oldlen << 2; // Double double size
			}
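			// Worked example (illustrative): with oldlen = 8 and sz = 5 both tests
			// fire (5 >= 2 and 5 >= 4), so newsz = 8 << 2 = 32; with sz = 3 only
			// the first test fires and newsz = 8 << 1 = 16.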

			// We do not record the resize timestamp here
			if (newsz <= oldlen) newsz = oldlen << 1;
			// Do not shrink ever
			if (newsz < oldlen) newsz = oldlen;

			// Last check because the 'new' below is expensive
			newkvs = _newkvs.load(memory_order_acquire);
			if (newkvs != NULL) return newkvs;

			newkvs = new kvs_data(newsz);
			void *chm = (void*) new CHM(sz);
			newkvs->_data[0].store(chm, memory_order_release);

			kvs_data *cur_newkvs;
			// Another check after the slow allocation
			if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
				return cur_newkvs;
			// CAS the _newkvs to the allocated table
			kvs_data *expected = (kvs_data*) NULL;
			if (!_newkvs.compare_exchange_strong(expected, newkvs, memory_order_release,
					memory_order_relaxed)) {
				// Should clean the allocated area
				delete newkvs;
				newkvs = _newkvs.load(memory_order_acquire);
			}
			return newkvs;
		}

		void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
			bool copy_all) {
			MODEL_ASSERT (get_chm(oldkvs) == this);
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			int oldlen = oldkvs->_size;
			int min_copy_work = oldlen > 1024 ? 1024 : oldlen;

			// Just follow Cliff Click's code here
			int panic_start = -1;
			int copyidx;
			while (_copy_done.load(memory_order_acquire) < oldlen) {
				copyidx = _copy_idx.load(memory_order_acquire);
				if (panic_start == -1) { // No panic
					copyidx = _copy_idx.load(memory_order_acquire);
					while (copyidx < (oldlen << 1) &&
						!_copy_idx.compare_exchange_strong(copyidx, copyidx +
							min_copy_work, memory_order_release, memory_order_relaxed))
						copyidx = _copy_idx.load(memory_order_relaxed);
					if (!(copyidx < (oldlen << 1)))
						panic_start = copyidx;
				}

				// Now copy the chunk of work we claimed
				int workdone = 0;
				for (int i = 0; i < min_copy_work; i++)
					if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
						newkvs))
						workdone++;
				if (workdone > 0)
					copy_check_and_promote(topmap, oldkvs, workdone);

				copyidx += min_copy_work;
				if (!copy_all && panic_start == -1)
					return; // We are done with the work we claimed
			}
			copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
		}

		kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
			*oldkvs, int idx, void *should_help) {
			kvs_data *newkvs = _newkvs.load(memory_order_acquire);
			// We're only here because the caller saw a Prime
			if (copy_slot(topmap, idx, oldkvs, newkvs))
				copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
			return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
		}

		void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
			oldkvs, int workdone) {
			int oldlen = oldkvs->_size;
			int copyDone = _copy_done.load(memory_order_relaxed);
			if (workdone > 0) {
				while (true) {
					copyDone = _copy_done.load(memory_order_relaxed);
					if (_copy_done.compare_exchange_weak(copyDone, copyDone +
						workdone, memory_order_relaxed, memory_order_relaxed))
						break;
				}
			}

			// Promote the new table to the current table
			if (copyDone + workdone == oldlen &&
				topmap->_kvs.load(memory_order_acquire) == oldkvs) {
				kvs_data *newkvs = _newkvs.load(memory_order_acquire);
				topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
					memory_order_relaxed);
			}
		}

		bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
			kvs_data *newkvs) {
			slot *key_slot;
			while ((key_slot = key(oldkvs, idx)) == NULL)
				CAS_key(oldkvs, idx, NULL, TOMBSTONE);

			// First CAS old to Prime
			slot *oldval = val(oldkvs, idx);
			while (!is_prime(oldval)) {
				slot *box = (oldval == NULL || oldval == TOMBSTONE)
					? TOMBPRIME : new slot(true, oldval->_ptr);
				if (CAS_val(oldkvs, idx, oldval, box)) {
					if (box == TOMBPRIME)
						return true; // Copy done
					// Otherwise we CAS'd the box
					oldval = box; // Record updated oldval
					break;
				}
				oldval = val(oldkvs, idx); // Else re-try
			}

			if (oldval == TOMBPRIME) return false; // Copy already completed here

			slot *old_unboxed = new slot(false, oldval->_ptr);
			int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
				NULL) == NULL);

			// Old value is exposed in the new table
			while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
				oldval = val(oldkvs, idx);

			return copied_into_new;
		}
	};



	private:
	static const int Default_Init_Size = 4; // Initial table size

	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	atomic<kvs_data*> _kvs;

	public:
	cliffc_hashtable() {
		// Should initialize the CHM for the construction of the table
		// Other CHMs in kvs_data should be initialized in resize()
		// because the size is determined dynamically
		kvs_data *kvs = new kvs_data(Default_Init_Size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	cliffc_hashtable(int init_size) {
		// Should initialize the CHM for the construction of the table
		// Other CHMs in kvs_data should be initialized in resize()
		// because the size is determined dynamically
		/**
			@Begin
			@Entry_point
			@End
		*/

		kvs_data *kvs = new kvs_data(init_size);
		void *chm = (void*) new CHM(0);
		kvs->_data[0].store(chm, memory_order_relaxed);
		_kvs.store(kvs, memory_order_release);
	}

	/**
		@Begin
		@Interface: Get
		@Commit_point_set: Get_Success_Point_1 | Get_Success_Point_2 | Get_Success_Point_3
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* get(TypeK *key) {
		slot *key_slot = new slot(false, key);
		int fullhash = hash(key_slot);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *V = get_impl(this, kvs, key_slot, fullhash);
		if (V == NULL) return NULL;
		MODEL_ASSERT (!is_prime(V));
		return (TypeV*) V->_ptr;
	}

	/**
		@Begin
		@Interface: Put
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			# Remember this old value at checking point
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, val);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* put(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: PutIfAbsent
		@Commit_point_set:
			Write_Success_Point | PutIfAbsent_Fail_Point
		@Condition: !spec_table_contains(map, key)
		@HB_condition:
			COND_PutIfAbsentSucc :: __RET__ == NULL
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			if (__COND_SAT__)
				spec_table_put(map, key, value);
		@Post_check:
			__COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
		@End
	*/
	TypeV* putIfAbsent(TypeK *key, TypeV *value) {
		return putIfMatch(key, value, TOMBSTONE);
	}

	/**
//		@Begin
		@Interface: RemoveAny
		@Commit_point_set: Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
			spec_table_put(map, key, NULL);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* remove(TypeK *key) {
		return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
	}

	/**
//		@Begin
		@Interface: RemoveIfMatch
		@Commit_point_set:
			Write_Success_Point | RemoveIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), val)
		@HB_condition:
			COND_RemoveIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, NULL);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool remove(TypeK *key, TypeV *val) {
		slot *val_slot = val == NULL ? NULL : new slot(false, val);
		return putIfMatch(key, TOMBSTONE, val) == val;
	}

	/**
//		@Begin
		@Interface: ReplaceAny
		@Commit_point_set:
			Write_Success_Point
		@ID: getKeyTag(key)
		@Action:
			void *_Old_Val = spec_table_get(map, key);
		@Post_check:
			equals_val(__RET__, _Old_Val)
		@End
	*/
	TypeV* replace(TypeK *key, TypeV *val) {
		return putIfMatch(key, val, MATCH_ANY);
	}

	/**
//		@Begin
		@Interface: ReplaceIfMatch
		@Commit_point_set:
			Write_Success_Point | ReplaceIfMatch_Fail_Point
		@Condition:
			equals_val(spec_table_get(map, key), oldval)
		@HB_condition:
			COND_ReplaceIfMatchSucc :: __RET__ == true
		@ID: getKeyTag(key)
		@Action:
			if (__COND_SAT__)
				spec_table_put(map, key, newval);
		@Post_check:
			__COND_SAT__ ? __RET__ : !__RET__
		@End
	*/
	bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
		return putIfMatch(key, newval, oldval) == oldval;
	}

	private:
	static CHM* get_chm(kvs_data* kvs) {
		CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
		return res;
	}

	static int* get_hashes(kvs_data *kvs) {
		return (int *) kvs->_data[1].load(memory_order_relaxed);
	}

	// Preserve happens-before semantics on newly inserted keys
	static inline slot* key(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponds to the volatile read in get_impl() and putIfMatch() in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
		return res;
	}

	/**
		The atomic operation in the val() function is a "potential" commit point,
		which means that in some cases it is a real commit point and in other
		cases it is not. This happens because val() is such a fundamental
		function that many internal operations call it. Our strategy is to label
		every potential commit point and check later whether it really is one.
	*/
	// Preserve happens-before semantics on newly inserted values
	static inline slot* val(kvs_data *kvs, int idx) {
		MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
		// Corresponds to the volatile read in get_impl() and putIfMatch() in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many functions
			# call val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}

	static int hash(slot *key_slot) {
		MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
		TypeK* key = (TypeK*) key_slot->_ptr;
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}

	// Heuristic to decide if we have reprobed too many times.
	// Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
	// put it triggers a table resize. Several places MUST have exact agreement.
	static int reprobe_limit(int len) {
		return REPROBE_LIMIT + (len >> 2);
	}
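	// Example (illustrative): for a table of len = 32 the limit is
	// 10 + (32 >> 2) = 18 reprobes before a get treats the key as missing or a
	// put forces a resize.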

	static inline bool is_prime(slot *val) {
		return (val != NULL) && val->_prime;
	}

	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		MODEL_ASSERT (K != NULL);
		TypeK* key_ptr = (TypeK*) key_slot->_ptr;
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}

	static bool valeq(slot *val_slot1, slot *val_slot2) {
		MODEL_ASSERT (val_slot1 != NULL);
		TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
		if (val_slot2 == NULL || ptr1 == NULL) return false;
		return ptr1->equals(val_slot2->_ptr);
	}

	// Together with key() preserve the happens-before relationship on newly
	// inserted keys
	static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
		return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_relaxed);
	}

	/**
		As with the val() function, we only label the CAS operation as the
		potential commit point.
	*/
	// Together with val() preserve the happens-before relationship on newly
	// inserted values
	static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
		*desired) {
		bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
			desired, memory_order_release, memory_order_relaxed);
		/**
			# If it is a successful put instead of a copy or any other internal
			# operation, expected != NULL
			@Begin
			@Potential_commit_point_define: res == true
			@Label: Write_Val_Point
			@End
		*/
		return res;
	}

	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (K == NULL) return NULL; // A miss

			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			if (++reprobe_cnt >= REPROBE_LIMIT ||
				key_slot == TOMBSTONE) {
				// Retry in new table
				// Atomic read (acquire) can be here
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}

	// A wrapper around the essential function putIfMatch()
	TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
		// TODO: Should throw an exception rather than return NULL
		if (old_val == NULL) {
			return NULL;
		}
		slot *key_slot = new slot(false, key);

		slot *value_slot = new slot(false, value);
		kvs_data *kvs = _kvs.load(memory_order_acquire);
		slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
		// Only when copy_slot() calls putIfMatch() will it return NULL
		MODEL_ASSERT (res != NULL);
		MODEL_ASSERT (!is_prime(res));
		return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
	}

	/**
		Put, Remove, PutIfAbsent, etc. will call this function. It returns the
		old value. If the old value is equal to expVal (or expVal is
		NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
		Only copy_slot will pass a NULL expVal, and putIfMatch only returns
		NULL if passed a NULL expVal.
	*/
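	// Illustrative summary of how the public operations reach this function
	// through the wrapper above (restating the wrappers, nothing new):
	//   put(k, v)         -> putIfMatch(k, v,         NO_MATCH_OLD)
	//   putIfAbsent(k, v) -> putIfMatch(k, v,         TOMBSTONE)
	//   remove(k)         -> putIfMatch(k, TOMBSTONE, NO_MATCH_OLD)
	//   replace(k, v)     -> putIfMatch(k, v,         MATCH_ANY)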
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		MODEL_ASSERT (val_slot != NULL);
		MODEL_ASSERT (!is_prime(val_slot));
		MODEL_ASSERT (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;

		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx);
			if (K == NULL) { // Get a free slot
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				MODEL_ASSERT (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it

			// Notice that the logic here should be consistent with that of get.
			// The first predicate means that after too many reprobes the key is
			// not in the old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change

		// Here it tries to resize because it doesn't want other threads to stop
		// its progress (eagerly try to resize soon)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start

		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);

		// Decided to update the existing table
		while (true) {
			MODEL_ASSERT (!is_prime(V));

			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, should adjust size
					// Both normal puts and table-copy call putIfMatch, but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx);
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}

	// Help along an existing table-resize. This is a fast cut-out wrapper.
	kvs_data* help_copy(kvs_data *helper) {
		kvs_data *topkvs = _kvs.load(memory_order_acquire);
		CHM *topchm = get_chm(topkvs);
		// No copy in progress
		if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
		topchm->help_copy_impl(this, topkvs, false);
		return helper;
	}
};
/**
	@Begin
	@Class_end
	@End
*/

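// A usage sketch, illustrative only: the guard macro and the function name
// below are hypothetical, and it assumes the ExampleKey type sketched near the
// top of this file plus the static slot constants defined in the accompanying
// .cc file. The assertions are only meant to hold in a single-threaded run.
#ifdef CLIFFC_HASHTABLE_EXAMPLE
inline void cliffc_hashtable_example() {
	cliffc_hashtable<ExampleKey, ExampleKey> table(4); // 4 buckets (power of two)
	ExampleKey *k = new ExampleKey(1);
	ExampleKey *v = new ExampleKey(42);

	table.put(k, v);                // insert or overwrite; returns the old mapping
	ExampleKey *cur = table.get(k); // look the key back up
	MODEL_ASSERT(cur == v);

	// putIfAbsent() leaves the existing mapping in place and returns it
	ExampleKey *prev = table.putIfAbsent(k, new ExampleKey(7));
	MODEL_ASSERT(prev == v);
}
#endif
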
#endif