try to generate in-place code
[cdsspec-compiler.git] / benchmark / cliffc-hashtable / simplified_cliffc_hashtable.h
1 #ifndef SIMPLIFIED_CLIFFC_HASHTABLE_H
2 #define SIMPLIFIED_CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
11
12
13 /**
14         This header file declares and defines a simplified version of Cliff Click's
15         NonblockingHashMap. It contains all the necessary structrues and main
16         functions. In simplified_cliffc_hashtable.cc file, it has the definition for
17         the static fields.
18 */
19
20 template<typename TypeK, typename TypeV>
21 class cliffc_hashtable;
22
23 /**
	Corresponding to the Object[] array in Cliff Click's Java implementation.
25         It keeps the first two slots for CHM (Hashtable control unit) and the hash
26         records (an array of hash used for fast negative key-equality check).
27 */
struct kvs_data {
	int _size;              // number of K,V pairs this table can hold
	atomic<void*> *_data;   // [0]: CHM*, [1]: int* hash cache, then the K/V pairs

	kvs_data(int sz) {
		_size = sz;
		// Layout: slot 0 holds the CHM, slot 1 the hash-record array, and
		// pair i lives at indices (2i+2, 2i+3), so the backing array needs
		// sz*2+2 entries. The original computed this from
		// sizeof(atomic<void*>), which is only right by accident on 64-bit
		// builds with sz == 8.
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block (slot 0) is initialized by the caller/resize()
		// Init the hash record array used for fast negative key checks
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		_data[1].store(hashes, memory_order_relaxed);
		// Init every key/value slot to the NULL (empty) slot
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		// hashes was allocated with new[]; the original used scalar delete (UB)
		delete[] hashes;
		delete[] _data;
	}
};
56
struct slot {
	bool _prime;           // true while the value is "primed" (mid table-copy)
	shared_ptr<void> _ptr; // type-erased payload (a TypeK or a TypeV)

	// Box a payload, optionally marking it as primed.
	slot(bool prime, shared_ptr<void> ptr) : _prime(prime), _ptr(ptr) {}
};
67
68
69 /**
	TypeK must have defined function "int hashCode()" which returns the hash
	code for its object, and "int equals(TypeK anotherKey)" which is
72         used to judge equality.
73         TypeK and TypeV should define their own copy constructor.
74         To make the memory management safe and similar to Cliff Click's Java
75         implementation, we use shared_ptr instead of normal pointer in terms of the
76         pointers that point to TypeK and TypeV.
77 */
78 template<typename TypeK, typename TypeV>
79 class cliffc_hashtable {
80         /**
81                 # The synchronization we have for the hashtable gives us the property of
82                 # serializability, so we should have a sequential hashtable when we check the
83                 # correctness. The key thing is to identify all the commit point.
84         
85                 @Begin
86                 @Global_define:
87                         @DeclareVar:
88                         spec_hashtable<TypeK, TypeV*> map;
89                         spec_hashtable<TypeK, Tag> id_map;
90                         Tag tag;
91                         @InitVar:
92                                 map = spec_hashtable<TypeK, TypeV*>();
93                                 id_map = spec_hashtable<TypeK, TypeV*>();
94                                 tag = Tag();
95                         @DefineFunc:
96                         static bool equals_val(TypeV *ptr1, TypeV *ptr2) {
97                                 // ...
98                         }
99                         
100                         # Update the tag for the current key slot if the corresponding tag
101                         # is NULL, otherwise just return that tag. It will update the next
102                         # available tag too if it requires a new tag for that key slot.
103                         static Tag getKeyTag(TypeK &key) {
104                                 if (id_map.get(key) == NULL) {
105                                         Tag cur_tag = tag.current();
106                                         id_map.put(key, cur_tag);
107                                         tag.next();
108                                         return cur_tag;
109                                 } else {
110                                         return id_map.get(key);
111                                 }
112                         }
113                 
114                 @Interface_cluster:
115                         Read_interface = {
116                                 Get,
117                                 PutIfAbsent,
118                                 RemoveAny,
119                                 RemoveIfMatch,
120                                 ReplaceAny,
121                                 ReplaceIfMatch
122                         }
123                         
124                         Write_interface = {
125                                 Put,
126                                 PutIfAbsent(COND_PutIfAbsentSucc),
127                                 RemoveAny,
128                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
129                                 ReplaceAny,
130                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
131                         }
132                 @Happens_before:
133                         Write_interface -> Read_interface
134                 @End
135         */
136
137 friend class CHM;
138         /**
139                 The control structure for the hashtable
140         */
141         private:
142         class CHM {
143                 friend class cliffc_hashtable;
144                 private:
145                 atomic<kvs_data*> _newkvs;
146                 
147                 // Size of active K,V pairs
148                 atomic_int _size;
149         
150                 // Count of used slots
151                 atomic_int _slots;
152                 
153                 // The next part of the table to copy
154                 atomic_int _copy_idx;
155                 
156                 // Work-done reporting
157                 atomic_int _copy_done;
158         
159                 public:
160                 CHM(int size) {
161                         _size.store(size, memory_order_relaxed);
162                         _slots.store(0, memory_order_relaxed);
163         
164                         _copy_idx.store(0, memory_order_relaxed);
165                         _copy_done.store(0, memory_order_release);
166                 }
167         
168                 ~CHM() {}
169                 
170                 private:
171                         
172                 // Heuristic to decide if the table is too full
173                 bool table_full(int reprobe_cnt, int len) {
174                         return
175                                 reprobe_cnt >= REPROBE_LIMIT &&
176                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
177                 }
178         
179                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
180                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
181                         if (newkvs != NULL)
182                                 return newkvs;
183         
184                         // No copy in-progress, start one; Only double the table size
185                         int oldlen = kvs->_size;
186                         int sz = _size.load(memory_order_relaxed);
187                         int newsz = sz;
188                         
189                         // Just follow Cliff Click's heuristic to decide the new size
190                         if (sz >= (oldlen >> 2)) { // If we are 25% full
191                                 newsz = oldlen << 1; // Double size
192                                 if (sz >= (oldlen >> 1))
193                                         newsz = oldlen << 2; // Double double size
194                         }
195         
196                         // We do not record the record timestamp
197                         if (newsz <= oldlen) newsz = oldlen << 1;
198                         // Do not shrink ever
199                         if (newsz < oldlen) newsz = oldlen;
200         
201                         // Last check cause the 'new' below is expensive
202                         newkvs = _newkvs.load(memory_order_acquire);
203                         if (newkvs != NULL) return newkvs;
204         
205                         newkvs = new kvs_data(newsz);
206                         void *chm = (void*) new CHM(sz);
207                         newkvs->_data[0].store(chm, memory_order_relaxed);
208         
209                         kvs_data *cur_newkvs; 
210                         // Another check after the slow allocation
211                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
212                                 return cur_newkvs;
213                         // CAS the _newkvs to the allocated table
214                         kvs_data *desired = (kvs_data*) NULL;
215                         kvs_data *expected = (kvs_data*) newkvs; 
216                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
217                                         memory_order_release)) {
218                                 // Should clean the allocated area
219                                 delete newkvs;
220                                 newkvs = _newkvs.load(memory_order_acquire);
221                         }
222                         return newkvs;
223                 }
224         
225                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
226                         bool copy_all) {
227                         assert (get_chm(oldkvs) == this);
228                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
229                         int oldlen = oldkvs->_size;
230                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
231                 
232                         // Just follow Cliff Click's code here
233                         int panic_start = -1;
234                         int copyidx;
235                         while (_copy_done.load(memory_order_acquire) < oldlen) {
236                                 copyidx = _copy_idx.load(memory_order_acquire);
237                                 if (panic_start == -1) { // No painc
238                                         copyidx = _copy_idx.load(memory_order_acquire);
239                                         while (copyidx < (oldlen << 1) &&
240                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
241                                                         min_copy_work, memory_order_release, memory_order_release))
242                                                 copyidx = _copy_idx.load(memory_order_relaxed);
243                                         if (!(copyidx < (oldlen << 1)))
244                                                 panic_start = copyidx;
245                                 }
246         
247                                 // Now copy the chunk of work we claimed
248                                 int workdone = 0;
249                                 for (int i = 0; i < min_copy_work; i++)
250                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
251                                                 newkvs))
252                                                 workdone++;
253                                 if (workdone > 0)
254                                         copy_check_and_promote(topmap, oldkvs, workdone);
255         
256                                 copyidx += min_copy_work;
257                                 if (!copy_all && panic_start == -1)
258                                         return; // We are done with the work we claim
259                         }
260                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
261                 }
262         
263                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
264                         *oldkvs, int idx, void *should_help) {
265                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
266                         // We're only here cause the caller saw a Prime
267                         if (copy_slot(topmap, idx, oldkvs, _newkvs))
268                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
269                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
270                 }
271         
272                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
273                         oldkvs, int workdone) {
274                         int oldlen = oldkvs->_size;
275                         int copyDone = _copy_done.load(memory_order_relaxed);
276                         if (workdone > 0) {
277                                 while (true) {
278                                         copyDone = _copy_done.load(memory_order_relaxed);
279                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
280                                                 workdone, memory_order_relaxed, memory_order_relaxed))
281                                                 break;
282                                 }
283                         }
284         
285                         // Promote the new table to the current table
286                         if (copyDone + workdone == oldlen &&
287                                 topmap->_kvs.load(memory_order_acquire) == oldkvs)
288                                 topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs, memory_order_release,
289                                         memory_order_release);
290                 }
291         
292                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
293                         kvs_data *newkvs) {
294                         slot *key_slot;
295                         while ((key_slot = key(oldkvs, idx)) == NULL)
296                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
297         
298                         // First CAS old to Prime
299                         slot *oldval = val(oldkvs, idx, NULL);
300                         while (!is_prime(oldval)) {
301                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
302                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
303                                 if (CAS_val(oldkvs, idx, oldval, box)) {
304                                         if (box == TOMBPRIME)
305                                                 return 1; // Copy done
306                                         // Otherwise we CAS'd the box
307                                         oldval = box; // Record updated oldval
308                                         break;
309                                 }
310                                 oldval = val(oldkvs, idx, NULL); // Else re-try
311                         }
312         
313                         if (oldval == TOMBPRIME) return false; // Copy already completed here
314         
315                         slot *old_unboxed = new slot(false, oldval->_ptr);
316                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
317                                 NULL) == NULL);
318         
319                         // Old value is exposed in the new table
320                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
321                                 oldval = val(oldkvs, idx, NULL);
322         
323                         return copied_into_new;
324                 }
325         };
326
327         
328
329         private:
330         static const int Default_Init_Size = 8; // Intial table size
331
332         static slot* const MATCH_ANY;
333         static slot* const NO_MATCH_OLD;
334
335         static slot* const TOMBPRIME;
336         static slot* const TOMBSTONE;
337
338         static const int REPROBE_LIMIT = 10; // Forces a table-resize
339
340         atomic<kvs_data*> _kvs;
341
342         public:
343         cliffc_hashtable() {
344                 // Should initialize the CHM for the construction of the table
345                 // For other CHM in kvs_data, they should be initialzed in resize()
346                 // because the size is determined dynamically
347                 kvs_data *kvs = new kvs_data(Default_Init_Size);
348                 void *chm = (void*) new CHM(0);
349                 kvs->_data[0].store(chm, memory_order_relaxed);
350                 _kvs.store(kvs, memory_order_release);
351         }
352
353         cliffc_hashtable(int init_size) {
354                 // Should initialize the CHM for the construction of the table
355                 // For other CHM in kvs_data, they should be initialzed in resize()
356                 // because the size is determined dynamically
357                 kvs_data *kvs = new kvs_data(init_size);
358                 void *chm = (void*) new CHM(0);
359                 kvs->_data[0].store(chm, memory_order_relaxed);
360                 _kvs.store(kvs, memory_order_release);
361         }
362
363         /**
364                 @Begin
365                 @Interface: Get
366                 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
367                 @ID: __sequential.getKeyTag(key)
368                 @Action:
369                         @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
370                 @Post_check:
371                         __sequential.equals_val(_Old_Val, __RET__)
372                 @End
373         */
374         shared_ptr<TypeV> get(TypeK& key) {
375                 void *key_ptr = (void*) new TypeK(key);
376                 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
377                 int fullhash = hash(key_slot);
378                 slot *V = get_impl(this, _kvs, key_slot, fullhash);
379                 if (V == NULL) return NULL;
380                 assert (!is_prime(V));
381                 return static_pointer_cast<TypeV>(V->_ptr);
382         }
383
384         /**
385                 @Begin
386                 @Interface: Put
387                 @Commit_point_set: Write_Val_Point
388                 @ID: __sequential.getKeyTag(key)
389                 @Action:
390                         # Remember this old value at checking point
391                         @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
392                         @Code: __sequential.map.put(key, &val);
393                 @Post_check:
394                         __sequential.equals_val(__RET__, _Old_Val)
395                 @End
396         */
397         shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
398                 return putIfMatch(key, val, NO_MATCH_OLD);
399         }
400
401         /**
402                 @Begin
403                 @Interface: PutIfAbsent
404                 @Commit_point_set:
405                         Write_Val_Point | PutIfAbsent_Fail_Point
406                 @Condition: __sequential.map.get(key) == NULL
407                 @HB_condition:
408                         COND_PutIfAbsentSucc :: __RET__ == NULL
409                 @ID: __sequential.getKeyTag(key)
410                 @Action:
411                         @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
412                         @Code:
413                         if (__COND_SAY__)
414                                 __sequential.map.put(key, &value);
415                 @Post_check:
416                         __COND_SAY__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__) 
417                 @End
418         */
419         shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
420                 return putIfMatch(key, val, TOMBSTONE);
421         }
422
423         /**
424                 @Begin
425                 @Interface: RemoveAny
426                 @Commit_point_set: Write_Val_Point
427                 @ID: __sequential.getKeyTag(key)
428                 @Action:
429                         @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
430                         @Code: __sequential.map.put(key, NULL);
431                 @Post_check:
432                         __sequential.equals_val(__RET__, _Old_Val)
433                 @End
434         */
435         shared_ptr<TypeV> remove(TypeK& key) {
436                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
437         }
438
439         /**
440                 @Begin
441                 @Interface: RemoveIfMatch
442                 @Commit_point_set:
443                         Write_Val_Point | RemoveIfMatch_Fail_Point
444                 @Condition:
445                         __sequential.equals_val(__sequential.map.get(key), &val)
446                 @HB_condition:
447                         COND_RemoveIfMatchSucc :: __RET__ == true
448                 @ID: __sequential.getKeyTag(key)
449                 @Action:
450                         @Code:
451                         if (__COND_SAY__)
452                                 __sequential.map.put(key, NULL);
453                 @Post_check:
454                         __COND_SAY__ ? __RET__ : !__RET__
455                 @End
456         */
457         bool remove(TypeK& key, TypeV& val) {
458                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
459                 return putIfMatch(key, TOMBSTONE, val) == val;
460
461         }
462
463         /**
464                 @Begin
465                 @Interface: ReplaceAny
466                 @Commit_point_set:
467                         Write_Val_Point
468                 @ID: __sequential.getKeyTag(key)
469                 @Action:
470                         @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
471                 @Post_check:
472                         __sequential.equals_val(__RET__, _Old_Val)
473                 @End
474         */
475         shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
476                 return putIfMatch(key, val, MATCH_ANY);
477         }
478
479         /**
480                 @Begin
481                 @Interface: ReplaceIfMatch
482                 @Commit_point_set:
483                         Write_Val_Point | ReplaceIfMatch_Fail_Point
484                 @Condition:
485                         __sequential.equals_val(__sequential.map.get(key), &oldval)
486                 @HB_condition:
487                         COND_ReplaceIfMatchSucc :: __RET__ == true
488                 @ID: __sequential.getKeyTag(key)
489                 @Action:
490                         @Code:
491                         if (__COND_SAY__)
492                                 __sequential.map.put(key, &newval);
493                 @Post_check:
494                         __COND_SAY__ ? __RET__ : !__RET__
495                 @End
496         */
497         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
498                 return putIfMatch(key, newval, oldval) == oldval;
499         }
500
501         private:
502         static CHM* get_chm(kvs_data* kvs) {
503                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
504         }
505
506         static int* get_hashes(kvs_data *kvs) {
507                 return (int *) kvs->_data[1].load(memory_order_relaxed);
508         }
509         
510         // Preserve happens-before semantics on newly inserted keys
511         static inline slot* key(kvs_data *kvs, int idx) {
512                 assert (idx >= 0 && idx < kvs->_size);
513                 // Corresponding to the volatile read in get_impl() and putIfMatch in
514                 // Cliff Click's Java implementation
515                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
516         }
517
518         /**
519                 The atomic operation in val() function is a "potential" commit point,
520                 which means in some case it is a real commit point while it is not for
521                 some other cases. This so happens because the val() function is such a
522                 fundamental function that many internal operation will call. Our
523                 strategy is that we label any potential commit points and check if they
524                 really are the commit points later.
525         */
526         // Preserve happens-before semantics on newly inserted values
	// Read the value slot at index idx (pair i lives at indices 2i+2/2i+3).
	// The acquire load pairs with the release in CAS_val(); the @Begin/@End
	// block below is a CDSSpec annotation marking this load as a potential
	// commit point — do not remove it.
	static inline slot* val(kvs_data *kvs, int idx) {
		assert (idx >= 0 && idx < kvs->_size);
		// Corresponding to the volatile read in get_impl() and putIfMatch in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many many functions are
			# calling val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;


	}
544
545         static int hash(slot *key_slot) {
546                 assert(key_slot != NULL && key_slot->_ptr != NULL);
547                 shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
548                 int h = key->hashCode();
549                 // Spread bits according to Cliff Click's code
550                 h += (h << 15) ^ 0xffffcd7d;
551                 h ^= (h >> 10);
552                 h += (h << 3);
553                 h ^= (h >> 6);
554                 h += (h << 2) + (h << 14);
555                 return h ^ (h >> 16);
556         }
557         
558         // Heuristic to decide if reprobed too many times. 
559         // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
560         // put it triggers a table resize. Several places MUST have exact agreement.
561         static int reprobe_limit(int len) {
562                 return REPROBE_LIMIT + (len >> 2);
563         }
564         
565         static inline bool is_prime(slot *val) {
566                 return (val != NULL) && val->_prime;
567         }
568
569         // Check for key equality. Try direct pointer comparison first (fast
570         // negative teset) and then the full 'equals' call
	// Key-equality between table entry K and the probe key key_slot.
	// Note: the 'hash' parameter is the table INDEX of K (it names the slot
	// in the hashes[] cache), not a hash value; hashes[hash] == 0 means
	// "not yet recorded", so only a recorded, differing fullhash gives the
	// fast negative. Pointer identity short-circuits the full compare.
	// NOTE(review): equals() is invoked here with a shared_ptr<void>
	// argument — confirm TypeK declares a matching overload; the header
	// comment above documents "int equals(TypeK anotherKey)".
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		assert (K != NULL);
		shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}
582
583         static bool valeq(slot *val_slot1, slot *val_slot2) {
584                 assert (val_slot1 != NULL);
585                 shared_ptr<TypeK> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
586                 if (val_slot2 == NULL || ptr1 == NULL) return false;
587                 return ptr1->equals(val_slot2->_ptr);
588         }
589         
590         // Together with key() preserve the happens-before relationship on newly
591         // inserted keys
592         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
593                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
594                         desired, memory_order_release, memory_order_release);
595         }
596
597         /**
598                 Same as the val() function, we only label the CAS operation as the
599                 potential commit point.
600         */
601         // Together with val() preserve the happens-before relationship on newly
602         // inserted values
603         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
604                 *desired) {
605                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
606                         desired, memory_order_release, memory_order_release);
607                 /**
608                         # If it is a successful put instead of a copy or any other internal
609                         # operantions, expected != NULL
610                         @Begin
611                         @Potential_commit_point_define: __ATOMIC_RET__ == true
612                         @Label: Write_Val_Point
613                         @End
614                 */
615                 return res;
616         }
617
	// Core lookup: probe 'kvs' for key_slot, reprobing linearly and
	// following an in-progress table copy when a primed value is seen.
	// Returns the value slot, TOMBSTONE filtered to NULL by the caller, or
	// NULL on a miss. The @Begin/@End blocks are CDSSpec commit-point
	// annotations; do not remove them.
	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		// Mask the spread hash into the (power-of-two sized) table
		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (V == NULL) return NULL; // A miss
			
			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			// NOTE(review): Cliff Click's Java version reprobes against
			// reprobe_limit(len) and tests the table key K against
			// TOMBSTONE; this tests the probe key key_slot (never a
			// TOMBSTONE) and the bare REPROBE_LIMIT — confirm intent.
			if (++reprobe_cnt >= REPROBE_LIMIT ||
				key_slot == TOMBSTONE) {
				// Retry in new table
				// Atomic read (acquire) can be here 
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}
674
675         // A wrapper of the essential function putIfMatch()
676         shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
677                 // TODO: Should throw an exception rather return NULL
678                 if (old_val == NULL) {
679                         return NULL;
680                 }
681                 void *key_ptr = (void*) new TypeK(key);
682                 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
683
684                 void *val_ptr = (void*) new TypeV(value);
685                 slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
686                 slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
687                 // Only when copy_slot() call putIfMatch() will it return NULL
688                 assert (res != NULL); 
689                 assert (!is_prime(res));
690                 return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
691         }
692
	/**
		Put, Remove, PutIfAbsent, etc. all funnel through this function. It
		returns the old value. If the returned value is equal to expVal (or
		expVal is NO_MATCH_OLD), then this function has put val_slot into the
		table 'kvs'. Only copy_slot() passes a NULL expVal, and putIfMatch()
		only returns NULL if passed a NULL expVal.
	*/
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		assert (val_slot != NULL);
		assert (!is_prime(val_slot));
		assert (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		// Mask works because the table length is a power of two
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;
		
		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx, NULL);
			if (K == NULL) { // Get a free slot
				// Removing a key that was never inserted: nothing to do
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				assert (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it
			
			// Notice that the logic here should be consistent with that of get.
			// The first predicate means too many reprobes means nothing in the
			// old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				// Retry the whole operation in the new table
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change
	
		// Here it tries to resize cause it doesn't want other threads to stop
		// its progress (eagerly try to resize soon)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start
		
		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);
		
		// Decided to update the existing table
		while (true) {
			assert (!is_prime(V));

			// Give up if the current value V does not match expVal under any
			// of the accepted interpretations (NO_MATCH_OLD, MATCH_ANY,
			// TOMBSTONE-for-missing, or deep value equality)
			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, should adjust size
					// Both normal put's and table-copy calls putIfMatch, but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				// A raw NULL old value is reported as TOMBSTONE to callers
				// (only table-copy, which passes expVal == NULL, sees NULL)
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx, NULL);
			// A primed value means a copy started under us; finish it and
			// retry in the new table
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}
829
830         // Help along an existing table-resize. This is a fast cut-out wrapper.
831         kvs_data* help_copy(kvs_data *helper) {
832                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
833                 CHM *topchm = get_chm(topkvs);
834                 // No cpy in progress
835                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
836                 topchm->help_copy_impl(this, topkvs, false);
837                 return helper;
838         }
839 };
840
841 #endif