tweak
[cdsspec-compiler.git] / benchmark / cliffc-hashtable / simplified_cliffc_hashtable.h
1 #ifndef SIMPLIFIED_CLIFFC_HASHTABLE_H
2 #define SIMPLIFIED_CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
11
12
13 /**
14         This header file declares and defines a simplified version of Cliff Click's
	NonblockingHashMap. It contains all the necessary structures and main
16         functions. In simplified_cliffc_hashtable.cc file, it has the definition for
17         the static fields.
18 */
19
20 template<typename TypeK, typename TypeV>
21 class cliffc_hashtable;
22
23 /**
	Corresponding to the Object[] array in Cliff Click's Java implementation.
25         It keeps the first two slots for CHM (Hashtable control unit) and the hash
26         records (an array of hash used for fast negative key-equality check).
27 */
struct kvs_data {
	int _size;             // Number of key/value pairs this table can hold
	atomic<void*> *_data;  // [0]=CHM, [1]=hash cache, [2..]=key/value slots

	/**
		Allocates the backing array for a table of 'sz' key/value pairs.
		Layout of _data: slot 0 holds the CHM control block (installed by
		the caller or by resize()), slot 1 holds an int[] of cached hashes,
		and slots 2.. hold alternating key/value pairs (2 per entry).
	*/
	kvs_data(int sz) {
		_size = sz;
		// Two control slots plus a key slot and a value slot per pair.
		// The original computed this as sizeof(atomic<void*>) * 2 + 2,
		// which is a fixed 18 regardless of sz and under-allocates for
		// any table larger than 8 entries.
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block (slot 0) should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[_size];
		for (int i = 0; i < _size; i++)
			hashes[i] = 0;
		_data[1].store(hashes, memory_order_relaxed);
		// Init the key/value data slots to the Null slot
		for (int i = 2; i < real_size; i++)
			_data[i].store(NULL, memory_order_relaxed);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		// 'hashes' was allocated with new[]; scalar delete (as in the
		// original) is undefined behavior.
		delete[] hashes;
		delete[] _data;
	}
};
56
struct slot {
	bool _prime;           // Set while this value is being migrated by a resize
	shared_ptr<void> _ptr; // Type-erased payload (boxed TypeK or TypeV)

	// A slot simply pairs a payload pointer with its "prime" flag.
	slot(bool prime, shared_ptr<void> ptr) : _prime(prime), _ptr(ptr) {}
};
67
68
69 /**
70         TypeK must have defined function "int hashCode()" which return the hash
71         code for the its object, and "int equals(TypeK anotherKey)" which is
72         used to judge equality.
73         TypeK and TypeV should define their own copy constructor.
74         To make the memory management safe and similar to Cliff Click's Java
75         implementation, we use shared_ptr instead of normal pointer in terms of the
76         pointers that point to TypeK and TypeV.
77 */
78 template<typename TypeK, typename TypeV>
79 class cliffc_hashtable {
80         /**
81                 # The synchronization we have for the hashtable gives us the property of
82                 # serializability, so we should have a sequential hashtable when we check the
83                 # correctness. The key thing is to identify all the commit point.
84         
85                 @Begin
86                 @Global_define:
87
88                         spec_hashtable<TypeK, TypeV*> __map;
89                         spec_hashtable<TypeK, Tag> __id_map;
90                         Tag __tag;
91
92                         static bool _val_equals(TypeV *ptr1, TypeV *ptr2) {
93                                 // ...
94                         }
95                         
96                         # Update the tag for the current key slot if the corresponding tag
97                         # is NULL, otherwise just return that tag. It will update the next
98                         # available tag too if it requires a new tag for that key slot.
99                         static Tag _getKeyTag(TypeK &key) {
100                                 if (__id_map.get(key) == NULL) {
101                                         Tag cur_tag = tag.current();
102                                         __id_map.put(key, cur_tag);
103                                         __tag.next();
104                                         return cur_tag;
105                                 } else {
106                                         return __id_map.get(key);
107                                 }
108                         }
109                 
110                 @Interface_cluster:
111                         Read_interface = {
112                                 Get,
113                                 PutIfAbsent,
114                                 RemoveAny,
115                                 RemoveIfMatch,
116                                 ReplaceAny,
117                                 ReplaceIfMatch
118                         }
119                         
120                         Write_interface = {
121                                 Put,
122                                 PutIfAbsent(COND_PutIfAbsentSucc),
123                                 RemoveAny,
124                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
125                                 ReplaceAny,
126                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
127                         }
128                 @Happens_before:
129                         Write_interface -> Read_interface
130                 @End
131         */
132
133 friend class CHM;
134         /**
135                 The control structure for the hashtable
136         */
137         private:
138         class CHM {
139                 friend class cliffc_hashtable;
140                 private:
141                 atomic<kvs_data*> _newkvs;
142                 
143                 // Size of active K,V pairs
144                 atomic_int _size;
145         
146                 // Count of used slots
147                 atomic_int _slots;
148                 
149                 // The next part of the table to copy
150                 atomic_int _copy_idx;
151                 
152                 // Work-done reporting
153                 atomic_int _copy_done;
154         
155                 public:
156                 CHM(int size) {
157                         _size.store(size, memory_order_relaxed);
158                         _slots.store(0, memory_order_relaxed);
159         
160                         _copy_idx.store(0, memory_order_relaxed);
161                         _copy_done.store(0, memory_order_release);
162                 }
163         
164                 ~CHM() {}
165                 
166                 private:
167                         
168                 // Heuristic to decide if the table is too full
169                 bool table_full(int reprobe_cnt, int len) {
170                         return
171                                 reprobe_cnt >= REPROBE_LIMIT &&
172                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
173                 }
174         
175                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
176                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
177                         if (newkvs != NULL)
178                                 return newkvs;
179         
180                         // No copy in-progress, start one; Only double the table size
181                         int oldlen = kvs->_size;
182                         int sz = _size.load(memory_order_relaxed);
183                         int newsz = sz;
184                         
185                         // Just follow Cliff Click's heuristic to decide the new size
186                         if (sz >= (oldlen >> 2)) { // If we are 25% full
187                                 newsz = oldlen << 1; // Double size
188                                 if (sz >= (oldlen >> 1))
189                                         newsz = oldlen << 2; // Double double size
190                         }
191         
192                         // We do not record the record timestamp
193                         if (newsz <= oldlen) newsz = oldlen << 1;
194                         // Do not shrink ever
195                         if (newsz < oldlen) newsz = oldlen;
196         
197                         // Last check cause the 'new' below is expensive
198                         newkvs = _newkvs.load(memory_order_acquire);
199                         if (newkvs != NULL) return newkvs;
200         
201                         newkvs = new kvs_data(newsz);
202                         void *chm = (void*) new CHM(sz);
203                         newkvs->_data[0].store(chm, memory_order_relaxed);
204         
205                         kvs_data *cur_newkvs; 
206                         // Another check after the slow allocation
207                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
208                                 return cur_newkvs;
209                         // CAS the _newkvs to the allocated table
210                         kvs_data *desired = (kvs_data*) NULL;
211                         kvs_data *expected = (kvs_data*) newkvs; 
212                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
213                                         memory_order_release)) {
214                                 // Should clean the allocated area
215                                 delete newkvs;
216                                 newkvs = _newkvs.load(memory_order_acquire);
217                         }
218                         return newkvs;
219                 }
220         
221                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
222                         bool copy_all) {
223                         assert (get_chm(oldkvs) == this);
224                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
225                         int oldlen = oldkvs->_size;
226                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
227                 
228                         // Just follow Cliff Click's code here
229                         int panic_start = -1;
230                         int copyidx;
231                         while (_copy_done.load(memory_order_acquire) < oldlen) {
232                                 copyidx = _copy_idx.load(memory_order_acquire);
233                                 if (panic_start == -1) { // No painc
234                                         copyidx = _copy_idx.load(memory_order_acquire);
235                                         while (copyidx < (oldlen << 1) &&
236                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
237                                                         min_copy_work, memory_order_release, memory_order_release))
238                                                 copyidx = _copy_idx.load(memory_order_relaxed);
239                                         if (!(copyidx < (oldlen << 1)))
240                                                 panic_start = copyidx;
241                                 }
242         
243                                 // Now copy the chunk of work we claimed
244                                 int workdone = 0;
245                                 for (int i = 0; i < min_copy_work; i++)
246                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
247                                                 newkvs))
248                                                 workdone++;
249                                 if (workdone > 0)
250                                         copy_check_and_promote(topmap, oldkvs, workdone);
251         
252                                 copyidx += min_copy_work;
253                                 if (!copy_all && panic_start == -1)
254                                         return; // We are done with the work we claim
255                         }
256                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
257                 }
258         
259                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
260                         *oldkvs, int idx, void *should_help) {
261                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
262                         // We're only here cause the caller saw a Prime
263                         if (copy_slot(topmap, idx, oldkvs, _newkvs))
264                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
265                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
266                 }
267         
268                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
269                         oldkvs, int workdone) {
270                         int oldlen = oldkvs->_size;
271                         int copyDone = _copy_done.load(memory_order_relaxed);
272                         if (workdone > 0) {
273                                 while (true) {
274                                         copyDone = _copy_done.load(memory_order_relaxed);
275                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
276                                                 workdone, memory_order_relaxed, memory_order_relaxed))
277                                                 break;
278                                 }
279                         }
280         
281                         // Promote the new table to the current table
282                         if (copyDone + workdone == oldlen &&
283                                 topmap->_kvs.load(memory_order_acquire) == oldkvs)
284                                 topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs, memory_order_release,
285                                         memory_order_release);
286                 }
287         
288                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
289                         kvs_data *newkvs) {
290                         slot *key_slot;
291                         while ((key_slot = key(oldkvs, idx)) == NULL)
292                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
293         
294                         // First CAS old to Prime
295                         slot *oldval = val(oldkvs, idx, NULL);
296                         while (!is_prime(oldval)) {
297                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
298                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
299                                 if (CAS_val(oldkvs, idx, oldval, box)) {
300                                         if (box == TOMBPRIME)
301                                                 return 1; // Copy done
302                                         // Otherwise we CAS'd the box
303                                         oldval = box; // Record updated oldval
304                                         break;
305                                 }
306                                 oldval = val(oldkvs, idx, NULL); // Else re-try
307                         }
308         
309                         if (oldval == TOMBPRIME) return false; // Copy already completed here
310         
311                         slot *old_unboxed = new slot(false, oldval->_ptr);
312                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
313                                 NULL) == NULL);
314         
315                         // Old value is exposed in the new table
316                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
317                                 oldval = val(oldkvs, idx, NULL);
318         
319                         return copied_into_new;
320                 }
321         };
322
323         
324
	private:
	static const int Default_Init_Size = 8; // Initial table size

	// Sentinel slots (storage defined in simplified_cliffc_hashtable.cc):
	static slot* const MATCH_ANY;    // expVal wildcard: match any existing value
	static slot* const NO_MATCH_OLD; // expVal wildcard: ignore the old value

	static slot* const TOMBPRIME;    // Tombstone seen during a table copy
	static slot* const TOMBSTONE;    // A deleted key or value

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	atomic<kvs_data*> _kvs; // The current top-level key/value table
337
338         public:
339         cliffc_hashtable() {
340                 // Should initialize the CHM for the construction of the table
341                 // For other CHM in kvs_data, they should be initialzed in resize()
342                 // because the size is determined dynamically
343                 kvs_data *kvs = new kvs_data(Default_Init_Size);
344                 void *chm = (void*) new CHM(0);
345                 kvs->_data[0].store(chm, memory_order_relaxed);
346                 _kvs.store(kvs, memory_order_release);
347         }
348
349         cliffc_hashtable(int init_size) {
350                 // Should initialize the CHM for the construction of the table
351                 // For other CHM in kvs_data, they should be initialzed in resize()
352                 // because the size is determined dynamically
353                 kvs_data *kvs = new kvs_data(init_size);
354                 void *chm = (void*) new CHM(0);
355                 kvs->_data[0].store(chm, memory_order_relaxed);
356                 _kvs.store(kvs, memory_order_release);
357         }
358
359         /**
360                 @Begin
361                 @Interface: Get
362                 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
363                 @ID: _getKeyTag(key)
364                 @Action:
365                         @DefineVar: TypeV *_Old_Val = __map.get(key)
366                 @Post_check:
367                         _equals_val(_Old_Val, __RET__)
368                 @End
369         */
370         shared_ptr<TypeV> get(TypeK& key) {
371                 void *key_ptr = (void*) new TypeK(key);
372                 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
373                 int fullhash = hash(key_slot);
374                 slot *V = get_impl(this, _kvs, key_slot, fullhash);
375                 if (V == NULL) return NULL;
376                 assert (!is_prime(V));
377                 return static_pointer_cast<TypeV>(V->_ptr);
378         }
379
380         /**
381                 @Begin
382                 @Interface: Put
383                 @Commit_point_set: Write_Val_Point
384                 @ID: _getKeyTag(key)
385                 @Action:
386                         # Remember this old value at checking point
387                         @DefineVar: TypeV *_Old_Val = __map.get(key)
388                         @Code: __map.put(key, &val);
389                 @Post_check:
390                         _equals_val(__RET__, _Old_Val)
391                 @End
392         */
393         shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
394                 return putIfMatch(key, val, NO_MATCH_OLD);
395         }
396
397         /**
398                 @Begin
399                 @Interface: PutIfAbsent
400                 @Commit_point_set:
401                         Write_Val_Point | PutIfAbsent_Fail_Point
402                 @Condition: __map.get(key) == NULL
403                 @HB_condition:
404                         COND_PutIfAbsentSucc :: __RET__ == NULL
405                 @ID: _getKeyTag(key)
406                 @Action:
407                         @DefineVar: TypeV *_Old_Val = __map.get(key)
408                         @Code:
409                         if (COND_SAT)
410                                 __map.put(key, &value);
411                 @Post_check:
412                         COND_SAT ? __RET__ == NULL : _equals_val(_Old_Val, __RET__) 
413                 @End
414         */
415         shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
416                 return putIfMatch(key, val, TOMBSTONE);
417         }
418
419         /**
420                 @Begin
421                 @Interface: RemoveAny
422                 @Commit_point_set: Write_Val_Point
423                 @ID: _getKeyTag(key)
424                 @Action:
425                         @DefineVar: TypeV *_Old_Val = __map.get(key)
426                         @Code: __map.put(key, NULL);
427                 @Post_check:
428                         _equals_val(__RET__, _Old_Val)
429                 @End
430         */
431         shared_ptr<TypeV> remove(TypeK& key) {
432                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
433         }
434
435         /**
436                 @Begin
437                 @Interface: RemoveIfMatch
438                 @Commit_point_set:
439                         Write_Val_Point | RemoveIfMatch_Fail_Point
440                 @Condition:
441                         _equals_val(__map.get(key), &val)
442                 @HB_condition:
443                         COND_RemoveIfMatchSucc :: __RET__ == true
444                 @ID: _getKeyTag(key)
445                 @Action:
446                         @Code:
447                         if (COND_SAT)
448                                 __map.put(key, NULL);
449                 @Post_check:
450                         COND_SAT ? __RET__ : !__RET__
451                 @End
452         */
453         bool remove(TypeK& key, TypeV& val) {
454                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
455                 return putIfMatch(key, TOMBSTONE, val) == val;
456
457         }
458
459         /**
460                 @Begin
461                 @Interface: ReplaceAny
462                 @Commit_point_set:
463                         Write_Val_Point
464                 @ID: _getKeyTag(key)
465                 @Action:
466                         @DefineVar: TypeV *_Old_Val = __map.get(key)
467                 @Post_check:
468                         _equals_val(__RET__, _Old_Val)
469                 @End
470         */
471         shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
472                 return putIfMatch(key, val, MATCH_ANY);
473         }
474
475         /**
476                 @Begin
477                 @Interface: ReplaceIfMatch
478                 @Commit_point_set:
479                         Write_Val_Point | ReplaceIfMatch_Fail_Point
480                 @Condition:
481                         _equals_val(__map.get(key), &oldval)
482                 @HB_condition:
483                         COND_ReplaceIfMatchSucc :: __RET__ == true
484                 @ID: _getKeyTag(key)
485                 @Action:
486                         @Code:
487                         if (COND_SAT)
488                                 __map.put(key, &newval);
489                 @Post_check:
490                         COND_SAT ? __RET__ : !__RET__
491                 @End
492         */
493         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
494                 return putIfMatch(key, newval, oldval) == oldval;
495         }
496
497         private:
498         static CHM* get_chm(kvs_data* kvs) {
499                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
500         }
501
502         static int* get_hashes(kvs_data *kvs) {
503                 return (int *) kvs->_data[1].load(memory_order_relaxed);
504         }
505         
506         // Preserve happens-before semantics on newly inserted keys
507         static inline slot* key(kvs_data *kvs, int idx) {
508                 assert (idx >= 0 && idx < kvs->_size);
509                 // Corresponding to the volatile read in get_impl() and putIfMatch in
510                 // Cliff Click's Java implementation
511                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
512         }
513
514         /**
515                 The atomic operation in val() function is a "potential" commit point,
516                 which means in some case it is a real commit point while it is not for
517                 some other cases. This so happens because the val() function is such a
518                 fundamental function that many internal operation will call. Our
519                 strategy is that we label any potential commit points and check if they
520                 really are the commit points later.
521         */
522         // Preserve happens-before semantics on newly inserted values
	// Preserve happens-before semantics on newly inserted values.
	// Returns the value slot at 'idx'; the acquire load pairs with the
	// release CAS in CAS_val().
	static inline slot* val(kvs_data *kvs, int idx) {
		assert (idx >= 0 && idx < kvs->_size);
		// Values occupy the odd data slots (idx * 2 + 3). Corresponding to
		// the volatile read in get_impl() and putIfMatch in Cliff Click's
		// Java implementation.
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many many functions are
			# calling val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}
540
541         static int hash(slot *key_slot) {
542                 assert(key_slot != NULL && key_slot->_ptr != NULL);
543                 shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
544                 int h = key->hashCode();
545                 // Spread bits according to Cliff Click's code
546                 h += (h << 15) ^ 0xffffcd7d;
547                 h ^= (h >> 10);
548                 h += (h << 3);
549                 h ^= (h >> 6);
550                 h += (h << 2) + (h << 14);
551                 return h ^ (h >> 16);
552         }
553         
554         // Heuristic to decide if reprobed too many times. 
555         // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
556         // put it triggers a table resize. Several places MUST have exact agreement.
557         static int reprobe_limit(int len) {
558                 return REPROBE_LIMIT + (len >> 2);
559         }
560         
561         static inline bool is_prime(slot *val) {
562                 return (val != NULL) && val->_prime;
563         }
564
	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call.
	// NOTE(review): 'hash' here is the probe index into the 'hashes' cache
	// (a zero entry means "not recorded", so it cannot rule the key out),
	// and equals() is handed a shared_ptr<void> — confirm TypeK::equals
	// accepts that argument type.
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		assert (K != NULL);
		shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
		// Cheap checks first; the expensive TypeK::equals() runs last
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}
578
579         static bool valeq(slot *val_slot1, slot *val_slot2) {
580                 assert (val_slot1 != NULL);
581                 shared_ptr<TypeK> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
582                 if (val_slot2 == NULL || ptr1 == NULL) return false;
583                 return ptr1->equals(val_slot2->_ptr);
584         }
585         
586         // Together with key() preserve the happens-before relationship on newly
587         // inserted keys
588         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
589                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
590                         desired, memory_order_release, memory_order_release);
591         }
592
593         /**
594                 Same as the val() function, we only label the CAS operation as the
595                 potential commit point.
596         */
597         // Together with val() preserve the happens-before relationship on newly
598         // inserted values
599         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
600                 *desired) {
601                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
602                         desired, memory_order_release, memory_order_release);
603                 /**
604                         # If it is a successful put instead of a copy or any other internal
605                         # operantions, expected != NULL
606                         @Begin
607                         @Potential_commit_point_define: __ATOMIC_RET__ == true
608                         @Label: Write_Val_Point
609                         @End
610                 */
611                 return res;
612         }
613
614         slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
615                 fullhash) {
616                 int len = kvs->_size;
617                 CHM *chm = get_chm(kvs);
618                 int *hashes = get_hashes(kvs);
619
620                 int idx = fullhash & (len - 1);
621                 int reprobe_cnt = 0;
622                 while (true) {
623                         slot *K = key(kvs, idx);
624                         slot *V = val(kvs, idx);
625                         /**
626                                 @Begin
627                                 @Commit_point_define: V == NULL
628                                 @Potential_commit_point_label: Read_Val_Point
629                                 @Label: Get_Success_Point_1
630                                 @End
631                         */
632
633                         if (V == NULL) return NULL; // A miss
634                         
635                         if (keyeq(K, key_slot, hashes, idx, fullhash)) {
636                                 // Key hit! Check if table-resize in progress
637                                 if (!is_prime(V)) {
638                                         /**
639                                                 @Begin
640                                                 @Commit_point_define: true
641                                                 @Potential_commit_point_label: Read_Val_Point
642                                                 @Label: Get_Success_Point_2
643                                                 @End
644                                         */
645                                         return (V == TOMBSTONE) ? NULL : V; // Return this value
646                                 }
647                                 // Otherwise, finish the copy & retry in the new table
648                                 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
649                                         idx, key_slot), key_slot, fullhash);
650                         }
651
652                         if (++reprobe_cnt >= REPROBE_LIMIT ||
653                                 key_slot == TOMBSTONE) {
654                                 // Retry in new table
655                                 // Atomic read (acquire) can be here 
656                                 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
657                                 /**
658                                         @Begin
659                                         @Commit_point_define_check: newkvs == NULL
660                                         @Label: Get_Success_Point_3
661                                         @End
662                                 */
663                                 return newkvs == NULL ? NULL : get_impl(topmap,
664                                         topmap->help_copy(newkvs), key_slot, fullhash);
665                         }
666
667                         idx = (idx + 1) & (len - 1); // Reprobe by 1
668                 }
669         }
670
671         // A wrapper of the essential function putIfMatch()
672         shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
673                 // TODO: Should throw an exception rather return NULL
674                 if (old_val == NULL) {
675                         return NULL;
676                 }
677                 void *key_ptr = (void*) new TypeK(key);
678                 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
679
680                 void *val_ptr = (void*) new TypeV(value);
681                 slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
682                 slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
683                 // Only when copy_slot() call putIfMatch() will it return NULL
684                 assert (res != NULL); 
685                 assert (!is_prime(res));
686                 return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
687         }
688
689         /**
690                 Put, Remove, PutIfAbsent, etc will call this function. Return the old
691                 value. If the returned value is equals to the expVal (or expVal is
692                 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
693                 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
694                 NULL if passed a NULL expVal.
695         */
696         static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
697                 *key_slot, slot *val_slot, slot *expVal) {
698                 assert (val_slot != NULL);
699                 assert (!is_prime(val_slot));
700                 assert (!is_prime(expVal));
701
702                 int fullhash = hash(key_slot);
703                 int len = kvs->_size;
704                 CHM *chm = get_chm(kvs);
705                 int *hashes = get_hashes(kvs);
706                 int idx = fullhash & (len - 1);
707
708                 // Claim a key slot
709                 int reprobe_cnt = 0;
710                 slot *K;
711                 slot *V;
712                 kvs_data *newkvs;
713                 
714                 while (true) { // Spin till we get a key slot
715                         K = key(kvs, idx);
716                         V = val(kvs, idx, NULL);
717                         if (K == NULL) { // Get a free slot
718                                 if (val_slot == TOMBSTONE) return val_slot;
719                                 // Claim the null key-slot
720                                 if (CAS_key(kvs, idx, NULL, key_slot)) {
721                                         chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
722                                         hashes[idx] = fullhash; // Memorize full hash
723                                         break;
724                                 }
725                                 K = key(kvs, idx); // CAS failed, get updated value
726                                 assert (K != NULL);
727                         }
728
729                         // Key slot not null, there exists a Key here
730                         if (keyeq(K, key_slot, hashes, idx, fullhash))
731                                 break; // Got it
732                         
733                         // Notice that the logic here should be consistent with that of get.
734                         // The first predicate means too many reprobes means nothing in the
735                         // old table.
736                         if (++reprobe_cnt >= reprobe_limit(len) ||
737                                 K == TOMBSTONE) { // Found a Tombstone key, no more keys
738                                 newkvs = chm->resize(topmap, kvs);
739                                 // Help along an existing copy
740                                 if (expVal != NULL) topmap->help_copy(newkvs);
741                                 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
742                         }
743
744                         idx = (idx + 1) & (len - 1); // Reprobe
745                 } // End of spinning till we get a Key slot
746
747                 if (val_slot == V) return V; // Fast cutout for no-change
748         
749                 // Here it tries to resize cause it doesn't want other threads to stop
750                 // its progress (eagerly try to resize soon)
751                 newkvs = chm->_newkvs.load(memory_order_acquire);
752                 if (newkvs == NULL &&
753                         ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
754                         newkvs = chm->resize(topmap, kvs); // Force the copy to start
755                 
756                 // Finish the copy and then put it in the new table
757                 if (newkvs != NULL)
758                         return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
759                                 expVal), key_slot, val_slot, expVal);
760                 
761                 // Decided to update the existing table
762                 while (true) {
763                         assert (!is_prime(V));
764
765                         if (expVal != NO_MATCH_OLD &&
766                                 V != expVal &&
767                                 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
768                                 !(V == NULL && expVal == TOMBSTONE) &&
769                                 (expVal == NULL || !valeq(expVal, V))) {
770                                 /**
771                                         @Begin
772                                         @Commit_point_define: expVal == TOMBSTONE
773                                         @Potential_commit_point_label: Read_Val_Point
774                                         @Label: PutIfAbsent_Fail_Point
775                                                 # This is a check for the PutIfAbsent() when the value
776                                                 # is not absent
777                                         @End
778                                 */
779                                 /**
780                                         @Begin
781                                         @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
782                                         @Potential_commit_point_label: Read_Val_Point
783                                         @Label: RemoveIfMatch_Fail_Point
784                                         @End
785                                 */
786                                 /**
787                                         @Begin
788                                         @Commit_point_define: !valeq(expVal, V)
789                                         @Potential_commit_point_label: Read_Val_Point
790                                         @Label: ReplaceIfMatch_Fail_Point
791                                         @End
792                                 */
793                                 return V; // Do not update!
794                         }
795
796                         if (CAS_val(kvs, idx, V, val_slot)) {
797                                 /**
798                                         @Begin
799                                         # The only point where a successful put happens
800                                         @Commit_point_define: true
801                                         @Potential_commit_point_label: Write_Val_Point
802                                         @Label: Write_Success_Point
803                                         @End
804                                 */
805                                 if (expVal != NULL) { // Not called by a table-copy
806                                         // CAS succeeded, should adjust size
807                                         // Both normal put's and table-copy calls putIfMatch, but
808                                         // table-copy does not increase the number of live K/V pairs
809                                         if ((V == NULL || V == TOMBSTONE) &&
810                                                 val_slot != TOMBSTONE)
811                                                 chm->_size.fetch_add(1, memory_order_relaxed);
812                                         if (!(V == NULL || V == TOMBSTONE) &&
813                                                 val_slot == TOMBSTONE)
814                                                 chm->_size.fetch_add(-1, memory_order_relaxed);
815                                 }
816                                 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
817                         }
818                         // Else CAS failed
819                         V = val(kvs, idx, NULL);
820                         if (is_prime(V))
821                                 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
822                                         idx, expVal), key_slot, val_slot, expVal);
823                 }
824         }
825
826         // Help along an existing table-resize. This is a fast cut-out wrapper.
827         kvs_data* help_copy(kvs_data *helper) {
828                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
829                 CHM *topchm = get_chm(topkvs);
830                 // No cpy in progress
831                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
832                 topchm->help_copy_impl(this, topkvs, false);
833                 return helper;
834         }
835 };
836
837 #endif