6edee89d33e1def8358b3e6b749355de08ef6ad2
[cdsspec-compiler.git] / benchmark / cliffc-hashtable / cliffc_hashtable.h
1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
/**
	This header file declares and defines a simplified version of Cliff Click's
	NonblockingHashMap. It contains all the necessary structures and main
	functions. The simplified_cliffc_hashtable.cc file contains the definitions
	of the static fields.
*/
17
18 template<typename TypeK, typename TypeV>
19 class cliffc_hashtable;
20
/**
	Corresponding to the Object[] array in Cliff Click's Java implementation.
	It keeps the first two slots for the CHM (hashtable control unit) and the
	hash records (an array of hashes used for fast negative key-equality
	checks).
*/
struct kvs_data {
	int _size;                 // Number of K,V pairs this table can hold
	std::atomic<void*> *_data; // [0]=CHM*, [1]=int* hash cache, then K/V pairs

	kvs_data(int sz) {
		_size = sz;
		// Two array cells (key and value) per pair, plus the two header
		// slots. Fix: the original computed sizeof(atomic<void*>) * 2 + 2,
		// a constant 18 on 64-bit platforms, which only coincidentally
		// matches the default size of 8 and under-allocates any larger
		// table (e.g. every table produced by resize()).
		int real_size = sz * 2 + 2;
		_data = new std::atomic<void*>[real_size];
		// The control block (slot 0) should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[_size];
		for (int i = 0; i < _size; i++)
			hashes[i] = 0;
		// Init the data to NULL slots
		for (int i = 2; i < real_size; i++)
			_data[i].store(NULL, std::memory_order_relaxed);
		// Publish the hash cache; release pairs with acquire loads elsewhere
		_data[1].store(hashes, std::memory_order_release);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(std::memory_order_relaxed);
		// Fix: hashes was allocated with new[], so it must be freed with
		// delete[] (plain delete is undefined behavior here).
		delete[] hashes;
		delete[] _data;
	}
};
54
struct slot {
	bool _prime; // true -> boxed "Prime" value: a copy to a new table is underway
	void *_ptr;  // payload pointer (key or value object)

	// Wrap a raw payload pointer, optionally marking it as Prime.
	slot(bool prime, void *ptr) : _prime(prime), _ptr(ptr) {}
};
64
65
/**
	TypeK must define a function "int hashCode()" which returns the hash
	code for its object, and "int equals(TypeK anotherKey)" which is
	used to judge equality.
	TypeK and TypeV should define their own copy constructors.
*/
72 template<typename TypeK, typename TypeV>
73 class cliffc_hashtable {
74         /**
75                 # The synchronization we have for the hashtable gives us the property of
76                 # serializability, so we should have a sequential hashtable when we check the
77                 # correctness. The key thing is to identify all the commit point.
78         
79                 @Begin
80                 @Options:
81                         LANG = C;
82                         CLASS = cliffc_hashtable;
83                 @Global_define:
84                         @DeclareVar:
85                         spec_hashtable<TypeK, TypeV*> map;
86                         spec_hashtable<TypeK, Tag> id_map;
87                         Tag tag;
88                         @InitVar:
89                                 map = spec_hashtable<TypeK, TypeV*>();
				id_map = spec_hashtable<TypeK, Tag>();
91                                 tag = Tag();
92                         @DefineFunc:
93                         bool equals_val(TypeV *ptr1, TypeV *ptr2) {
94                                 // ...
95                         }
96                         
97                         @DefineFunc:
98                         # Update the tag for the current key slot if the corresponding tag
99                         # is NULL, otherwise just return that tag. It will update the next
100                         # available tag too if it requires a new tag for that key slot.
101                         Tag getKeyTag(TypeK &key) {
102                                 if (id_map.get(key) == NULL) {
103                                         Tag cur_tag = tag.current();
104                                         id_map.put(key, cur_tag);
105                                         tag.next();
106                                         return cur_tag;
107                                 } else {
108                                         return id_map.get(key);
109                                 }
110                         }
111                 
112                 @Interface_cluster:
113                         Read_interface = {
114                                 Get,
115                                 PutIfAbsent,
116                                 RemoveAny,
117                                 RemoveIfMatch,
118                                 ReplaceAny,
119                                 ReplaceIfMatch
120                         }
121                         
122                         Write_interface = {
123                                 Put,
124                                 PutIfAbsent(COND_PutIfAbsentSucc),
125                                 RemoveAny,
126                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
127                                 ReplaceAny,
128                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
129                         }
130                 @Happens_before:
131                         Write_interface -> Read_interface
132                 @End
133         */
134
135 friend class CHM;
136         /**
137                 The control structure for the hashtable
138         */
139         private:
140         class CHM {
141                 friend class cliffc_hashtable;
142                 private:
143                 atomic<kvs_data*> _newkvs;
144                 
145                 // Size of active K,V pairs
146                 atomic_int _size;
147         
148                 // Count of used slots
149                 atomic_int _slots;
150                 
151                 // The next part of the table to copy
152                 atomic_int _copy_idx;
153                 
154                 // Work-done reporting
155                 atomic_int _copy_done;
156         
157                 public:
158                 CHM(int size) {
159                         _newkvs.store(NULL, memory_order_relaxed);
160                         _size.store(size, memory_order_relaxed);
161                         _slots.store(0, memory_order_relaxed);
162         
163                         _copy_idx.store(0, memory_order_relaxed);
164                         _copy_done.store(0, memory_order_release);
165                 }
166         
167                 ~CHM() {}
168                 
169                 private:
170                         
171                 // Heuristic to decide if the table is too full
172                 bool table_full(int reprobe_cnt, int len) {
173                         return
174                                 reprobe_cnt >= REPROBE_LIMIT &&
175                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
176                 }
177         
178                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
179                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
180                         if (newkvs != NULL)
181                                 return newkvs;
182         
183                         // No copy in-progress, start one; Only double the table size
184                         int oldlen = kvs->_size;
185                         int sz = _size.load(memory_order_relaxed);
186                         int newsz = sz;
187                         
188                         // Just follow Cliff Click's heuristic to decide the new size
189                         if (sz >= (oldlen >> 2)) { // If we are 25% full
190                                 newsz = oldlen << 1; // Double size
191                                 if (sz >= (oldlen >> 1))
192                                         newsz = oldlen << 2; // Double double size
193                         }
194         
195                         // We do not record the record timestamp
196                         if (newsz <= oldlen) newsz = oldlen << 1;
197                         // Do not shrink ever
198                         if (newsz < oldlen) newsz = oldlen;
199         
200                         // Last check cause the 'new' below is expensive
201                         newkvs = _newkvs.load(memory_order_acquire);
202                         if (newkvs != NULL) return newkvs;
203         
204                         newkvs = new kvs_data(newsz);
205                         void *chm = (void*) new CHM(sz);
206                         newkvs->_data[0].store(chm, memory_order_relaxed);
207         
208                         kvs_data *cur_newkvs; 
209                         // Another check after the slow allocation
210                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
211                                 return cur_newkvs;
212                         // CAS the _newkvs to the allocated table
213                         kvs_data *desired = (kvs_data*) NULL;
214                         kvs_data *expected = (kvs_data*) newkvs; 
215                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
216                                         memory_order_release)) {
217                                 // Should clean the allocated area
218                                 delete newkvs;
219                                 newkvs = _newkvs.load(memory_order_acquire);
220                         }
221                         return newkvs;
222                 }
223         
224                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
225                         bool copy_all) {
226                         assert (get_chm(oldkvs) == this);
227                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
228                         int oldlen = oldkvs->_size;
229                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
230                 
231                         // Just follow Cliff Click's code here
232                         int panic_start = -1;
233                         int copyidx;
234                         while (_copy_done.load(memory_order_acquire) < oldlen) {
235                                 copyidx = _copy_idx.load(memory_order_acquire);
236                                 if (panic_start == -1) { // No painc
237                                         copyidx = _copy_idx.load(memory_order_acquire);
238                                         while (copyidx < (oldlen << 1) &&
239                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
240                                                         min_copy_work, memory_order_release, memory_order_release))
241                                                 copyidx = _copy_idx.load(memory_order_relaxed);
242                                         if (!(copyidx < (oldlen << 1)))
243                                                 panic_start = copyidx;
244                                 }
245         
246                                 // Now copy the chunk of work we claimed
247                                 int workdone = 0;
248                                 for (int i = 0; i < min_copy_work; i++)
249                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
250                                                 newkvs))
251                                                 workdone++;
252                                 if (workdone > 0)
253                                         copy_check_and_promote(topmap, oldkvs, workdone);
254         
255                                 copyidx += min_copy_work;
256                                 if (!copy_all && panic_start == -1)
257                                         return; // We are done with the work we claim
258                         }
259                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
260                 }
261         
262                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
263                         *oldkvs, int idx, void *should_help) {
264                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
265                         // We're only here cause the caller saw a Prime
266                         if (copy_slot(topmap, idx, oldkvs, newkvs))
267                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
268                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
269                 }
270         
271                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
272                         oldkvs, int workdone) {
273                         int oldlen = oldkvs->_size;
274                         int copyDone = _copy_done.load(memory_order_relaxed);
275                         if (workdone > 0) {
276                                 while (true) {
277                                         copyDone = _copy_done.load(memory_order_relaxed);
278                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
279                                                 workdone, memory_order_relaxed, memory_order_relaxed))
280                                                 break;
281                                 }
282                         }
283         
284                         // Promote the new table to the current table
285                         if (copyDone + workdone == oldlen &&
286                                 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
287                                 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
288                                 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
289                                         memory_order_release);
290                         }
291                 }
292         
293                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
294                         kvs_data *newkvs) {
295                         slot *key_slot;
296                         while ((key_slot = key(oldkvs, idx)) == NULL)
297                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
298         
299                         // First CAS old to Prime
300                         slot *oldval = val(oldkvs, idx);
301                         while (!is_prime(oldval)) {
302                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
303                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
304                                 if (CAS_val(oldkvs, idx, oldval, box)) {
305                                         if (box == TOMBPRIME)
306                                                 return 1; // Copy done
307                                         // Otherwise we CAS'd the box
308                                         oldval = box; // Record updated oldval
309                                         break;
310                                 }
311                                 oldval = val(oldkvs, idx); // Else re-try
312                         }
313         
314                         if (oldval == TOMBPRIME) return false; // Copy already completed here
315         
316                         slot *old_unboxed = new slot(false, oldval->_ptr);
317                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
318                                 NULL) == NULL);
319         
320                         // Old value is exposed in the new table
321                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
322                                 oldval = val(oldkvs, idx);
323         
324                         return copied_into_new;
325                 }
326         };
327
328         
329
	private:
	static const int Default_Init_Size = 8; // Initial table size

	// Sentinel slots, defined in the accompanying .cc file (see the header
	// comment at the top). MATCH_ANY / NO_MATCH_OLD are expected-old-value
	// sentinels passed to putIfMatch() by replace()/put().
	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	// TOMBSTONE marks a deleted key/value; TOMBPRIME is its primed (boxed)
	// form used by copy_slot() while a table copy is in progress.
	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	// The current key/value table; swapped (CAS'd) by copy_check_and_promote()
	atomic<kvs_data*> _kvs;

	public:
344         cliffc_hashtable() {
345                 // Should initialize the CHM for the construction of the table
346                 // For other CHM in kvs_data, they should be initialzed in resize()
347                 // because the size is determined dynamically
348                 kvs_data *kvs = new kvs_data(Default_Init_Size);
349                 void *chm = (void*) new CHM(0);
350                 kvs->_data[0].store(chm, memory_order_relaxed);
351                 _kvs.store(kvs, memory_order_release);
352         }
353
354         cliffc_hashtable(int init_size) {
355                 // Should initialize the CHM for the construction of the table
356                 // For other CHM in kvs_data, they should be initialzed in resize()
357                 // because the size is determined dynamically
358                 kvs_data *kvs = new kvs_data(init_size);
359                 void *chm = (void*) new CHM(0);
360                 kvs->_data[0].store(chm, memory_order_relaxed);
361                 _kvs.store(kvs, memory_order_release);
362         }
363
364         /**
365                 @Begin
366                 @Interface: Get
367                 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
368                 @ID: __sequential.getKeyTag(key)
369                 @Action:
370                         TypeV *_Old_Val = __sequential.map.get(key)
371                 @Post_check:
372                         __sequential.equals_val(_Old_Val, __RET__)
373                 @End
374         */
375         TypeV* get(TypeK& key) {
376                 void *key_ptr = (void*) new TypeK(key);
377                 slot *key_slot = new slot(false, key_ptr);
378                 int fullhash = hash(key_slot);
379                 kvs_data *kvs = _kvs.load(memory_order_acquire);
380                 slot *V = get_impl(this, kvs, key_slot, fullhash);
381                 if (V == NULL) return NULL;
382                 assert (!is_prime(V));
383                 return (TypeV*) V->_ptr;
384         }
385
386         /**
387                 @Begin
388                 @Interface: Put
389                 @Commit_point_set: Write_Val_Point
390                 @ID: __sequential.getKeyTag(key)
391                 @Action:
392                         # Remember this old value at checking point
393                         TypeV *_Old_Val = __sequential.map.get(key)
394                         __sequential.map.put(key, &val);
395                 @Post_check:
396                         __sequential.equals_val(__RET__, _Old_Val)
397                 @End
398         */
399         TypeV* put(TypeK& key, TypeV& val) {
400                 return putIfMatch(key, val, NO_MATCH_OLD);
401         }
402
403         /**
404                 @Begin
405                 @Interface: PutIfAbsent
406                 @Commit_point_set:
407                         Write_Val_Point | PutIfAbsent_Fail_Point
408                 @Condition: __sequential.map.get(key) == NULL
409                 @HB_condition:
410                         COND_PutIfAbsentSucc :: __RET__ == NULL
411                 @ID: __sequential.getKeyTag(key)
412                 @Action:
413                         TypeV *_Old_Val = __sequential.map.get(key)
414                         if (__COND_SAT__)
415                                 __sequential.map.put(key, &value);
416                 @Post_check:
417                         __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__) 
418                 @End
419         */
420         TypeV* putIfAbsent(TypeK& key, TypeV& value) {
421                 return putIfMatch(key, val, TOMBSTONE);
422         }
423
424         /**
425                 @Begin
426                 @Interface: RemoveAny
427                 @Commit_point_set: Write_Val_Point
428                 @ID: __sequential.getKeyTag(key)
429                 @Action:
430                         TypeV *_Old_Val = __sequential.map.get(key)
431                         __sequential.map.put(key, NULL);
432                 @Post_check:
433                         __sequential.equals_val(__RET__, _Old_Val)
434                 @End
435         */
436         TypeV* remove(TypeK& key) {
437                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
438         }
439
440         /**
441                 @Begin
442                 @Interface: RemoveIfMatch
443                 @Commit_point_set:
444                         Write_Val_Point | RemoveIfMatch_Fail_Point
445                 @Condition:
446                         __sequential.equals_val(__sequential.map.get(key), &val)
447                 @HB_condition:
448                         COND_RemoveIfMatchSucc :: __RET__ == true
449                 @ID: __sequential.getKeyTag(key)
450                 @Action:
451                         if (__COND_SAT__)
452                                 __sequential.map.put(key, NULL);
453                 @Post_check:
454                         __COND_SAT__ ? __RET__ : !__RET__
455                 @End
456         */
457         bool remove(TypeK& key, TypeV& val) {
458                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
459                 return putIfMatch(key, TOMBSTONE, val) == val;
460
461         }
462
463         /**
464                 @Begin
465                 @Interface: ReplaceAny
466                 @Commit_point_set:
467                         Write_Val_Point
468                 @ID: __sequential.getKeyTag(key)
469                 @Action:
470                         TypeV *_Old_Val = __sequential.map.get(key)
471                 @Post_check:
472                         __sequential.equals_val(__RET__, _Old_Val)
473                 @End
474         */
475         TypeV* replace(TypeK& key, TypeV& val) {
476                 return putIfMatch(key, val, MATCH_ANY);
477         }
478
479         /**
480                 @Begin
481                 @Interface: ReplaceIfMatch
482                 @Commit_point_set:
483                         Write_Val_Point | ReplaceIfMatch_Fail_Point
484                 @Condition:
485                         __sequential.equals_val(__sequential.map.get(key), &oldval)
486                 @HB_condition:
487                         COND_ReplaceIfMatchSucc :: __RET__ == true
488                 @ID: __sequential.getKeyTag(key)
489                 @Action:
490                         if (__COND_SAT__)
491                                 __sequential.map.put(key, &newval);
492                 @Post_check:
493                         __COND_SAT__ ? __RET__ : !__RET__
494                 @End
495         */
496         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
497                 return putIfMatch(key, newval, oldval) == oldval;
498         }
499
500         private:
501         static CHM* get_chm(kvs_data* kvs) {
502                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
503         }
504
505         static int* get_hashes(kvs_data *kvs) {
506                 return (int *) kvs->_data[1].load(memory_order_relaxed);
507         }
508         
509         // Preserve happens-before semantics on newly inserted keys
510         static inline slot* key(kvs_data *kvs, int idx) {
511                 assert (idx >= 0 && idx < kvs->_size);
512                 // Corresponding to the volatile read in get_impl() and putIfMatch in
513                 // Cliff Click's Java implementation
514                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
515         }
516
	/**
		The atomic operation in val() function is a "potential" commit point,
		which means in some case it is a real commit point while it is not for
		some other cases. This so happens because the val() function is such a
		fundamental function that many internal operation will call. Our
		strategy is that we label any potential commit points and check if they
		really are the commit points later.
	*/
	// Preserve happens-before semantics on newly inserted values.
	// Returns the value slot of pair idx: array cell idx*2+3 (cells 0 and 1
	// hold the CHM and the hash cache).
	static inline slot* val(kvs_data *kvs, int idx) {
		assert (idx >= 0 && idx < kvs->_size);
		// Corresponding to the volatile read in get_impl() and putIfMatch in
		// Cliff Click's Java implementation
		slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
		/**
			@Begin
			# This is a complicated potential commit point since many many functions are
			# calling val().
			@Potential_commit_point_define: true
			@Label: Read_Val_Point
			@End
		*/
		return res;
	}
543
	// Compute the table hash of a key slot: take TypeK::hashCode() and
	// spread its bits (Cliff Click's mixing function) so similar hash codes
	// land in different buckets. The mixing steps are order-dependent.
	static int hash(slot *key_slot) {
		assert(key_slot != NULL && key_slot->_ptr != NULL);
		TypeK* key = (TypeK*) key_slot->_ptr;
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}
556         
	// Heuristic to decide if reprobed too many times. 
	// Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
	// put it triggers a table resize. Several places MUST have exact agreement.
	static int reprobe_limit(int len) {
		// Base limit plus a quarter of the table length
		return REPROBE_LIMIT + (len >> 2);
	}
563         
564         static inline bool is_prime(slot *val) {
565                 return (val != NULL) && val->_prime;
566         }
567
568         // Check for key equality. Try direct pointer comparison first (fast
569         // negative teset) and then the full 'equals' call
570         static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
571                 int fullhash) {
572                 // Caller should've checked this.
573                 assert (K != NULL);
574                 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
575                 return
576                         K == key_slot ||
577                                 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
578                                 K != TOMBSTONE &&
579                                 key_ptr->equals(K->_ptr));
580         }
581
582         static bool valeq(slot *val_slot1, slot *val_slot2) {
583                 assert (val_slot1 != NULL);
584                 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
585                 if (val_slot2 == NULL || ptr1 == NULL) return false;
586                 return ptr1->equals(val_slot2->_ptr);
587         }
588         
589         // Together with key() preserve the happens-before relationship on newly
590         // inserted keys
591         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
592                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
593                         desired, memory_order_release, memory_order_release);
594         }
595
596         /**
597                 Same as the val() function, we only label the CAS operation as the
598                 potential commit point.
599         */
600         // Together with val() preserve the happens-before relationship on newly
601         // inserted values
602         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
603                 *desired) {
604                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
605                         desired, memory_order_release, memory_order_release);
606                 /**
607                         # If it is a successful put instead of a copy or any other internal
608                         # operantions, expected != NULL
609                         @Begin
610                         @Potential_commit_point_define: __ATOMIC_RET__ == true
611                         @Label: Write_Val_Point
612                         @End
613                 */
614                 return res;
615         }
616
	/**
		Core lookup loop: linearly reprobe from the hashed index until the
		key is found (return its value slot), a NULL value proves a miss,
		or the reprobe limit forces a retry in the new table (helping the
		copy along the way).
	*/
	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		// Mask (not mod): requires len to be a power of two
		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (V == NULL) return NULL; // A miss
			
			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			// NOTE(review): Cliff Click's Java version tests K == TOMBSTONE
			// and reprobe_limit(len) here; this port tests the probe key
			// (key_slot, never TOMBSTONE) and the raw REPROBE_LIMIT —
			// confirm whether that is intended.
			if (++reprobe_cnt >= REPROBE_LIMIT ||
				key_slot == TOMBSTONE) {
				// Retry in new table
				// Atomic read (acquire) can be here 
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}
673
674         // A wrapper of the essential function putIfMatch()
675         TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
676                 // TODO: Should throw an exception rather return NULL
677                 if (old_val == NULL) {
678                         return NULL;
679                 }
680                 void *key_ptr = (void*) new TypeK(key);
681                 slot *key_slot = new slot(false, key_ptr);
682
683                 void *val_ptr = (void*) new TypeV(value);
684                 slot *value_slot = new slot(false, val_ptr);
685                 kvs_data *kvs = _kvs.load(memory_order_acquire);
686                 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
687                 // Only when copy_slot() call putIfMatch() will it return NULL
688                 assert (res != NULL); 
689                 assert (!is_prime(res));
690                 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
691         }
692
693         /**
694                 Put, Remove, PutIfAbsent, etc will call this function. Return the old
695                 value. If the returned value is equals to the expVal (or expVal is
696                 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
697                 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
698                 NULL if passed a NULL expVal.
699         */
700         static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
701                 *key_slot, slot *val_slot, slot *expVal) {
702                 assert (val_slot != NULL);
703                 assert (!is_prime(val_slot));
704                 assert (!is_prime(expVal));
705
706                 int fullhash = hash(key_slot);
707                 int len = kvs->_size;
708                 CHM *chm = get_chm(kvs);
709                 int *hashes = get_hashes(kvs);
710                 int idx = fullhash & (len - 1);
711
712                 // Claim a key slot
713                 int reprobe_cnt = 0;
714                 slot *K;
715                 slot *V;
716                 kvs_data *newkvs;
717                 
718                 while (true) { // Spin till we get a key slot
719                         K = key(kvs, idx);
720                         V = val(kvs, idx);
721                         if (K == NULL) { // Get a free slot
722                                 if (val_slot == TOMBSTONE) return val_slot;
723                                 // Claim the null key-slot
724                                 if (CAS_key(kvs, idx, NULL, key_slot)) {
725                                         chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
726                                         hashes[idx] = fullhash; // Memorize full hash
727                                         break;
728                                 }
729                                 K = key(kvs, idx); // CAS failed, get updated value
730                                 assert (K != NULL);
731                         }
732
733                         // Key slot not null, there exists a Key here
734                         if (keyeq(K, key_slot, hashes, idx, fullhash))
735                                 break; // Got it
736                         
737                         // Notice that the logic here should be consistent with that of get.
738                         // The first predicate means too many reprobes means nothing in the
739                         // old table.
740                         if (++reprobe_cnt >= reprobe_limit(len) ||
741                                 K == TOMBSTONE) { // Found a Tombstone key, no more keys
742                                 newkvs = chm->resize(topmap, kvs);
743                                 // Help along an existing copy
744                                 if (expVal != NULL) topmap->help_copy(newkvs);
745                                 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
746                         }
747
748                         idx = (idx + 1) & (len - 1); // Reprobe
749                 } // End of spinning till we get a Key slot
750
751                 if (val_slot == V) return V; // Fast cutout for no-change
752         
753                 // Here it tries to resize cause it doesn't want other threads to stop
754                 // its progress (eagerly try to resize soon)
755                 newkvs = chm->_newkvs.load(memory_order_acquire);
756                 if (newkvs == NULL &&
757                         ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
758                         newkvs = chm->resize(topmap, kvs); // Force the copy to start
759                 
760                 // Finish the copy and then put it in the new table
761                 if (newkvs != NULL)
762                         return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
763                                 expVal), key_slot, val_slot, expVal);
764                 
765                 // Decided to update the existing table
766                 while (true) {
767                         assert (!is_prime(V));
768
769                         if (expVal != NO_MATCH_OLD &&
770                                 V != expVal &&
771                                 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
772                                 !(V == NULL && expVal == TOMBSTONE) &&
773                                 (expVal == NULL || !valeq(expVal, V))) {
774                                 /**
775                                         @Begin
776                                         @Commit_point_define: expVal == TOMBSTONE
777                                         @Potential_commit_point_label: Read_Val_Point
778                                         @Label: PutIfAbsent_Fail_Point
779                                                 # This is a check for the PutIfAbsent() when the value
780                                                 # is not absent
781                                         @End
782                                 */
783                                 /**
784                                         @Begin
785                                         @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
786                                         @Potential_commit_point_label: Read_Val_Point
787                                         @Label: RemoveIfMatch_Fail_Point
788                                         @End
789                                 */
790                                 /**
791                                         @Begin
792                                         @Commit_point_define: !valeq(expVal, V)
793                                         @Potential_commit_point_label: Read_Val_Point
794                                         @Label: ReplaceIfMatch_Fail_Point
795                                         @End
796                                 */
797                                 return V; // Do not update!
798                         }
799
800                         if (CAS_val(kvs, idx, V, val_slot)) {
801                                 /**
802                                         @Begin
803                                         # The only point where a successful put happens
804                                         @Commit_point_define: true
805                                         @Potential_commit_point_label: Write_Val_Point
806                                         @Label: Write_Success_Point
807                                         @End
808                                 */
809                                 if (expVal != NULL) { // Not called by a table-copy
810                                         // CAS succeeded, should adjust size
811                                         // Both normal put's and table-copy calls putIfMatch, but
812                                         // table-copy does not increase the number of live K/V pairs
813                                         if ((V == NULL || V == TOMBSTONE) &&
814                                                 val_slot != TOMBSTONE)
815                                                 chm->_size.fetch_add(1, memory_order_relaxed);
816                                         if (!(V == NULL || V == TOMBSTONE) &&
817                                                 val_slot == TOMBSTONE)
818                                                 chm->_size.fetch_add(-1, memory_order_relaxed);
819                                 }
820                                 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
821                         }
822                         // Else CAS failed
823                         V = val(kvs, idx);
824                         if (is_prime(V))
825                                 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
826                                         idx, expVal), key_slot, val_slot, expVal);
827                 }
828         }
829
830         // Help along an existing table-resize. This is a fast cut-out wrapper.
831         kvs_data* help_copy(kvs_data *helper) {
832                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
833                 CHM *topchm = get_chm(topkvs);
834                 // No cpy in progress
835                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
836                 topchm->help_copy_impl(this, topkvs, false);
837                 return helper;
838         }
839 };
840
841 #endif