// Simplified version of Cliff Click's NonBlockingHashMap
// (from the cdsspec-compiler project: grammer/simplified_cliffc_hashtable.h)
1 #ifndef SIMPLIFIED_CLIFFC_HASHTABLE_H
2 #define SIMPLIFIED_CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
11
12
13 /**
14         This header file declares and defines a simplified version of Cliff Click's
	NonblockingHashMap. It contains all the necessary structures and main
16         functions. In simplified_cliffc_hashtable.cc file, it has the definition for
17         the static fields.
18 */
19
20 template<typename TypeK, typename TypeV>
21 class cliffc_hashtable;
22
23 /**
	Corresponding to the Object[] array in Cliff Click's Java implementation.
25         It keeps the first two slots for CHM (Hashtable control unit) and the hash
26         records (an array of hash used for fast negative key-equality check).
27 */
struct kvs_data {
	int _size;            // Number of key/value pairs the table can hold
	atomic<void*> *_data; // Backing array: 2 control slots + 2 slots per pair

	kvs_data(int sz) {
		_size = sz;
		// Layout: _data[0] = CHM control block, _data[1] = hash cache,
		// then one (key, value) slot pair per entry, i.e. 2*sz + 2 cells.
		// BUG FIX: the old code sized the array from
		// sizeof(atomic<void*>) * 2 + 2, which is a constant unrelated to
		// the requested table size and under-allocates for sz > 8 (64-bit).
		int real_size = sz * 2 + 2;
		_data = new atomic<void*>[real_size];
		// The control block (_data[0]) should be initialized in resize()
		// Init the hash record array (one cached full-hash per entry)
		int *hashes = new int[_size];
		int i;
		for (i = 0; i < _size; i++) {
			hashes[i] = 0;
		}
		_data[1].store(hashes, memory_order_relaxed);
		// Init the data slots to NULL (the empty slot)
		for (i = 2; i < real_size; i++) {
			_data[i].store(NULL, memory_order_relaxed);
		}
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(memory_order_relaxed);
		// BUG FIX: hashes came from new[], so it needs delete[] (the old
		// scalar delete is undefined behavior on an array allocation)
		delete[] hashes;
		delete[] _data;
	}
};
56
// One key or value cell of the table. '_prime' marks a slot that has been
// boxed (primed) during a table copy; '_ptr' shares ownership of the
// underlying key/value object.
struct slot {
	bool _prime;
	shared_ptr<void> _ptr;

	slot(bool prime, shared_ptr<void> ptr) : _prime(prime), _ptr(ptr) {
	}
};
67
68
69 /**
70         TypeK must have defined function "int hashCode()" which return the hash
71         code for the its object, and "int equals(TypeK anotherKey)" which is
72         used to judge equality.
73         TypeK and TypeV should define their own copy constructor.
74         To make the memory management safe and similar to Cliff Click's Java
75         implementation, we use shared_ptr instead of normal pointer in terms of the
76         pointers that point to TypeK and TypeV.
77 */
78 template<typename TypeK, typename TypeV>
79 class cliffc_hashtable {
80         /**
81                 # The synchronization we have for the hashtable gives us the property of
82                 # serializability, so we should have a sequential hashtable when we check the
83                 # correctness. The key thing is to identify all the commit point.
84         
85                 @Begin
86                 @Global_define:
87
88                         spec_hashtable<TypeK, TypeV*> __map;
89                         spec_hashtable<TypeK, Tag> __id_map;
90                         Tag __tag;
91
92                         static bool _val_equals(TypeV *ptr1, TypeV *ptr2) {
93                                 // ...
94                         }
95                         
96                         # Update the tag for the current key slot if the corresponding tag
97                         # is NULL, otherwise just return that tag. It will update the next
98                         # available tag too if it requires a new tag for that key slot.
99                         static Tag _getKeyTag(TypeK &key) {
100                                 if (__id_map.get(key) == NULL) {
101                                         Tag cur_tag = tag.current();
102                                         __id_map.put(key, cur_tag);
103                                         __tag.next();
104                                         return cur_tag;
105                                 } else {
106                                         return __id_map.get(key);
107                                 }
108                         }
109                 
110                 @Interface_cluster:
111                         Read_interface = Get + PutIfAbsent + RemoveAny + RemoveIfMatch +
112                                 ReplaceAny + ReplaceIfMatch;
113                         Write_interface = Put + PutIfAbsent(__RET__ == NULL) + RemoveAny +
114                                 RemoveIfMatch(__RET__ == true) + ReplaceAny +
115                                         ReplaceIfMatch(__RET__ == true);
116                 @Happens_before:
117                         Write_interface -> Read_interface
118                 @End
119         */
120
121 friend class CHM;
122         /**
123                 The control structure for the hashtable
124         */
125         private:
126         class CHM {
127                 friend class cliffc_hashtable;
128                 private:
129                 atomic<kvs_data*> _newkvs;
130                 
131                 // Size of active K,V pairs
132                 atomic_int _size;
133         
134                 // Count of used slots
135                 atomic_int _slots;
136                 
137                 // The next part of the table to copy
138                 atomic_int _copy_idx;
139                 
140                 // Work-done reporting
141                 atomic_int _copy_done;
142         
143                 public:
144                 CHM(int size) {
145                         _size.store(size, memory_order_relaxed);
146                         _slots.store(0, memory_order_relaxed);
147         
148                         _copy_idx.store(0, memory_order_relaxed);
149                         _copy_done.store(0, memory_order_release);
150                 }
151         
152                 ~CHM() {}
153                 
154                 private:
155                         
156                 // Heuristic to decide if the table is too full
157                 bool table_full(int reprobe_cnt, int len) {
158                         return
159                                 reprobe_cnt >= REPROBE_LIMIT &&
160                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
161                 }
162         
163                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
164                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
165                         if (newkvs != NULL)
166                                 return newkvs;
167         
168                         // No copy in-progress, start one; Only double the table size
169                         int oldlen = kvs->_size;
170                         int sz = _size.load(memory_order_relaxed);
171                         int newsz = sz;
172                         
173                         // Just follow Cliff Click's heuristic to decide the new size
174                         if (sz >= (oldlen >> 2)) { // If we are 25% full
175                                 newsz = oldlen << 1; // Double size
176                                 if (sz >= (oldlen >> 1))
177                                         newsz = oldlen << 2; // Double double size
178                         }
179         
180                         // We do not record the record timestamp
181                         if (newsz <= oldlen) newsz = oldlen << 1;
182                         // Do not shrink ever
183                         if (newsz < oldlen) newsz = oldlen;
184         
185                         // Last check cause the 'new' below is expensive
186                         newkvs = _newkvs.load(memory_order_acquire);
187                         if (newkvs != NULL) return newkvs;
188         
189                         newkvs = new kvs_data(newsz);
190                         void *chm = (void*) new CHM(sz);
191                         newkvs->_data[0].store(chm, memory_order_relaxed);
192         
193                         kvs_data *cur_newkvs; 
194                         // Another check after the slow allocation
195                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
196                                 return cur_newkvs;
197                         // CAS the _newkvs to the allocated table
198                         kvs_data *desired = (kvs_data*) NULL;
199                         kvs_data *expected = (kvs_data*) newkvs; 
200                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
201                                         memory_order_release)) {
202                                 // Should clean the allocated area
203                                 delete newkvs;
204                                 newkvs = _newkvs.load(memory_order_acquire);
205                         }
206                         return newkvs;
207                 }
208         
209                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
210                         bool copy_all) {
211                         assert (get_chm(oldkvs) == this);
212                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
213                         int oldlen = oldkvs->_size;
214                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
215                 
216                         // Just follow Cliff Click's code here
217                         int panic_start = -1;
218                         int copyidx;
219                         while (_copy_done.load(memory_order_acquire) < oldlen) {
220                                 copyidx = _copy_idx.load(memory_order_acquire);
221                                 if (panic_start == -1) { // No painc
222                                         copyidx = _copy_idx.load(memory_order_acquire);
223                                         while (copyidx < (oldlen << 1) &&
224                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
225                                                         min_copy_work, memory_order_release, memory_order_release))
226                                                 copyidx = _copy_idx.load(memory_order_relaxed);
227                                         if (!(copyidx < (oldlen << 1)))
228                                                 panic_start = copyidx;
229                                 }
230         
231                                 // Now copy the chunk of work we claimed
232                                 int workdone = 0;
233                                 for (int i = 0; i < min_copy_work; i++)
234                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
235                                                 newkvs))
236                                                 workdone++;
237                                 if (workdone > 0)
238                                         copy_check_and_promote(topmap, oldkvs, workdone);
239         
240                                 copyidx += min_copy_work;
241                                 if (!copy_all && panic_start == -1)
242                                         return; // We are done with the work we claim
243                         }
244                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
245                 }
246         
247                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
248                         *oldkvs, int idx, void *should_help) {
249                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
250                         // We're only here cause the caller saw a Prime
251                         if (copy_slot(topmap, idx, oldkvs, _newkvs))
252                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
253                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
254                 }
255         
256                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
257                         oldkvs, int workdone) {
258                         int oldlen = oldkvs->_size;
259                         int copyDone = _copy_done.load(memory_order_relaxed);
260                         if (workdone > 0) {
261                                 while (true) {
262                                         copyDone = _copy_done.load(memory_order_relaxed);
263                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
264                                                 workdone, memory_order_relaxed, memory_order_relaxed))
265                                                 break;
266                                 }
267                         }
268         
269                         // Promote the new table to the current table
270                         if (copyDone + workdone == oldlen &&
271                                 topmap->_kvs.load(memory_order_acquire) == oldkvs)
272                                 topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs, memory_order_release,
273                                         memory_order_release);
274                 }
275         
276                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
277                         kvs_data *newkvs) {
278                         slot *key_slot;
279                         while ((key_slot = key(oldkvs, idx)) == NULL)
280                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
281         
282                         // First CAS old to Prime
283                         slot *oldval = val(oldkvs, idx, NULL);
284                         while (!is_prime(oldval)) {
285                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
286                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
287                                 if (CAS_val(oldkvs, idx, oldval, box)) {
288                                         if (box == TOMBPRIME)
289                                                 return 1; // Copy done
290                                         // Otherwise we CAS'd the box
291                                         oldval = box; // Record updated oldval
292                                         break;
293                                 }
294                                 oldval = val(oldkvs, idx, NULL); // Else re-try
295                         }
296         
297                         if (oldval == TOMBPRIME) return false; // Copy already completed here
298         
299                         slot *old_unboxed = new slot(false, oldval->_ptr);
300                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
301                                 NULL) == NULL);
302         
303                         // Old value is exposed in the new table
304                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
305                                 oldval = val(oldkvs, idx, NULL);
306         
307                         return copied_into_new;
308                 }
309         };
310
311         
312
	private:
	static const int Default_Init_Size = 8; // Initial table size

	// Sentinel slots; their storage is defined in
	// simplified_cliffc_hashtable.cc (see the header comment above).
	// MATCH_ANY / NO_MATCH_OLD encode the expected-value argument of
	// putIfMatch(); TOMBSTONE marks a deleted value, and TOMBPRIME a
	// deleted value in a table that is being copied.
	static slot* const MATCH_ANY;
	static slot* const NO_MATCH_OLD;

	static slot* const TOMBPRIME;
	static slot* const TOMBSTONE;

	static const int REPROBE_LIMIT = 10; // Forces a table-resize

	// The current key/value table; swapped for the new table once a
	// resize-copy completes (see CHM::copy_check_and_promote)
	atomic<kvs_data*> _kvs;
325
326         public:
327         cliffc_hashtable() {
328                 // Should initialize the CHM for the construction of the table
329                 // For other CHM in kvs_data, they should be initialzed in resize()
330                 // because the size is determined dynamically
331                 kvs_data *kvs = new kvs_data(Default_Init_Size);
332                 void *chm = (void*) new CHM(0);
333                 kvs->_data[0].store(chm, memory_order_relaxed);
334                 _kvs.store(kvs, memory_order_release);
335         }
336
337         cliffc_hashtable(int init_size) {
338                 // Should initialize the CHM for the construction of the table
339                 // For other CHM in kvs_data, they should be initialzed in resize()
340                 // because the size is determined dynamically
341                 kvs_data *kvs = new kvs_data(init_size);
342                 void *chm = (void*) new CHM(0);
343                 kvs->_data[0].store(chm, memory_order_relaxed);
344                 _kvs.store(kvs, memory_order_release);
345         }
346
347         /**
348                 @Begin
349                 @Interface
350                 @Commit_point_set:
351                         Get = Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
352                 @ID: _getKeyTag(key)
353                 @Action:
354                         TypeV *_Old_Val = __map.get(key);
355                 @Post_check:
356                         _equals_val(_Old_Val, __RET__)
357                 @End
358         */
359         shared_ptr<TypeV> get(TypeK& key) {
360                 void *key_ptr = (void*) new TypeK(key);
361                 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
362                 int fullhash = hash(key_slot);
363                 slot *V = get_impl(this, _kvs, key_slot, fullhash);
364                 if (V == NULL) return NULL;
365                 assert (!is_prime(V));
366                 return static_pointer_cast<TypeV>(V->_ptr);
367         }
368
369         /**
370                 @Begin
371                 @Interface
372                 @Commit_point_set:
373                         Put = Write_Val_Point
374                 @ID: _getKeyTag(key)
375                 @Action:
376                         # Remember this old value at checking point
377                         TypeV *_Old_Val = __map.get(key);
378                         __map.put(key, &val);
379                 @Post_check:
380                         _equals_val(__RET__, _Old_Val)
381                 @End
382         */
	// Public put: maps 'key' to 'val' unconditionally (NO_MATCH_OLD means
	// "install regardless of the old value"); returns the previous value,
	// or NULL if the key had no mapping.
	shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
		return putIfMatch(key, val, NO_MATCH_OLD);
	}
386
387         /**
388                 @Begin
389                 @Interface: PutIfAbsent
390                 @Commit_point_set:
391                         Write_Val_Point | PutIfAbsent_Fail_Point
392                 @Condition: __map.get(key) == NULL
393                 @ID: _getKeyTag(key)
394                 @Action:
395                         TypeV *_Old_Val = __map.get(key);
			if (COND_SAT)
				__map.put(key, &value);
399                 @Post_check:
400                         COND_SAT ? __RET__ == NULL : _equals_val(_Old_Val, __RET__) 
401                 @End
402         */
403         shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
404                 return putIfMatch(key, val, TOMBSTONE);
405         }
406
407         /**
408                 @Begin
409                 @Interface: RemoveAny
410                 @Commit_point_set: Write_Val_Point
411                 @ID: _getKeyTag(key)
412                 @Action:
413                         TypeV *_Old_Val = __map.get(key);
414                         __map.put(key, NULL);
415                 @Post_check:
416                         _equals_val(__RET__, _Old_Val)
417                 @End
418         */
419         shared_ptr<TypeV> remove(TypeK& key) {
420                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
421         }
422
423         /**
424                 @Begin
		@Interface: RemoveIfMatch
426                 @Commit_point_set:
427                         Write_Val_Point | RemoveIfMatch_Fail_Point
428                 @Condition:
429                         _equals_val(__map.get(key), &val)
430                 @ID: _getKeyTag(key)
431                 @Action:
432                         if (COND_SAT)
433                                 __map.put(key, NULL);
434                 @Post_check:
435                         COND_SAT ? __RET__ : !__RET__
436                 @End
437         */
438         bool remove(TypeK& key, TypeV& val) {
439                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
440                 return putIfMatch(key, TOMBSTONE, val) == val;
441
442         }
443
444         /**
445                 @Begin
446                 @Interface: ReplaceAny
447                 @Commit_point_set:
448                         Write_Val_Point
449                 @ID: _getKeyTag(key)
450                 @Action:
451                         TypeV *_Old_Val = __map.get(key);
452                 @Post_check:
453                         _equals_val(__RET__, _Old_Val)
454                 @End
455         */
	// Public replace: installs 'val' only if 'key' already has some mapping
	// (MATCH_ANY as the expected old value). Returns the previous value, or
	// NULL if the key was absent and nothing was installed.
	shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
		return putIfMatch(key, val, MATCH_ANY);
	}
459
460         /**
461                 @Begin
462                 @Interface: ReplaceIfMatch
463                 @Commit_point_set:
464                         Write_Val_Point | ReplaceIfMatch_Fail_Point
465                 @Condition:
466                         _equals_val(__map.get(key), &oldval)
467                 @ID: _getKeyTag(key)
468                 @Action:
469                         if (COND_SAT)
470                                 __map.put(key, &newval);
471                 @Post_check:
472                         COND_SAT ? __RET__ : !__RET__
473                 @End
474         */
475         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
476                 return putIfMatch(key, newval, oldval) == oldval;
477         }
478
479         private:
480         static CHM* get_chm(kvs_data* kvs) {
481                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
482         }
483
484         static int* get_hashes(kvs_data *kvs) {
485                 return (int *) kvs->_data[1].load(memory_order_relaxed);
486         }
487         
488         // Preserve happens-before semantics on newly inserted keys
489         static inline slot* key(kvs_data *kvs, int idx) {
490                 assert (idx >= 0 && idx < kvs->_size);
491                 // Corresponding to the volatile read in get_impl() and putIfMatch in
492                 // Cliff Click's Java implementation
493                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
494         }
495
496         /**
497                 The atomic operation in val() function is a "potential" commit point,
498                 which means in some case it is a real commit point while it is not for
499                 some other cases. This so happens because the val() function is such a
500                 fundamental function that many internal operation will call. Our
501                 strategy is that we label any potential commit points and check if they
502                 really are the commit points later.
503         */
504         // Preserve happens-before semantics on newly inserted values
505         static inline slot* val(kvs_data *kvs, int idx) {
506                 assert (idx >= 0 && idx < kvs->_size);
507                 // Corresponding to the volatile read in get_impl() and putIfMatch in
508                 // Cliff Click's Java implementation
509                 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
510                 /**
511                         @Begin
512                         # This is a complicated potential commit point since many many functions are
513                         # calling val().
514                         @Potential_commit_point_define: true
515                         @Label: Read_Val_Point
516                         @End
517                 */
518                 return res;
519
520
521         }
522
	// Compute the spread hash of the key stored in 'key_slot'. Requires
	// TypeK to provide 'int hashCode()' (see the class comment). The
	// mixing steps are order-dependent and must match Cliff Click's code
	// exactly so the cached hashes stay comparable.
	static int hash(slot *key_slot) {
		assert(key_slot != NULL && key_slot->_ptr != NULL);
		shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
		int h = key->hashCode();
		// Spread bits according to Cliff Click's code
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >> 10);
		h += (h << 3);
		h ^= (h >> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >> 16);
	}
535         
536         // Heuristic to decide if reprobed too many times. 
537         // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
538         // put it triggers a table resize. Several places MUST have exact agreement.
539         static int reprobe_limit(int len) {
540                 return REPROBE_LIMIT + (len >> 2);
541         }
542         
543         static inline bool is_prime(slot *val) {
544                 return (val != NULL) && val->_prime;
545         }
546
	// Check for key equality. Try direct pointer comparison first (fast
	// negative test) and then the full 'equals' call
	static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
		int fullhash) {
		// Caller should've checked this.
		assert (K != NULL);
		// NOTE(review): despite the name, every caller (get_impl,
		// putIfMatch) passes the table *index* as 'hash', so hashes[hash]
		// reads this slot's cached full hash — confirm the intent and
		// consider renaming the parameter.
		shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
		// Fast path: same slot object. Otherwise the cached hash must be
		// unset (0) or equal to the probe's full hash, the slot must not
		// be a tombstone, and the keys must compare equal.
		// NOTE(review): equals() is documented above as taking a TypeK,
		// but K->_ptr is a shared_ptr<void> — verify TypeK::equals's
		// expected signature.
		return
			K == key_slot ||
				((hashes[hash] == 0 || hashes[hash] == fullhash) &&
				K != TOMBSTONE &&
				key_ptr->equals(K->_ptr));
	}
560
561         static bool valeq(slot *val_slot1, slot *val_slot2) {
562                 assert (val_slot1 != NULL);
563                 shared_ptr<TypeK> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
564                 if (val_slot2 == NULL || ptr1 == NULL) return false;
565                 return ptr1->equals(val_slot2->_ptr);
566         }
567         
568         // Together with key() preserve the happens-before relationship on newly
569         // inserted keys
570         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
571                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
572                         desired, memory_order_release, memory_order_release);
573         }
574
575         /**
576                 Same as the val() function, we only label the CAS operation as the
577                 potential commit point.
578         */
579         // Together with val() preserve the happens-before relationship on newly
580         // inserted values
581         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
582                 *desired) {
583                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
584                         desired, memory_order_release, memory_order_release);
585                 /**
586                         # If it is a successful put instead of a copy or any other internal
587                         # operantions, expected != NULL
588                         @Begin
589                         @Potential_commit_point_define: __ATOMIC_RET__ == true
590                         @Label: Write_Val_Point
591                         @End
592                 */
593                 return res;
594         }
595
	// Core lookup loop: probe linearly from the hashed index until the key
	// is found, a NULL value cell proves a miss, or the reprobe limit is
	// hit (in which case the search restarts in the newer table, if any).
	slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
		fullhash) {
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);

		// First probe position: full hash masked into the table
		int idx = fullhash & (len - 1);
		int reprobe_cnt = 0;
		while (true) {
			slot *K = key(kvs, idx);
			slot *V = val(kvs, idx);
			/**
				@Begin
				@Commit_point_define: V == NULL
				@Potential_commit_point_label: Read_Val_Point
				@Label: Get_Success_Point_1
				@End
			*/

			if (V == NULL) return NULL; // A miss
			
			if (keyeq(K, key_slot, hashes, idx, fullhash)) {
				// Key hit! Check if table-resize in progress
				if (!is_prime(V)) {
					/**
						@Begin
						@Commit_point_define: true
						@Potential_commit_point_label: Read_Val_Point
						@Label: Get_Success_Point_2
						@End
					*/
					return (V == TOMBSTONE) ? NULL : V; // Return this value
				}
				// Otherwise, finish the copy & retry in the new table
				return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, key_slot), key_slot, fullhash);
			}

			if (++reprobe_cnt >= REPROBE_LIMIT ||
				key_slot == TOMBSTONE) {
				// Retry in new table
				// Atomic read (acquire) can be here 
				kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
				/**
					@Begin
					@Commit_point_define_check: newkvs == NULL
					@Label: Get_Success_Point_3
					@End
				*/
				// No newer table: the reprobe limit makes this a definitive miss
				return newkvs == NULL ? NULL : get_impl(topmap,
					topmap->help_copy(newkvs), key_slot, fullhash);
			}

			idx = (idx + 1) & (len - 1); // Reprobe by 1
		}
	}
652
653         // A wrapper of the essential function putIfMatch()
	// Wrap 'key'/'value' into table-owned slots (fresh copies held via
	// shared_ptr) and run the core putIfMatch() against the current table.
	// Returns the previous value, or NULL when the key had no mapping.
	shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
		// TODO: Should throw an exception rather return NULL
		if (old_val == NULL) {
			return NULL;
		}
		void *key_ptr = (void*) new TypeK(key);
		slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));

		void *val_ptr = (void*) new TypeV(value);
		slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
		// NOTE(review): _kvs (atomic<kvs_data*>) converts to kvs_data*
		// here through atomic's implicit seq_cst load — confirm intended
		slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
		// Only when copy_slot() call putIfMatch() will it return NULL
		assert (res != NULL); 
		assert (!is_prime(res));
		return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
	}
670
	/**
		Put, Remove, PutIfAbsent, etc. will call this function. Returns the
		old value. If the returned value equals expVal (or expVal is
		NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
		Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
		NULL if passed a NULL expVal.

		@param topmap The hashtable this kvs array belongs to
		@param kvs The key/value slot array to operate on
		@param key_slot The boxed key (never NULL here)
		@param val_slot The boxed new value; TOMBSTONE encodes a remove
		@param expVal Expected old value; NULL only on the copy_slot() path
		@return The prior value slot (TOMBSTONE when the slot was logically
			empty and this is a normal call), or NULL only when expVal is NULL
	*/
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		assert (val_slot != NULL);
		assert (!is_prime(val_slot));
		assert (!is_prime(expVal));

		// Full hash is also memorized in hashes[] for the fast negative
		// key-equality check (see kvs_data in the header comment).
		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;
		
		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx, NULL);
			if (K == NULL) { // Get a free slot
				// Removing a key that was never present: nothing to do.
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				assert (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it
			
			// Notice that the logic here should be consistent with that of get.
			// The first predicate means too many reprobes means nothing in the
			// old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				// Retry in the new, larger table.
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change
	
		// Here it tries to resize cause it doesn't want other threads to stop
		// its progress (eagerly try to resize soon)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start
		
		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);
		
		// Decided to update the existing table
		while (true) {
			assert (!is_prime(V));

			// Mismatch test: bail out (returning the current value) unless the
			// caller asked for an unconditional write (NO_MATCH_OLD), the old
			// value matches expVal by identity, MATCH_ANY matches a live value,
			// an empty slot (NULL) matches an expected TOMBSTONE, or the old
			// value matches expVal by content (valeq).
			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End

					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
					
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, should adjust size
					// Both normal put's and table-copy calls putIfMatch, but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				// Report an empty prior slot as TOMBSTONE on the normal path.
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx, NULL);
			// A primed value means a copy to a new table has started for this
			// slot; finish that copy, then retry in the new table.
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}
805
806         // Help along an existing table-resize. This is a fast cut-out wrapper.
807         kvs_data* help_copy(kvs_data *helper) {
808                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
809                 CHM *topchm = get_chm(topkvs);
810                 // No cpy in progress
811                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
812                 topchm->help_copy_impl(this, topkvs, false);
813                 return helper;
814         }
815 };
816
817 #endif