1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
This header file declares and defines a simplified version of Cliff Click's
NonblockingHashMap. It contains all the necessary structures and main
functions. In simplified_cliffc_hashtable.cc file, it has the definition for
// Forward declaration: lets kvs_data/CHM helpers refer to the table type.
template<typename TypeK, typename TypeV>
class cliffc_hashtable;
Corresponding to the Object[] array in Cliff Click's Java implementation.
It keeps the first two slots for CHM (Hashtable control unit) and the hash
records (an array of hash used for fast negative key-equality check).
// --- kvs_data (fragmentary view; some lines are not visible here) ---
// Backing array layout: slot 0 = CHM control block, slot 1 = hash-record
// array, then alternating key/value slots — hence sz * 2 + 2 entries.
int real_size = sz * 2 + 2;
_data = new atomic<void*>[real_size];
// The control block should be initialized in resize()
// Init the hash record array
int *hashes = new int[_size];
for (i = 0; i < _size; i++) {
// Init the data to Null slot
for (i = 2; i < real_size; i++) {
	_data[i].store(NULL, memory_order_relaxed);
// Publish the hash-record array into slot 1 (construction-time, relaxed).
_data[1].store(hashes, memory_order_relaxed);
// Destructor side: fetch the hash array back so it can be reclaimed.
int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: a (prime flag, payload) pair; 'prime' marks a value being copied.
slot(bool prime, void *ptr) {
TypeK must have defined function "int hashCode()" which returns the hash
code for its object, and "int equals(TypeK anotherKey)" which is
used to judge equality.
TypeK and TypeV should define their own copy constructor.
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
	# The synchronization we have for the hashtable gives us the property of
	# serializability, so we should have a sequential hashtable when we check the
	# correctness. The key thing is to identify all the commit point.
	CLASS = cliffc_hashtable;
	// Spec state: 'map' models the sequential hashtable; 'id_map' binds
	// call-id tags to keys for commit-point checking.
	map = new_spec_table_default(equals_key);
	id_map = new_spec_table_default(equals_id);
111 bool equals_key(void *ptr1, void *ptr2) {
112 TypeK *key1 = (TypeK*) ptr1,
113 *key2 = (TypeK*) ptr2;
114 if (key1 == NULL || key2 == NULL)
116 return key1->equals(key2);
120 bool equals_val(void *ptr1, void *ptr2) {
123 TypeV *val1 = (TypeV*) ptr1,
124 *val2 = (TypeV*) ptr2;
125 if (val1 == NULL || val2 == NULL)
127 return val1->equals(val2);
131 bool equals_id(void *ptr1, void *ptr2) {
132 id_tag_t *id1 = (id_tag_t*) ptr1,
133 *id2 = (id_tag_t*) ptr2;
134 if (id1 == NULL || id2 == NULL)
136 return (*id1).tag == (*id2).tag;
# Update the tag for the current key slot if the corresponding tag
# is NULL, otherwise just return that tag. It will update the next
# available tag too if it requires a new tag for that key slot.
call_id_t getKeyTag(TypeK *key) {
	// First sighting of this key: bind it to the current tag value.
	if (!spec_table_contains(id_map, key)) {
		call_id_t cur_id = current(tag);
		spec_table_put(id_map, key, (void*) cur_id);
	// NOTE(review): the lines advancing 'tag' are not visible in this view.
	call_id_t res = (call_id_t) spec_table_get(id_map, key);
The control structure for the hashtable
friend class cliffc_hashtable;
// Next table in the resize chain; NULL means no copy is in progress.
atomic<kvs_data*> _newkvs;
// Size of active K,V pairs
// Count of used slots
// The next part of the table to copy
atomic_int _copy_idx;
// Work-done reporting
atomic_int _copy_done;
// Constructor body (fragment): counters start at zero with relaxed
// stores, _size at the requested size, and no new table yet.
_newkvs.store(NULL, memory_order_relaxed);
_size.store(size, memory_order_relaxed);
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
_copy_done.store(0, memory_order_relaxed);
196 // Heuristic to decide if the table is too full
197 bool table_full(int reprobe_cnt, int len) {
199 reprobe_cnt >= REPROBE_LIMIT &&
200 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Allocate (or join) the next, larger table. Returns the new kvs; only one
// thread's CAS on _newkvs wins, the others adopt the winner's table.
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
	//model_print("resizing...\n");
	/**** FIXME: miss ****/
	kvs_data *newkvs = _newkvs.load(memory_order_acquire);
	// No copy in-progress, start one; Only double the table size
	int oldlen = kvs->_size;
	int sz = _size.load(memory_order_relaxed);
	// NOTE(review): declaration of 'newsz' is not visible in this view.
	// Just follow Cliff Click's heuristic to decide the new size
	if (sz >= (oldlen >> 2)) { // If we are 25% full
		newsz = oldlen << 1; // Double size
		if (sz >= (oldlen >> 1))
			newsz = oldlen << 2; // Double double size
	// We do not record the record timestamp
	if (newsz <= oldlen) newsz = oldlen << 1;
	// Do not shrink ever
	if (newsz < oldlen) newsz = oldlen;
	// Last check cause the 'new' below is expensive
	/**** FIXME: miss ****/
	newkvs = _newkvs.load(memory_order_acquire);
	//model_print("hey1\n");
	if (newkvs != NULL) return newkvs; // another thread already installed one
	newkvs = new kvs_data(newsz);
	void *chm = (void*) new CHM(sz);
	//model_print("hey2\n");
	newkvs->_data[0].store(chm, memory_order_relaxed);
	kvs_data *cur_newkvs;
	// Another check after the slow allocation
	/**** FIXME: miss ****/
	if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
	// CAS the _newkvs to the allocated table
	// NOTE(review): the names are swapped relative to their CAS roles —
	// 'desired' (NULL) is the expected value, 'expected' is the new table.
	kvs_data *desired = (kvs_data*) NULL;
	kvs_data *expected = (kvs_data*) newkvs;
	/**** FIXME: miss ****/
	//model_print("release in resize!\n");
	if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
			memory_order_relaxed)) {
		// Should clean the allocated area
	/**** FIXME: miss ****/
	newkvs = _newkvs.load(memory_order_acquire);
// Help copy the old table into the new one, claiming chunks of
// min_copy_work slots at a time via CAS on _copy_idx.
void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
	MODEL_ASSERT (get_chm(oldkvs) == this);
	/**** FIXME: miss ****/
	kvs_data *newkvs = _newkvs.load(memory_order_acquire);
	int oldlen = oldkvs->_size;
	// Chunk size: at most 1024 slots per claim.
	int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
	// Just follow Cliff Click's code here
	int panic_start = -1;
	while (_copy_done.load(memory_order_relaxed) < oldlen) {
		copyidx = _copy_idx.load(memory_order_relaxed);
		if (panic_start == -1) { // No panic
			copyidx = _copy_idx.load(memory_order_relaxed);
			// Claim [copyidx, copyidx + min_copy_work) via CAS.
			while (copyidx < (oldlen << 1) &&
				!_copy_idx.compare_exchange_strong(copyidx, copyidx +
					min_copy_work, memory_order_relaxed, memory_order_relaxed))
				copyidx = _copy_idx.load(memory_order_relaxed);
			// Index ran past 2*oldlen: switch to panic mode (copy everything).
			if (!(copyidx < (oldlen << 1)))
				panic_start = copyidx;
		// Now copy the chunk of work we claimed
		for (int i = 0; i < min_copy_work; i++)
			if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
		copy_check_and_promote(topmap, oldkvs, workdone);
		copyidx += min_copy_work;
		if (!copy_all && panic_start == -1)
			return; // We are done with the work we claim
	copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy one slot (the caller saw a Prime there), record the work, and
// optionally help finish the rest of the copy.
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
	*oldkvs, int idx, void *should_help) {
	/**** FIXME: miss ****/
	kvs_data *newkvs = _newkvs.load(memory_order_acquire);
	// We're only here cause the caller saw a Prime
	if (copy_slot(topmap, idx, oldkvs, newkvs))
		copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
	// should_help == NULL means a table-copy internal call: no extra helping.
	return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Fold 'workdone' slots into _copy_done and, when the whole old table has
// been copied, promote _newkvs to be the top-level table.
void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
	oldkvs, int workdone) {
	int oldlen = oldkvs->_size;
	int copyDone = _copy_done.load(memory_order_relaxed);
	// CAS-loop fragment: retry until our workdone is added to the counter.
	copyDone = _copy_done.load(memory_order_relaxed);
	if (_copy_done.compare_exchange_weak(copyDone, copyDone +
			workdone, memory_order_relaxed, memory_order_relaxed))
	// Promote the new table to the current table
	if (copyDone + workdone == oldlen &&
			topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
		/**** FIXME: miss ****/
		kvs_data *newkvs = _newkvs.load(memory_order_acquire);
		/**** CDSChecker error ****/
		topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
			memory_order_relaxed);
// Copy one key/value pair from the old table to the new one. Returns
// true iff this thread performed the copy (vs someone else finishing it).
bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
	// Close an empty key slot with TOMBSTONE so no one can insert there.
	while ((key_slot = key(oldkvs, idx)) == NULL)
		CAS_key(oldkvs, idx, NULL, TOMBSTONE);
	// First CAS old to Prime
	slot *oldval = val(oldkvs, idx);
	while (!is_prime(oldval)) {
		// Dead slots get TOMBPRIME; live values are boxed into a prime slot.
		slot *box = (oldval == NULL || oldval == TOMBSTONE)
			? TOMBPRIME : new slot(true, oldval->_ptr);
		if (CAS_val(oldkvs, idx, oldval, box)) {
			if (box == TOMBPRIME)
				return 1; // Copy done
			// Otherwise we CAS'd the box
			oldval = box; // Record updated oldval
		oldval = val(oldkvs, idx); // Else re-try
	if (oldval == TOMBPRIME) return false; // Copy already completed here
	// Re-insert the unboxed old value into the new table.
	slot *old_unboxed = new slot(false, oldval->_ptr);
	int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
	// Old value is exposed in the new table
	while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
		oldval = val(oldkvs, idx);
	return copied_into_new;
static const int Default_Init_Size = 4; // Initial table size
// Sentinel slot values (defined out of line):
static slot* const MATCH_ANY;     // expVal wildcard: replace any existing value
static slot* const NO_MATCH_OLD;  // expVal wildcard: unconditional put
static slot* const TOMBPRIME;     // tombstone boxed as prime (copy finished)
static slot* const TOMBSTONE;     // deleted-entry marker
static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table.
atomic<kvs_data*> _kvs;
// Should initialize the CHM for the construction of the table
// For other CHM in kvs_data, they should be initialized in resize()
// because the size is determined dynamically
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
_kvs.store(kvs, memory_order_relaxed);
// Construct a table with a caller-chosen initial size.
cliffc_hashtable(int init_size) {
	// Should initialize the CHM for the construction of the table
	// For other CHM in kvs_data, they should be initialized in resize()
	// because the size is determined dynamically
	kvs_data *kvs = new kvs_data(init_size);
	void *chm = (void*) new CHM(0);
	kvs->_data[0].store(chm, memory_order_relaxed);
	_kvs.store(kvs, memory_order_relaxed);
// Spec annotations for get(): commit points and the check that the
// returned value matches the sequential spec table.
//@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3 | Get_ReadKVS
@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
//bool passed = equals_val(_Old_Val, __RET__);
int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
int ret = __RET__ == NULL ? 0 : __RET__->_val;
//model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
//key->_val, old, ret);
//__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
equals_val(_Old_Val, __RET__)
// Lookup: returns the mapped value or NULL on a miss.
TypeV* get(TypeK *key) {
	slot *key_slot = new slot(false, key);
	int fullhash = hash(key_slot);
	/**** CDSChecker error ****/
	kvs_data *kvs = _kvs.load(memory_order_acquire);
	@Commit_point_define_check: true
	slot *V = get_impl(this, kvs, key_slot, fullhash);
	if (V == NULL) return NULL;
	MODEL_ASSERT (!is_prime(V)); // primes never escape to callers
	return (TypeV*) V->_ptr;
// Spec annotations for put(): record the old value, update the spec
// table, and check the returned old value against it.
@Commit_point_set: Put_Point
# Remember this old value at checking point
TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
spec_table_put(map, key, val);
//bool passed = equals_val(__RET__, _Old_Val);
int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
int ret = __RET__ == NULL ? 0 : __RET__->_val;
//model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
// key->_val, val->_val, old, ret);
equals_val(__RET__, _Old_Val)
472 TypeV* put(TypeK *key, TypeV *val) {
473 return putIfMatch(key, val, NO_MATCH_OLD);
// Spec annotations for putIfAbsent(): succeeds (returns NULL) only when
// the key is absent in the sequential spec table.
@Interface: PutIfAbsent
	Write_Success_Point | PutIfAbsent_Fail_Point
@Condition: !spec_table_contains(map, key)
	COND_PutIfAbsentSucc :: __RET__ == NULL
void *_Old_Val = spec_table_get(map, key);
spec_table_put(map, key, value);
__COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
493 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
494 return putIfMatch(key, val, TOMBSTONE);
// Spec annotations for remove(key): spec table maps the key to NULL and
// the returned old value must match the spec's.
@Interface: RemoveAny
@Commit_point_set: Write_Success_Point
void *_Old_Val = spec_table_get(map, key);
spec_table_put(map, key, NULL);
equals_val(__RET__, _Old_Val)
509 TypeV* remove(TypeK *key) {
510 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
// Spec annotations for remove(key, val): succeeds only when the spec
// table currently maps key to an equal value.
@Interface: RemoveIfMatch
	Write_Success_Point | RemoveIfMatch_Fail_Point
equals_val(spec_table_get(map, key), val)
	COND_RemoveIfMatchSucc :: __RET__ == true
spec_table_put(map, key, NULL);
__COND_SAT__ ? __RET__ : !__RET__
530 bool remove(TypeK *key, TypeV *val) {
531 slot *val_slot = val == NULL ? NULL : new slot(false, val);
532 return putIfMatch(key, TOMBSTONE, val) == val;
// Spec annotations for replace(key, val): returned old value must match
// the sequential spec table's old value.
@Interface: ReplaceAny
void *_Old_Val = spec_table_get(map, key);
equals_val(__RET__, _Old_Val)
548 TypeV* replace(TypeK *key, TypeV *val) {
549 return putIfMatch(key, val, MATCH_ANY);
// Spec annotations for replace(key, oldval, newval): succeeds only when
// the spec table currently maps key to a value equal to oldval.
@Interface: ReplaceIfMatch
	Write_Success_Point | ReplaceIfMatch_Fail_Point
equals_val(spec_table_get(map, key), oldval)
	COND_ReplaceIfMatchSucc :: __RET__ == true
spec_table_put(map, key, newval);
__COND_SAT__ ? __RET__ : !__RET__
569 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
570 return putIfMatch(key, newval, oldval) == oldval;
574 static CHM* get_chm(kvs_data* kvs) {
575 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
579 static int* get_hashes(kvs_data *kvs) {
580 return (int *) kvs->_data[1].load(memory_order_relaxed);
// Preserve happens-before semantics on newly inserted keys
// Read the key slot at 'idx' (keys live at even offsets 2*idx + 2).
static inline slot* key(kvs_data *kvs, int idx) {
	MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
	// Corresponding to the volatile read in get_impl() and putIfMatch in
	// Cliff Click's Java implementation
	slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
	# This is a complicated potential commit point since many many functions are
	@Potential_commit_point_define: true
	@Label: Read_Key_Point
The atomic operation in val() function is a "potential" commit point,
which means in some case it is a real commit point while it is not for
some other cases. This so happens because the val() function is such a
fundamental function that many internal operation will call. Our
strategy is that we label any potential commit points and check if they
really are the commit points later.
// Preserve happens-before semantics on newly inserted values
// Read the value slot at 'idx' (values live at odd offsets 2*idx + 3).
static inline slot* val(kvs_data *kvs, int idx) {
	MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
	// Corresponding to the volatile read in get_impl() and putIfMatch in
	// Cliff Click's Java implementation
	/**** CDSChecker error & hb violation ****/
	slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
	# This is a complicated potential commit point since many many functions are
	@Potential_commit_point_define: true
	@Label: Read_Val_Point
// Compute the spread hash for a key slot from TypeK::hashCode().
static int hash(slot *key_slot) {
	MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
	TypeK* key = (TypeK*) key_slot->_ptr;
	int h = key->hashCode();
	// Spread bits according to Cliff Click's code
	h += (h << 15) ^ 0xffffcd7d;
	// NOTE(review): intermediate spreading steps are not visible in this view.
	h += (h << 2) + (h << 14);
	return h ^ (h >> 16);
641 // Heuristic to decide if reprobed too many times.
642 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
643 // put it triggers a table resize. Several places MUST have exact agreement.
644 static int reprobe_limit(int len) {
645 return REPROBE_LIMIT + (len >> 2);
648 static inline bool is_prime(slot *val) {
649 return (val != NULL) && val->_prime;
// Check for key equality. Try direct pointer comparison first (fast
// negative test) and then the full 'equals' call
static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
	// Caller should've checked this.
	MODEL_ASSERT (K != NULL);
	TypeK* key_ptr = (TypeK*) key_slot->_ptr;
	// Hash record 0 means "not recorded yet"; a mismatching record lets us
	// skip the expensive equals() call.
	((hashes[hash] == 0 || hashes[hash] == fullhash) &&
		key_ptr->equals(K->_ptr));
666 static bool valeq(slot *val_slot1, slot *val_slot2) {
667 MODEL_ASSERT (val_slot1 != NULL);
668 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
669 if (val_slot2 == NULL || ptr1 == NULL) return false;
670 return ptr1->equals(val_slot2->_ptr);
// Together with key() preserve the happens-before relationship on newly
// CAS the key slot at 'idx' from 'expected' to 'desired' (relaxed).
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
	bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
		desired, memory_order_relaxed, memory_order_relaxed);
	# If it is a successful put instead of a copy or any other internal
	# operations, expected != NULL
	@Potential_commit_point_define: res
	@Label: Write_Key_Point
Same as the val() function, we only label the CAS operation as the
potential commit point.
// Together with val() preserve the happens-before relationship on newly
// CAS the value slot at 'idx'; acq_rel on success publishes the value.
static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
	/**** CDSChecker error & HB violation ****/
	bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
		desired, memory_order_acq_rel, memory_order_relaxed);
	# If it is a successful put instead of a copy or any other internal
	# operations, expected != NULL
	@Potential_commit_point_define: res
	@Label: Write_Val_Point
// Core lookup: reprobe the open-addressed table; recurse into the new
// table when a resize is in progress. Returns the value slot or NULL.
slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
	int len = kvs->_size;
	CHM *chm = get_chm(kvs);
	int *hashes = get_hashes(kvs);
	int idx = fullhash & (len - 1); // len is a power of two
	slot *K = key(kvs, idx);
	@Commit_point_define: K == NULL
	@Potential_commit_point_label: Read_Key_Point
	slot *V = val(kvs, idx);
	//model_print("Key is null\n");
	return NULL; // A miss
	if (keyeq(K, key_slot, hashes, idx, fullhash)) {
		// Key hit! Check if table-resize in progress
		@Commit_point_define: true
		@Potential_commit_point_label: Read_Val_Point
		return (V == TOMBSTONE) ? NULL : V; // Return this value
		// Otherwise, finish the copy & retry in the new table
		return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
			idx, key_slot), key_slot, fullhash);
	if (++reprobe_cnt >= REPROBE_LIMIT ||
			key_slot == TOMBSTONE) {
		// Retry in new table
		// Atomic read can be here
		/**** FIXME: miss ****/
		kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
		@Commit_point_define_check: newkvs == NULL
		return newkvs == NULL ? NULL : get_impl(topmap,
			topmap->help_copy(newkvs), key_slot, fullhash);
	idx = (idx + 1) & (len - 1); // Reprobe by 1
// A wrapper of the essential function putIfMatch()
// Boxes key/value into slots, runs the static putIfMatch on the current
// table, and unboxes the old value (TOMBSTONE -> NULL).
TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
	// TODO: Should throw an exception rather return NULL
	if (old_val == NULL) {
	slot *key_slot = new slot(false, key);
	slot *value_slot = new slot(false, value);
	/**** FIXME: miss ****/
	kvs_data *kvs = _kvs.load(memory_order_acquire);
	@Commit_point_define_check: true
	slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
	// Only when copy_slot() call putIfMatch() will it return NULL
	MODEL_ASSERT (res != NULL);
	MODEL_ASSERT (!is_prime(res));
	return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
Put, Remove, PutIfAbsent, etc will call this function. Return the old
value. If the returned value is equals to the expVal (or expVal is
NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
NULL if passed a NULL expVal.
static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
	*key_slot, slot *val_slot, slot *expVal) {
	MODEL_ASSERT (val_slot != NULL);
	MODEL_ASSERT (!is_prime(val_slot));
	MODEL_ASSERT (!is_prime(expVal));
	int fullhash = hash(key_slot);
	int len = kvs->_size;
	CHM *chm = get_chm(kvs);
	int *hashes = get_hashes(kvs);
	int idx = fullhash & (len - 1);
	// Phase 1: find or claim a key slot for this key.
	while (true) { // Spin till we get a key slot
		if (K == NULL) { // Get a free slot
			// Removing a never-present key is a no-op.
			if (val_slot == TOMBSTONE) return val_slot;
			// Claim the null key-slot
			if (CAS_key(kvs, idx, NULL, key_slot)) {
				chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
				hashes[idx] = fullhash; // Memorize full hash
			K = key(kvs, idx); // CAS failed, get updated value
			MODEL_ASSERT (K != NULL);
		// Key slot not null, there exists a Key here
		if (keyeq(K, key_slot, hashes, idx, fullhash))
		// Notice that the logic here should be consistent with that of get.
		// The first predicate means too many reprobes means nothing in the
		if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
			newkvs = chm->resize(topmap, kvs);
			//model_print("resize1\n");
			// Help along an existing copy
			if (expVal != NULL) topmap->help_copy(newkvs);
			return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
		idx = (idx + 1) & (len - 1); // Reprobe
	} // End of spinning till we get a Key slot
	// Phase 2: update the value slot.
	if (val_slot == V) return V; // Fast cutout for no-change
	// Here it tries to resize cause it doesn't want other threads to stop
	// its progress (eagerly try to resize soon)
	/**** FIXME: miss ****/
	newkvs = chm->_newkvs.load(memory_order_acquire);
	if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
		//model_print("resize2\n");
		newkvs = chm->resize(topmap, kvs); // Force the copy to start
	// Finish the copy and then put it in the new table
	return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
		expVal), key_slot, val_slot, expVal);
	// Decided to update the existing table
	MODEL_ASSERT (!is_prime(V));
	// Match check: bail out (and label the failing spec commit point)
	// when expVal constrains the update and V does not satisfy it.
	if (expVal != NO_MATCH_OLD &&
			(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
			!(V == NULL && expVal == TOMBSTONE) &&
			(expVal == NULL || !valeq(expVal, V))) {
		@Commit_point_define: expVal == TOMBSTONE
		@Potential_commit_point_label: Read_Val_Point
		@Label: PutIfAbsent_Fail_Point
		# This is a check for the PutIfAbsent() when the value
		@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
		@Potential_commit_point_label: Read_Val_Point
		@Label: RemoveIfMatch_Fail_Point
		@Commit_point_define: expVal != NULL && !valeq(expVal, V)
		@Potential_commit_point_label: Read_Val_Point
		@Label: ReplaceIfMatch_Fail_Point
		return V; // Do not update!
	if (CAS_val(kvs, idx, V, val_slot)) {
		# The only point where a successful put happens
		@Commit_point_define: true
		@Potential_commit_point_label: Write_Val_Point
		if (expVal != NULL) { // Not called by a table-copy
			// CAS succeeded, should adjust size
			// Both normal put's and table-copy calls putIfMatch, but
			// table-copy does not increase the number of live K/V pairs
			if ((V == NULL || V == TOMBSTONE) &&
					val_slot != TOMBSTONE)
				chm->_size.fetch_add(1, memory_order_relaxed);
			if (!(V == NULL || V == TOMBSTONE) &&
					val_slot == TOMBSTONE)
				chm->_size.fetch_add(-1, memory_order_relaxed);
	return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
	// CAS failed against a prime: finish the copy and retry in new table.
	return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
		idx, expVal), key_slot, val_slot, expVal);
// Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns 'helper' unchanged when no copy is in progress.
kvs_data* help_copy(kvs_data *helper) {
	/**** FIXME: miss ****/
	kvs_data *topkvs = _kvs.load(memory_order_acquire);
	CHM *topchm = get_chm(topkvs);
	// No copy in progress
	if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
	topchm->help_copy_impl(this, topkvs, false);