1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
23 This header file declares and defines a simplified version of Cliff Click's
24 NonblockingHashMap. It contains all the necessary structures and main
25 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
29 template<typename TypeK, typename TypeV>
30 class cliffc_hashtable;
33 Corresponding to the Object[] array in Cliff Click's Java implementation.
34 It keeps the first two slots for CHM (Hashtable control unit) and the hash
35 records (an array of hash used for fast negative key-equality check).
// Constructor fragment: slot 0 is reserved for the CHM, slot 1 for the hash
// record array; each key/value pair takes two further slots, hence sz*2+2.
43 int real_size = sz * 2 + 2;
44 _data = new atomic<void*>[real_size];
45 // The control block should be initialized in resize()
46 // Init the hash record array
47 int *hashes = new int[_size];
49 for (i = 0; i < _size; i++) {
52 // Init the data to Null slot
53 for (i = 2; i < real_size; i++) {
54 _data[i].store(NULL, memory_order_relaxed);
// Publish the hash array with release so readers that acquire slot 1 see
// the zero-initialized entries above.
56 _data[1].store(hashes, memory_order_release);
// Destructor fragment: reclaims the hash array stored in slot 1.
// NOTE(review): surrounding lines are not visible in this view.
60 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: tagged pointer wrapper; 'prime' marks a value being copied to the
// new table (boxed/Prime state in Cliff Click's algorithm).
70 slot(bool prime, void *ptr) {
78 TypeK must have defined function "int hashCode()" which returns the hash
79 code for its object, and "int equals(TypeK anotherKey)" which is
80 used to judge equality.
81 TypeK and TypeV should define their own copy constructor.
88 template<typename TypeK, typename TypeV>
89 class cliffc_hashtable {
91 # The synchronization we have for the hashtable gives us the property of
92 # serializability, so we should have a sequential hashtable when we check the
93 # correctness. The key thing is to identify all the commit points.
98 CLASS = cliffc_hashtable;
// Sequential-spec state: 'map' mirrors the table's key->value contents and
// 'id_map' assigns stable tags to keys for the checker.
105 map = new_spec_table_default(equals_key);
106 id_map = new_spec_table_default(equals_id);
// Spec helper: key equality via TypeK::equals.
// NOTE(review): the branch body for the NULL case (presumably 'return
// false;') falls outside this view — confirm against the full file.
110 bool equals_key(void *ptr1, void *ptr2) {
111 TypeK *key1 = (TypeK*) ptr1,
112 *key2 = (TypeK*) ptr2;
113 if (key1 == NULL || key2 == NULL)
115 return key1->equals(key2);
// Spec helper: value equality via TypeV::equals (same NULL caveat as above).
119 bool equals_val(void *ptr1, void *ptr2) {
120 TypeV *val1 = (TypeV*) ptr1,
121 *val2 = (TypeV*) ptr2;
122 if (val1 == NULL || val2 == NULL)
124 return val1->equals(val2);
// Spec helper: id-tag equality by comparing the raw tag fields.
128 bool equals_id(void *ptr1, void *ptr2) {
129 id_tag_t *id1 = (id_tag_t*) ptr1,
130 *id2 = (id_tag_t*) ptr2;
131 if (id1 == NULL || id2 == NULL)
133 return (*id1).tag == (*id2).tag;
137 # Update the tag for the current key slot if the corresponding tag
138 # is NULL, otherwise just return that tag. It will update the next
139 # available tag too if it requires a new tag for that key slot.
140 id_tag_t getKeyTag(TypeK *key) {
// FIX(review): guard was inverted relative to the contract above — a fresh
// tag must be allocated only when the key has NO tag yet; the old condition
// overwrote (and leaked) the existing tag on every call for known keys.
141 if (!spec_table_contains(id_map, key)) {
// FIX(review): MODEL_MALLOC yields void*; C++ requires an explicit cast.
142 id_tag_t *cur_tag = (id_tag_t*) MODEL_MALLOC(sizeof(id_tag_t));
143 *cur_tag = current(tag);
144 spec_table_put(id_map, key, cur_tag);
// Either way, the key now has a tag recorded; look it up and return it.
148 id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
// Interface/commit-point declarations for the conditional operations
// (success of each is tied to the named COND_* predicate).
165 PutIfAbsent(COND_PutIfAbsentSucc),
167 RemoveIfMatch(COND_RemoveIfMatchSucc),
169 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
172 //Write_interface -> Read_interface
180 The control structure for the hashtable
184 friend class cliffc_hashtable;
// Pointer to the next (resized) table; NULL while no copy is in progress.
186 atomic<kvs_data*> _newkvs;
188 // Size of active K,V pairs
191 // Count of used slots
194 // The next part of the table to copy
195 atomic_int _copy_idx;
197 // Work-done reporting
198 atomic_int _copy_done;
// CHM constructor fragment: plain-store initialization; _copy_done uses
// release as the final publishing store.
202 _newkvs.store(NULL, memory_order_relaxed);
203 _size.store(size, memory_order_relaxed);
204 _slots.store(0, memory_order_relaxed);
206 _copy_idx.store(0, memory_order_relaxed);
207 _copy_done.store(0, memory_order_release);
214 // Heuristic to decide if the table is too full
215 bool table_full(int reprobe_cnt, int len) {
217 reprobe_cnt >= REPROBE_LIMIT &&
218 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Start (or join) a table resize: pick the new size heuristically, allocate
// the new kvs_data, and CAS it into _newkvs. Whoever loses the CAS adopts
// the winner's table.
221 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
222 //model_print("resizing...\n");
223 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
227 // No copy in-progress, start one; Only double the table size
228 int oldlen = kvs->_size;
229 int sz = _size.load(memory_order_relaxed);
232 // Just follow Cliff Click's heuristic to decide the new size
233 if (sz >= (oldlen >> 2)) { // If we are 25% full
234 newsz = oldlen << 1; // Double size
235 if (sz >= (oldlen >> 1))
236 newsz = oldlen << 2; // Double double size
239 // We do not record the record timestamp
240 if (newsz <= oldlen) newsz = oldlen << 1;
241 // Do not shrink ever
242 if (newsz < oldlen) newsz = oldlen;
244 // Last check cause the 'new' below is expensive
245 newkvs = _newkvs.load(memory_order_acquire);
246 if (newkvs != NULL) return newkvs;
248 newkvs = new kvs_data(newsz);
249 void *chm = (void*) new CHM(sz);
250 newkvs->_data[0].store(chm, memory_order_release);
252 kvs_data *cur_newkvs;
253 // Another check after the slow allocation
254 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
256 // CAS the _newkvs to the allocated table
// NOTE(review): the variable names are swapped relative to their CAS roles:
// 'desired' holds the expected value (NULL) and 'expected' the desired one
// (newkvs). Behavior is a CAS NULL -> newkvs, but the naming misleads.
257 kvs_data *desired = (kvs_data*) NULL;
258 kvs_data *expected = (kvs_data*) newkvs;
// NOTE(review): memory_order_release is not a valid *failure* ordering for
// std::atomic compare_exchange; presumably tolerated by the CDSChecker
// atomics used here — confirm before porting to standard <atomic>.
259 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
260 memory_order_release)) {
261 // Should clean the allocated area
263 newkvs = _newkvs.load(memory_order_acquire);
// Claim chunks of the old table (min_copy_work slots at a time) via CAS on
// _copy_idx and copy them into _newkvs; 'panic' mode kicks in once the
// claim index runs past 2*oldlen and the thread just copies everything.
268 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
270 MODEL_ASSERT (get_chm(oldkvs) == this);
271 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
272 int oldlen = oldkvs->_size;
273 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
275 // Just follow Cliff Click's code here
276 int panic_start = -1;
278 while (_copy_done.load(memory_order_acquire) < oldlen) {
279 copyidx = _copy_idx.load(memory_order_acquire);
280 if (panic_start == -1) { // No panic
281 copyidx = _copy_idx.load(memory_order_acquire);
282 while (copyidx < (oldlen << 1) &&
283 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
284 min_copy_work, memory_order_release, memory_order_release))
285 copyidx = _copy_idx.load(memory_order_relaxed);
286 if (!(copyidx < (oldlen << 1)))
287 panic_start = copyidx;
290 // Now copy the chunk of work we claimed
292 for (int i = 0; i < min_copy_work; i++)
293 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
297 copy_check_and_promote(topmap, oldkvs, workdone);
299 copyidx += min_copy_work;
300 if (!copy_all && panic_start == -1)
301 return; // We are done with the work we claim
303 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy one slot the caller tripped over (it saw a Prime) and, when asked to
// help, join the whole-table copy as well.
306 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
307 *oldkvs, int idx, void *should_help) {
308 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
309 // We're only here cause the caller saw a Prime
310 if (copy_slot(topmap, idx, oldkvs, newkvs))
311 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
312 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Add 'workdone' slots to _copy_done and, when the whole old table has been
// copied, promote _newkvs to be the top-level table via CAS on topmap->_kvs.
315 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
316 oldkvs, int workdone) {
317 int oldlen = oldkvs->_size;
318 int copyDone = _copy_done.load(memory_order_relaxed);
319 copyDone = _copy_done.load(memory_order_relaxed);
321 copyDone = _copy_done.load(memory_order_relaxed);
322 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
323 workdone, memory_order_relaxed, memory_order_relaxed))
328 // Promote the new table to the current table
329 if (copyDone + workdone == oldlen &&
330 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
331 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
332 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
333 memory_order_release);
// Copy one key/value pair from oldkvs into newkvs. Returns true iff THIS
// thread completed the copy of the slot (so the caller may count it).
337 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Block fresh key claims in the old table by tombstoning empty key slots.
340 while ((key_slot = key(oldkvs, idx)) == NULL)
341 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
343 // First CAS old to Prime
344 slot *oldval = val(oldkvs, idx);
345 while (!is_prime(oldval)) {
346 slot *box = (oldval == NULL || oldval == TOMBSTONE)
347 ? TOMBPRIME : new slot(true, oldval->_ptr);
348 if (CAS_val(oldkvs, idx, oldval, box)) {
349 if (box == TOMBPRIME)
// NOTE(review): returns int literal 1 from a bool function — works, but is
// inconsistent with the 'return false' below.
350 return 1; // Copy done
351 // Otherwise we CAS'd the box
352 oldval = box; // Record updated oldval
355 oldval = val(oldkvs, idx); // Else re-try
358 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Insert the unboxed value into the new table (expVal NULL = copy mode).
360 slot *old_unboxed = new slot(false, oldval->_ptr);
361 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
364 // Old value is exposed in the new table
365 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
366 oldval = val(oldkvs, idx);
368 return copied_into_new;
375 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slots: MATCH_ANY/NO_MATCH_OLD select putIfMatch semantics;
// TOMBSTONE marks deleted entries; TOMBPRIME a deleted entry mid-copy.
377 static slot* const MATCH_ANY;
378 static slot* const NO_MATCH_OLD;
380 static slot* const TOMBPRIME;
381 static slot* const TOMBSTONE;
383 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table; replaced via CAS when a resize completes.
385 atomic<kvs_data*> _kvs;
389 // Should initialize the CHM for the construction of the table
390 // For other CHM in kvs_data, they should be initialized in resize()
391 // because the size is determined dynamically
392 kvs_data *kvs = new kvs_data(Default_Init_Size);
393 void *chm = (void*) new CHM(0);
394 kvs->_data[0].store(chm, memory_order_relaxed);
395 _kvs.store(kvs, memory_order_release);
// Sized constructor: identical setup with a caller-chosen initial capacity.
398 cliffc_hashtable(int init_size) {
399 // Should initialize the CHM for the construction of the table
400 // For other CHM in kvs_data, they should be initialized in resize()
401 // because the size is determined dynamically
408 kvs_data *kvs = new kvs_data(init_size);
409 void *chm = (void*) new CHM(0);
410 kvs->_data[0].store(chm, memory_order_relaxed);
411 _kvs.store(kvs, memory_order_release);
// NOTE(review): these set names ('Get_Success_Point1') differ from the
// labels defined in get_impl ('Get_Success_Point_1') — verify the spec
// tooling treats them as the same points.
417 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
420 void *_Old_Val = spec_table_get(map, key);
422 equals_val(_Old_Val, __RET__)
// Lookup: wrap the key in a slot, probe via get_impl, unwrap the result.
// Returns NULL on a miss.
425 TypeV* get(TypeK *key) {
426 slot *key_slot = new slot(false, key);
427 int fullhash = hash(key_slot);
428 kvs_data *kvs = _kvs.load(memory_order_acquire);
429 slot *V = get_impl(this, kvs, key_slot, fullhash);
430 if (V == NULL) return NULL;
431 MODEL_ASSERT (!is_prime(V));
432 return (TypeV*) V->_ptr;
438 @Commit_point_set: Write_Success_Point
441 # Remember this old value at checking point
442 void *_Old_Val = spec_table_get(map, key);
443 spec_table_put(map, key, val);
445 equals_val(__RET__, _Old_Val)
// Unconditional put: returns the previous value (NULL if none).
448 TypeV* put(TypeK *key, TypeV *val) {
449 return putIfMatch(key, val, NO_MATCH_OLD);
454 @Interface: PutIfAbsent
456 Write_Success_Point | PutIfAbsent_Fail_Point
457 @Condition: !spec_table_contains(map, key)
459 COND_PutIfAbsentSucc :: __RET__ == NULL
462 void *_Old_Val = spec_table_get(map, key);
464 spec_table_put(map, key, value);
466 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
// Insert (key -> value) only when the key has no live mapping. Returns NULL
// on success, or the existing value if the key was already present.
469 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
// FIX(review): the parameter is named 'value'; 'val' is not declared in this
// scope and cannot compile. Forward the actual parameter.
470 return putIfMatch(key, value, TOMBSTONE);
475 @Interface: RemoveAny
476 @Commit_point_set: Write_Success_Point
479 void *_Old_Val = spec_table_get(map, key);
480 spec_table_put(map, key, NULL);
482 equals_val(__RET__, _Old_Val)
// Unconditional remove: returns the value that was mapped, or NULL.
485 TypeV* remove(TypeK *key) {
486 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
491 @Interface: RemoveIfMatch
493 Write_Success_Point | RemoveIfMatch_Fail_Point
495 equals_val(spec_table_get(map, key), val)
497 COND_RemoveIfMatchSucc :: __RET__ == true
501 spec_table_put(map, key, NULL);
503 __COND_SAT__ ? __RET__ : !__RET__
// Conditional remove: succeeds only if the current value matches 'val'.
// NOTE(review): 'val_slot' is built but never used, and 'val' (TypeV*) is
// passed where the visible putIfMatch wrapper takes slot* — presumably an
// unseen overload or a latent bug; verify against the full file.
506 bool remove(TypeK *key, TypeV *val) {
507 slot *val_slot = val == NULL ? NULL : new slot(false, val);
508 return putIfMatch(key, TOMBSTONE, val) == val;
514 @Interface: ReplaceAny
519 void *_Old_Val = spec_table_get(map, key);
521 equals_val(__RET__, _Old_Val)
// Replace only if some mapping exists (MATCH_ANY semantics).
524 TypeV* replace(TypeK *key, TypeV *val) {
525 return putIfMatch(key, val, MATCH_ANY);
530 @Interface: ReplaceIfMatch
532 Write_Success_Point | ReplaceIfMatch_Fail_Point
534 equals_val(spec_table_get(map, key), oldval)
536 COND_ReplaceIfMatchSucc :: __RET__ == true
540 spec_table_put(map, key, newval);
542 __COND_SAT__ ? __RET__ : !__RET__
// Conditional replace: oldval -> newval.
// NOTE(review): same type concern as remove(key, val) above — 'oldval' is a
// TypeV* where the visible wrapper expects slot*; confirm an overload exists.
545 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
546 return putIfMatch(key, newval, oldval) == oldval;
// Slot 0 of every kvs_data holds its CHM control block.
550 static CHM* get_chm(kvs_data* kvs) {
551 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
// Slot 1 holds the per-table hash record array.
555 static int* get_hashes(kvs_data *kvs) {
556 return (int *) kvs->_data[1].load(memory_order_relaxed);
559 // Preserve happens-before semantics on newly inserted keys
560 static inline slot* key(kvs_data *kvs, int idx) {
561 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
562 // Corresponding to the volatile read in get_impl() and putIfMatch in
563 // Cliff Click's Java implementation
564 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
569 The atomic operation in val() function is a "potential" commit point,
570 which means in some cases it is a real commit point while it is not for
571 some other cases. This so happens because the val() function is such a
572 fundamental function that many internal operations will call. Our
573 strategy is that we label any potential commit points and check if they
574 really are the commit points later.
576 // Preserve happens-before semantics on newly inserted values
577 static inline slot* val(kvs_data *kvs, int idx) {
578 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
579 // Corresponding to the volatile read in get_impl() and putIfMatch in
580 // Cliff Click's Java implementation
581 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
584 # This is a complicated potential commit point since many many functions are
586 @Potential_commit_point_define: true
587 @Label: Read_Val_Point
// Hash of the key wrapped in key_slot, with Cliff Click's bit-spreading.
595 static int hash(slot *key_slot) {
596 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
597 TypeK* key = (TypeK*) key_slot->_ptr;
598 int h = key->hashCode();
599 // Spread bits according to Cliff Click's code
600 h += (h << 15) ^ 0xffffcd7d;
604 h += (h << 2) + (h << 14);
605 return h ^ (h >> 16);
608 // Heuristic to decide if reprobed too many times.
609 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
610 // put it triggers a table resize. Several places MUST have exact agreement.
611 static int reprobe_limit(int len) {
612 return REPROBE_LIMIT + (len >> 2);
// A 'prime' slot is one whose value is being copied to the new table.
615 static inline bool is_prime(slot *val) {
616 return (val != NULL) && val->_prime;
619 // Check for key equality. Try direct pointer comparison first (fast
620 // negative test) and then the full 'equals' call
// 'hash' here is the table index whose recorded hash is consulted; 0 in the
// hash record means "not yet memorized", so the full equals must run.
621 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
623 // Caller should've checked this.
624 MODEL_ASSERT (K != NULL);
625 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
628 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
630 key_ptr->equals(K->_ptr));
// Value equality between two slots: unwrap slot1's payload as a TypeV and
// compare through TypeV::equals. val_slot1 must be non-NULL (asserted);
// returns false if either payload side is missing.
633 static bool valeq(slot *val_slot1, slot *val_slot2) {
634 MODEL_ASSERT (val_slot1 != NULL);
// FIX(review): value slots carry TypeV payloads; the declared pointer type
// must match the cast — 'TypeK* ptr1 = (TypeV*)...' is ill-formed whenever
// TypeK and TypeV are distinct types.
635 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
636 if (val_slot2 == NULL || ptr1 == NULL) return false;
637 return ptr1->equals(val_slot2->_ptr);
640 // Together with key() preserve the happens-before relationship on newly
// NOTE(review): memory_order_release is not a valid failure ordering for a
// std::atomic compare-exchange; presumably accepted by the CDSChecker
// atomics in use here — confirm before porting to standard <atomic>.
642 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
643 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
644 desired, memory_order_release, memory_order_release);
648 Same as the val() function, we only label the CAS operation as the
649 potential commit point.
651 // Together with val() preserve the happens-before relationship on newly
653 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
655 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
656 desired, memory_order_release, memory_order_release);
658 # If it is a successful put instead of a copy or any other internal
659 # operations, expected != NULL
661 @Potential_commit_point_define: res == true
662 @Label: Write_Val_Point
// Core lookup: linear-probe 'kvs' for key_slot; follow a Prime into the new
// table; NULL means miss, TOMBSTONE means deleted.
668 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
670 int len = kvs->_size;
671 CHM *chm = get_chm(kvs);
672 int *hashes = get_hashes(kvs);
674 int idx = fullhash & (len - 1);
677 slot *K = key(kvs, idx);
678 slot *V = val(kvs, idx);
681 @Commit_point_define: V == NULL
682 @Potential_commit_point_label: Read_Val_Point
683 @Label: Get_Success_Point_1
687 if (K == NULL) return NULL; // A miss
689 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
690 // Key hit! Check if table-resize in progress
694 @Commit_point_define: true
695 @Potential_commit_point_label: Read_Val_Point
696 @Label: Get_Success_Point_2
699 return (V == TOMBSTONE) ? NULL : V; // Return this value
701 // Otherwise, finish the copy & retry in the new table
702 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
703 idx, key_slot), key_slot, fullhash);
// NOTE(review): this uses the raw REPROBE_LIMIT while putIfMatch uses
// reprobe_limit(len), despite the "MUST have exact agreement" comment on
// reprobe_limit() — verify which is intended.
706 if (++reprobe_cnt >= REPROBE_LIMIT ||
707 key_slot == TOMBSTONE) {
708 // Retry in new table
709 // Atomic read (acquire) can be here
710 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
713 @Commit_point_define_check: newkvs == NULL
714 @Label: Get_Success_Point_3
717 return newkvs == NULL ? NULL : get_impl(topmap,
718 topmap->help_copy(newkvs), key_slot, fullhash);
721 idx = (idx + 1) & (len - 1); // Reprobe by 1
725 // A wrapper of the essential function putIfMatch()
// Translates TypeK/TypeV arguments into slots, runs the core putIfMatch on
// the current table, and unwraps the old value (TOMBSTONE -> NULL).
726 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
727 // TODO: Should throw an exception rather than return NULL
728 if (old_val == NULL) {
731 slot *key_slot = new slot(false, key);
733 slot *value_slot = new slot(false, value);
734 kvs_data *kvs = _kvs.load(memory_order_acquire);
735 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
736 // Only when copy_slot() calls putIfMatch() will it return NULL
737 MODEL_ASSERT (res != NULL);
738 MODEL_ASSERT (!is_prime(res));
739 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
743 Put, Remove, PutIfAbsent, etc will call this function. Return the old
744 value. If the returned value is equals to the expVal (or expVal is
745 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
746 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
747 NULL if passed a NULL expVal.
749 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
750 *key_slot, slot *val_slot, slot *expVal) {
751 MODEL_ASSERT (val_slot != NULL);
752 MODEL_ASSERT (!is_prime(val_slot));
753 MODEL_ASSERT (!is_prime(expVal));
755 int fullhash = hash(key_slot);
756 int len = kvs->_size;
757 CHM *chm = get_chm(kvs);
758 int *hashes = get_hashes(kvs);
759 int idx = fullhash & (len - 1);
// Phase 1: probe for (or claim) a key slot.
767 while (true) { // Spin till we get a key slot
770 if (K == NULL) { // Get a free slot
771 if (val_slot == TOMBSTONE) return val_slot;
772 // Claim the null key-slot
773 if (CAS_key(kvs, idx, NULL, key_slot)) {
774 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
775 hashes[idx] = fullhash; // Memorize full hash
778 K = key(kvs, idx); // CAS failed, get updated value
779 MODEL_ASSERT (K != NULL);
782 // Key slot not null, there exists a Key here
783 if (keyeq(K, key_slot, hashes, idx, fullhash))
786 // Notice that the logic here should be consistent with that of get.
787 // The first predicate means too many reprobes means nothing in the
789 if (++reprobe_cnt >= reprobe_limit(len) ||
790 K == TOMBSTONE) { // Found a Tombstone key, no more keys
791 newkvs = chm->resize(topmap, kvs);
792 // Help along an existing copy
793 if (expVal != NULL) topmap->help_copy(newkvs);
794 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
797 idx = (idx + 1) & (len - 1); // Reprobe
798 } // End of spinning till we get a Key slot
// Phase 2: install the value, retrying through resizes.
800 if (val_slot == V) return V; // Fast cutout for no-change
802 // Here it tries to resize cause it doesn't want other threads to stop
803 // its progress (eagerly try to resize soon)
804 newkvs = chm->_newkvs.load(memory_order_acquire);
805 if (newkvs == NULL &&
806 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
807 newkvs = chm->resize(topmap, kvs); // Force the copy to start
809 // Finish the copy and then put it in the new table
811 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
812 expVal), key_slot, val_slot, expVal);
814 // Decided to update the existing table
816 MODEL_ASSERT (!is_prime(V));
// Match logic: bail out (return current V) when expVal constrains the
// update and the current value does not satisfy it.
818 if (expVal != NO_MATCH_OLD &&
820 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
821 !(V == NULL && expVal == TOMBSTONE) &&
822 (expVal == NULL || !valeq(expVal, V))) {
825 @Commit_point_define: expVal == TOMBSTONE
826 @Potential_commit_point_label: Read_Val_Point
827 @Label: PutIfAbsent_Fail_Point
828 # This is a check for the PutIfAbsent() when the value
834 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
835 @Potential_commit_point_label: Read_Val_Point
836 @Label: RemoveIfMatch_Fail_Point
841 @Commit_point_define: !valeq(expVal, V)
842 @Potential_commit_point_label: Read_Val_Point
843 @Label: ReplaceIfMatch_Fail_Point
846 return V; // Do not update!
849 if (CAS_val(kvs, idx, V, val_slot)) {
852 # The only point where a successful put happens
853 @Commit_point_define: true
854 @Potential_commit_point_label: Write_Val_Point
855 @Label: Write_Success_Point
858 if (expVal != NULL) { // Not called by a table-copy
859 // CAS succeeded, should adjust size
860 // Both normal put's and table-copy calls putIfMatch, but
861 // table-copy does not increase the number of live K/V pairs
862 if ((V == NULL || V == TOMBSTONE) &&
863 val_slot != TOMBSTONE)
864 chm->_size.fetch_add(1, memory_order_relaxed);
865 if (!(V == NULL || V == TOMBSTONE) &&
866 val_slot == TOMBSTONE)
867 chm->_size.fetch_add(-1, memory_order_relaxed);
// Report TOMBSTONE (not NULL) for "no prior value" so callers can tell a
// real NULL return (copy mode) apart from an absent mapping.
869 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
874 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
875 idx, expVal), key_slot, val_slot, expVal);
879 // Help along an existing table-resize. This is a fast cut-out wrapper.
// NOTE(review): the tail of this function (presumably returning 'helper')
// lies beyond this view.
880 kvs_data* help_copy(kvs_data *helper) {
881 kvs_data *topkvs = _kvs.load(memory_order_acquire);
882 CHM *topchm = get_chm(topkvs);
883 // No copy in progress
884 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
885 topchm->help_copy_impl(this, topkvs, false);