1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
17 This header file declares and defines a simplified version of Cliff Click's
18 NonblockingHashMap. It contains all the necessary structures and main
19 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
23 template<typename TypeK, typename TypeV>
24 class cliffc_hashtable;
27 Corresponding to the Object[] array in Cliff Click's Java implementation.
28 It keeps the first two slots for CHM (Hashtable control unit) and the hash
29 records (an array of hash used for fast negative key-equality check).
// ---------------------------------------------------------------------------
// kvs_data constructor fragment: allocates 2*sz K/V slots plus the two header
// slots (slot 0 = CHM control block, slot 1 = cached hash array).
// NOTE(review): the numeric prefixes are line numbers from an extraction
// pass; several original lines are missing between the lines shown here.
// ---------------------------------------------------------------------------
37 int real_size = sz * 2 + 2;
38 _data = new atomic<void*>[real_size];
39 // Slot 0 (the CHM control block) should be initialized in resize()
40 // Init the hash record array (fast negative key-equality check)
41 int *hashes = new int[_size];
// Zero the hash cache (loop body not visible in this extract)
43 for (i = 0; i < _size; i++) {
46 // Init the data to Null slot
47 for (i = 2; i < real_size; i++) {
48 _data[i].store(NULL, memory_order_relaxed);
// Release store: publishes the zero-initialized hash array to readers.
50 _data[1].store(hashes, memory_order_release);
// Destructor fragment: reclaim the hash array parked in header slot 1.
54 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: tagged value cell; per Cliff Click's algorithm 'prime' presumably
// marks a value that is being copied into the new table — confirm upstream.
64 slot(bool prime, void *ptr) {
// Template-parameter requirements (spec comment):
72 TypeK must have defined function "int hashCode()" which returns the hash
73 code for its object, and "int equals(TypeK anotherKey)" which is
74 used to judge equality.
75 TypeK and TypeV should define their own copy constructor.
82 template<typename TypeK, typename TypeV>
83 class cliffc_hashtable {
// Sequential-specification annotations, checked by the model-checking tool:
85 # The synchronization we have for the hashtable gives us the property of
86 # serializability, so we should have a sequential hashtable when we check the
87 # correctness. The key thing is to identify all the commit points.
92 CLASS = cliffc_hashtable;
// Sequential state: a spec table keyed by TypeK, plus an id-tag table.
99 map = new_spec_table_default(equals_key);
100 id_map = new_spec_table_default(equals_id);
// Spec helper: key equality via TypeK::equals(); the NULL-handling branch
// (original line 108) is missing from this extract.
104 bool equals_key(void *ptr1, void *ptr2) {
105 TypeK *key1 = (TypeK*) ptr1,
106 *key2 = (TypeK*) ptr2;
107 if (key1 == NULL || key2 == NULL)
109 return key1->equals(key2);
// Spec helper: value equality via TypeV::equals().
113 bool equals_val(void *ptr1, void *ptr2) {
114 TypeV *val1 = (TypeV*) ptr1,
115 *val2 = (TypeV*) ptr2;
116 if (val1 == NULL || val2 == NULL)
118 return val1->equals(val2);
// Spec helper: identity-tag equality (comparison line not visible here).
122 bool equals_id(void *ptr1, void *ptr2) {
123 id_tag_t *id1 = (id_tag_t*) ptr1,
124 *id2 = (id_tag_t*) ptr2;
125 if (id1 == NULL || id2 == NULL)
131 # Update the tag for the current key slot if the corresponding tag
132 # is NULL, otherwise just return that tag. It will update the next
133 # available tag too if it requires a new tag for that key slot.
134 id_tag_t getKeyTag(TypeK *key) {
// NOTE(review): this branch runs when the key is ALREADY in id_map, yet it
// allocates and stores a fresh tag — verify against the comment above.
135 if (spec_table_contains(id_map, key)) {
136 id_tag_t *cur_tag = MODEL_MALLOC(sizeof(id_tag_t));
137 *cur_tag = current(tag);
138 spec_table_put(id_map, key, cur_tag);
142 id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
// Interface/commit-point declarations for the conditional operations:
159 PutIfAbsent(COND_PutIfAbsentSucc),
161 RemoveIfMatch(COND_RemoveIfMatchSucc),
163 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
166 Write_interface -> Read_interface
// CHM: per-table control structure — size counters plus resize/copy state.
172 The control structure for the hashtable
176 friend class cliffc_hashtable;
// The next table during a resize; NULL when no copy is in progress.
178 atomic<kvs_data*> _newkvs;
180 // Size of active K,V pairs
183 // Count of used slots
186 // The next part of the table to copy
187 atomic_int _copy_idx;
189 // Work-done reporting
190 atomic_int _copy_done;
// Constructor fragment: relaxed stores are sufficient while the object is
// not yet shared; the last store uses release — presumably intended as the
// publication point (confirm against the code that links in this CHM).
194 _newkvs.store(NULL, memory_order_relaxed);
195 _size.store(size, memory_order_relaxed);
196 _slots.store(0, memory_order_relaxed);
198 _copy_idx.store(0, memory_order_relaxed);
199 _copy_done.store(0, memory_order_release);
206 // Heuristic to decide if the table is too full
207 bool table_full(int reprobe_cnt, int len) {
// Full = reprobed past the fixed limit AND used slots >= reprobe_limit(len).
209 reprobe_cnt >= REPROBE_LIMIT &&
210 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Start (or join) a resize of 'kvs': pick the new size, allocate the new
// table, and race to install it in _newkvs. Returns the winning new table.
213 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
214 //model_print("resizing...\n");
// Double-checked pattern: if someone already installed a new table, use it.
215 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
219 // No copy in-progress, start one; Only double the table size
220 int oldlen = kvs->_size;
221 int sz = _size.load(memory_order_relaxed);
224 // Just follow Cliff Click's heuristic to decide the new size
225 if (sz >= (oldlen >> 2)) { // If we are 25% full
226 newsz = oldlen << 1; // Double size
227 if (sz >= (oldlen >> 1))
228 newsz = oldlen << 2; // Double double size
231 // We do not record the record timestamp
232 if (newsz <= oldlen) newsz = oldlen << 1;
233 // Do not shrink ever
234 if (newsz < oldlen) newsz = oldlen;
236 // Last check cause the 'new' below is expensive
237 newkvs = _newkvs.load(memory_order_acquire);
238 if (newkvs != NULL) return newkvs;
240 newkvs = new kvs_data(newsz);
// Attach a fresh CHM seeded with the current live-pair count.
241 void *chm = (void*) new CHM(sz);
242 newkvs->_data[0].store(chm, memory_order_release);
244 kvs_data *cur_newkvs;
245 // Another check after the slow allocation
246 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
248 // CAS the _newkvs to the allocated table
// NOTE(review): the names are swapped — 'desired' holds the expected value
// (NULL) and 'expected' the desired one (newkvs). The call below passes them
// in the correct positional order (expected-by-ref first), so the transition
// NULL -> newkvs is what actually executes; only the naming misleads.
249 kvs_data *desired = (kvs_data*) NULL;
250 kvs_data *expected = (kvs_data*) newkvs;
// NOTE(review): memory_order_release is not a valid FAILURE ordering for
// compare_exchange in standard C++11 — confirm the model checker accepts it.
251 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
252 memory_order_release)) {
253 // Lost the race: the 'newkvs'/'chm' just allocated are leaked here.
// Reload the winner's table and return it.
255 newkvs = _newkvs.load(memory_order_acquire);
// Claim chunks of the old table (min_copy_work slots at a time) and copy
// them into _newkvs; when copy_all is set, loop until the whole table is
// done, otherwise return after one claimed chunk.
260 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
262 MODEL_ASSERT (get_chm(oldkvs) == this);
263 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
264 int oldlen = oldkvs->_size;
// Copy at most 1024 slots per claimed chunk.
265 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
267 // Just follow Cliff Click's code here
268 int panic_start = -1;
270 while (_copy_done.load(memory_order_acquire) < oldlen) {
271 copyidx = _copy_idx.load(memory_order_acquire);
272 if (panic_start == -1) { // No panic
273 copyidx = _copy_idx.load(memory_order_acquire);
// CAS-claim a chunk; the (oldlen << 1) bound lets claims run past the end
// once before we switch to panic mode and sweep everything.
274 while (copyidx < (oldlen << 1) &&
275 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
276 min_copy_work, memory_order_release, memory_order_release))
277 copyidx = _copy_idx.load(memory_order_relaxed);
278 if (!(copyidx < (oldlen << 1)))
279 panic_start = copyidx;
282 // Now copy the chunk of work we claimed
// workdone accumulation line is not visible in this extract.
284 for (int i = 0; i < min_copy_work; i++)
285 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
289 copy_check_and_promote(topmap, oldkvs, workdone);
291 copyidx += min_copy_work;
292 if (!copy_all && panic_start == -1)
293 return; // We are done with the work we claim
295 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy one slot the caller tripped over (it saw a Prime), credit the work,
// and optionally keep helping the overall copy.
298 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
299 *oldkvs, int idx, void *should_help) {
300 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
301 // We're only here cause the caller saw a Prime
302 if (copy_slot(topmap, idx, oldkvs, newkvs))
303 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// should_help != NULL means a user op is in flight: pitch in on the copy.
304 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Add 'workdone' finished slots to _copy_done and, when the whole old table
// is copied, CAS the top-level _kvs pointer over to the new table.
307 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
308 oldkvs, int workdone) {
309 int oldlen = oldkvs->_size;
310 int copyDone = _copy_done.load(memory_order_relaxed);
// CAS-retry loop to add our credit (retry reload shown below).
313 copyDone = _copy_done.load(memory_order_relaxed);
314 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
315 workdone, memory_order_relaxed, memory_order_relaxed))
320 // Promote the new table to the current table
321 if (copyDone + workdone == oldlen &&
322 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
323 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// NOTE(review): release as the CAS failure order is not standard C++11.
324 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
325 memory_order_release);
// Copy one K/V pair from the old table to the new one. Returns true/1 only
// for the thread that completes this slot's copy (mixed 1/false returns are
// kept as-is; both convert to bool).
329 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Block fresh insertions into an empty old slot by CAS'ing in a TOMBSTONE.
332 while ((key_slot = key(oldkvs, idx)) == NULL)
333 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
335 // First CAS old to Prime
336 slot *oldval = val(oldkvs, idx);
337 while (!is_prime(oldval)) {
// Dead/empty values get the shared TOMBPRIME; live ones get a boxed copy.
338 slot *box = (oldval == NULL || oldval == TOMBSTONE)
339 ? TOMBPRIME : new slot(true, oldval->_ptr);
340 if (CAS_val(oldkvs, idx, oldval, box)) {
341 if (box == TOMBPRIME)
342 return 1; // Copy done
343 // Otherwise we CAS'd the box
344 oldval = box; // Record updated oldval
347 oldval = val(oldkvs, idx); // Else re-try
350 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Re-box without the prime bit and try to install it in the new table.
352 slot *old_unboxed = new slot(false, oldval->_ptr);
353 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
356 // Old value is exposed in the new table
// Finally kill the old slot with TOMBPRIME so readers move on.
357 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
358 oldval = val(oldkvs, idx);
360 return copied_into_new;
// --- Sentinels, limits, and the top-level table pointer -------------------
367 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slots (defined out-of-line): MATCH_ANY / NO_MATCH_OLD select the
// putIfMatch policy; TOMBSTONE marks a deleted value; TOMBPRIME marks a
// deleted value in a table that is being copied.
369 static slot* const MATCH_ANY;
370 static slot* const NO_MATCH_OLD;
372 static slot* const TOMBPRIME;
373 static slot* const TOMBSTONE;
375 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// Current K/V array; swapped (CAS'd) to the new table when a copy completes.
377 atomic<kvs_data*> _kvs;
// Default-constructor body: build the initial table and its CHM.
381 // Should initialize the CHM for the construction of the table
382 // For other CHM in kvs_data, they should be initialized in resize()
383 // because the size is determined dynamically
384 kvs_data *kvs = new kvs_data(Default_Init_Size);
385 void *chm = (void*) new CHM(0);
386 kvs->_data[0].store(chm, memory_order_relaxed);
387 _kvs.store(kvs, memory_order_release);
// Sized constructor: identical setup with a caller-chosen initial capacity.
390 cliffc_hashtable(int init_size) {
391 // Should initialize the CHM for the construction of the table
392 // For other CHM in kvs_data, they should be initialized in resize()
393 // because the size is determined dynamically
394 kvs_data *kvs = new kvs_data(init_size);
395 void *chm = (void*) new CHM(0);
396 kvs->_data[0].store(chm, memory_order_relaxed);
397 _kvs.store(kvs, memory_order_release);
// Spec annotations for get(): three possible commit points.
403 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
406 void *_Old_Val = spec_table_get(map, key);
408 equals_val(_Old_Val, __RET__)
// Public lookup: wrap the key in a slot, hash it, and defer to get_impl().
// Returns NULL when absent; never returns a primed (mid-copy) value.
411 TypeV* get(TypeK *key) {
412 slot *key_slot = new slot(false, key);
413 int fullhash = hash(key_slot);
414 kvs_data *kvs = _kvs.load(memory_order_acquire);
415 slot *V = get_impl(this, kvs, key_slot, fullhash);
416 if (V == NULL) return NULL;
417 MODEL_ASSERT (!is_prime(V));
418 return (TypeV*) V->_ptr;
// Spec annotations for put(): unconditional write, returns the old value.
424 @Commit_point_set: Write_Success_Point
427 # Remember this old value at checking point
428 void *_Old_Val = spec_table_get(map, key);
429 spec_table_put(map, key, val);
431 equals_val(__RET__, _Old_Val)
// Unconditional insert/overwrite via NO_MATCH_OLD policy.
434 TypeV* put(TypeK *key, TypeV *val) {
435 return putIfMatch(key, val, NO_MATCH_OLD);
// Spec annotations for putIfAbsent(): succeeds (returns NULL) only when the
// key has no mapping; otherwise returns the existing value unchanged.
440 @Interface: PutIfAbsent
442 Write_Success_Point | PutIfAbsent_Fail_Point
443 @Condition: !spec_table_contains(map, key)
445 COND_PutIfAbsentSucc :: __RET__ == NULL
448 void *_Old_Val = spec_table_get(map, key);
450 spec_table_put(map, key, value);
452 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
455 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
456 return putIfMatch(key, val, TOMBSTONE);
// Spec annotations for remove(key): unconditional delete, returns old value.
461 @Interface: RemoveAny
462 @Commit_point_set: Write_Success_Point
465 void *_Old_Val = spec_table_get(map, key);
466 spec_table_put(map, key, NULL);
468 equals_val(__RET__, _Old_Val)
// Unconditional remove: writes TOMBSTONE under the NO_MATCH_OLD policy.
// NOTE(review): TOMBSTONE (a slot*) is passed where the wrapper declares a
// TypeV* value parameter — confirm how this type-checks upstream.
471 TypeV* remove(TypeK *key) {
472 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
// Spec annotations for remove(key, val): delete only when the current value
// matches 'val'.
477 @Interface: RemoveIfMatch
479 Write_Success_Point | RemoveIfMatch_Fail_Point
481 equals_val(spec_table_get(map, key), val)
483 COND_RemoveIfMatchSucc :: __RET__ == true
487 spec_table_put(map, key, NULL);
489 __COND_SAT__ ? __RET__ : !__RET__
492 bool remove(TypeK *key, TypeV *val) {
// NOTE(review): 'val_slot' is built but never used — the call below passes
// the raw 'val' as the expected slot* and compares the returned slot* to
// 'val' by pointer identity. Looks like val_slot was intended; confirm.
493 slot *val_slot = val == NULL ? NULL : new slot(false, val);
494 return putIfMatch(key, TOMBSTONE, val) == val;
// Spec annotations for replace(key, val): write only when a mapping exists.
500 @Interface: ReplaceAny
505 void *_Old_Val = spec_table_get(map, key);
507 equals_val(__RET__, _Old_Val)
// Replace-any: MATCH_ANY policy updates only existing live mappings.
510 TypeV* replace(TypeK *key, TypeV *val) {
511 return putIfMatch(key, val, MATCH_ANY);
// Spec annotations for replace(key, oldval, newval): conditional replace.
516 @Interface: ReplaceIfMatch
518 Write_Success_Point | ReplaceIfMatch_Fail_Point
520 equals_val(spec_table_get(map, key), oldval)
522 COND_ReplaceIfMatchSucc :: __RET__ == true
526 spec_table_put(map, key, newval);
528 __COND_SAT__ ? __RET__ : !__RET__
531 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
// NOTE(review): success is decided by POINTER identity with 'oldval', not
// by valeq()-style equality — confirm this matches the spec's intent.
532 return putIfMatch(key, newval, oldval) == oldval;
// Header-slot accessors: slot 0 holds the CHM, slot 1 the hash cache.
536 static CHM* get_chm(kvs_data* kvs) {
537 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
541 static int* get_hashes(kvs_data *kvs) {
542 return (int *) kvs->_data[1].load(memory_order_relaxed);
545 // Preserve happens-before semantics on newly inserted keys
// Key of pair idx lives at data index 2*idx + 2.
546 static inline slot* key(kvs_data *kvs, int idx) {
547 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
548 // Corresponding to the volatile read in get_impl() and putIfMatch in
549 // Cliff Click's Java implementation
550 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
// Spec commentary: val()'s load is a *potential* commit point.
555 The atomic operation in val() function is a "potential" commit point,
556 which means in some case it is a real commit point while it is not for
557 some other cases. This so happens because the val() function is such a
558 fundamental function that many internal operations will call. Our
559 strategy is that we label any potential commit points and check if they
560 really are the commit points later.
562 // Preserve happens-before semantics on newly inserted values
// Value of pair idx lives at data index 2*idx + 3.
563 static inline slot* val(kvs_data *kvs, int idx) {
564 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
565 // Corresponding to the volatile read in get_impl() and putIfMatch in
566 // Cliff Click's Java implementation
567 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
570 # This is a complicated potential commit point since many many functions are
572 @Potential_commit_point_define: true
573 @Label: Read_Val_Point
// Compute a spread hash from TypeK::hashCode(); mixing constants follow
// Cliff Click's Java implementation (some mixing lines not visible here).
581 static int hash(slot *key_slot) {
582 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
583 TypeK* key = (TypeK*) key_slot->_ptr;
584 int h = key->hashCode();
585 // Spread bits according to Cliff Click's code
586 h += (h << 15) ^ 0xffffcd7d;
590 h += (h << 2) + (h << 14);
591 return h ^ (h >> 16);
594 // Heuristic to decide if reprobed too many times.
595 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
596 // put it triggers a table resize. Several places MUST have exact agreement.
597 static int reprobe_limit(int len) {
598 return REPROBE_LIMIT + (len >> 2);
// A primed slot is one being migrated to the new table.
601 static inline bool is_prime(slot *val) {
602 return (val != NULL) && val->_prime;
605 // Check for key equality. Try direct pointer comparison first (fast
606 // negative test) and then the full 'equals' call
// 'hash' is the probe index into the cached hash array; 0 there means
// "not yet memorized", so only a differing non-zero hash is a fast miss.
607 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
609 // Caller should've checked this.
610 MODEL_ASSERT (K != NULL);
611 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
614 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
616 key_ptr->equals(K->_ptr));
619 static bool valeq(slot *val_slot1, slot *val_slot2) {
620 MODEL_ASSERT (val_slot1 != NULL);
621 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
622 if (val_slot2 == NULL || ptr1 == NULL) return false;
623 return ptr1->equals(val_slot2->_ptr);
626 // Together with key() preserve the happens-before relationship on newly
// Key CAS at data index 2*idx + 2 (release on success).
// NOTE(review): release as the CAS *failure* order is not valid standard
// C++11 — confirm the model checker's atomics accept it.
628 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
629 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
630 desired, memory_order_release, memory_order_release);
// Spec commentary: only the CAS below is labeled as a potential commit point.
634 Same as the val() function, we only label the CAS operation as the
635 potential commit point.
637 // Together with val() preserve the happens-before relationship on newly
// Value CAS at data index 2*idx + 3.
639 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
641 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
642 desired, memory_order_release, memory_order_release);
644 # If it is a successful put instead of a copy or any other internal
645 # operations, expected != NULL
647 @Potential_commit_point_define: res == true
648 @Label: Write_Val_Point
// Core lookup: linear reprobe from the hashed index; returns the value slot,
// NULL on a miss, and chases a mid-copy slot into the new table.
654 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
656 int len = kvs->_size;
657 CHM *chm = get_chm(kvs);
658 int *hashes = get_hashes(kvs);
// len is a power of two, so masking implements the modulo.
660 int idx = fullhash & (len - 1);
663 slot *K = key(kvs, idx);
664 slot *V = val(kvs, idx);
667 @Commit_point_define: V == NULL
668 @Potential_commit_point_label: Read_Val_Point
669 @Label: Get_Success_Point_1
673 if (K == NULL) return NULL; // A miss
675 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
676 // Key hit! Check if table-resize in progress
680 @Commit_point_define: true
681 @Potential_commit_point_label: Read_Val_Point
682 @Label: Get_Success_Point_2
685 return (V == TOMBSTONE) ? NULL : V; // Return this value
687 // Otherwise, finish the copy & retry in the new table
688 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
689 idx, key_slot), key_slot, fullhash);
// NOTE(review): two disagreements with putIfMatch's probe loop — it uses
// reprobe_limit(len) and tests K == TOMBSTONE, while this tests the fixed
// REPROBE_LIMIT and the SEARCH key (key_slot, never TOMBSTONE). The comment
// at reprobe_limit() demands exact agreement; confirm against upstream.
692 if (++reprobe_cnt >= REPROBE_LIMIT ||
693 key_slot == TOMBSTONE) {
694 // Retry in new table
695 // Atomic read (acquire) can be here
696 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
699 @Commit_point_define_check: newkvs == NULL
700 @Label: Get_Success_Point_3
703 return newkvs == NULL ? NULL : get_impl(topmap,
704 topmap->help_copy(newkvs), key_slot, fullhash);
707 idx = (idx + 1) & (len - 1); // Reprobe by 1
711 // A wrapper of the essential function putIfMatch()
// Boxes key/value into slots, runs the match-and-put against the current
// table, and unboxes the previous value (NULL when it was a TOMBSTONE).
712 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
713 // TODO: Should throw an exception rather return NULL
714 if (old_val == NULL) {
717 slot *key_slot = new slot(false, key);
719 slot *value_slot = new slot(false, value);
720 kvs_data *kvs = _kvs.load(memory_order_acquire);
721 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
722 // Only when copy_slot() calls putIfMatch() will it return NULL
723 MODEL_ASSERT (res != NULL);
724 MODEL_ASSERT (!is_prime(res));
725 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
// Contract (spec comment):
729 Put, Remove, PutIfAbsent, etc will call this function. Return the old
730 value. If the returned value is equals to the expVal (or expVal is
731 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
732 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
733 NULL if passed a NULL expVal.
735 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
736 *key_slot, slot *val_slot, slot *expVal) {
737 MODEL_ASSERT (val_slot != NULL);
738 MODEL_ASSERT (!is_prime(val_slot));
739 MODEL_ASSERT (!is_prime(expVal));
741 int fullhash = hash(key_slot);
742 int len = kvs->_size;
743 CHM *chm = get_chm(kvs);
744 int *hashes = get_hashes(kvs);
// len is a power of two, so masking implements the modulo.
745 int idx = fullhash & (len - 1);
// Phase 1: probe for (or claim) a key slot.
753 while (true) { // Spin till we get a key slot
756 if (K == NULL) { // Get a free slot
// Removing an absent key is a no-op; do not burn a slot for it.
757 if (val_slot == TOMBSTONE) return val_slot;
758 // Claim the null key-slot
759 if (CAS_key(kvs, idx, NULL, key_slot)) {
760 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
761 hashes[idx] = fullhash; // Memorize full hash
764 K = key(kvs, idx); // CAS failed, get updated value
765 MODEL_ASSERT (K != NULL);
768 // Key slot not null, there exists a Key here
769 if (keyeq(K, key_slot, hashes, idx, fullhash))
772 // Notice that the logic here should be consistent with that of get.
773 // The first predicate means too many reprobes means nothing in the
775 if (++reprobe_cnt >= reprobe_limit(len) ||
776 K == TOMBSTONE) { // Found a Tombstone key, no more keys
777 newkvs = chm->resize(topmap, kvs);
778 // Help along an existing copy
779 if (expVal != NULL) topmap->help_copy(newkvs);
// Tail-recurse into the new table.
780 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
783 idx = (idx + 1) & (len - 1); // Reprobe
784 } // End of spinning till we get a Key slot
// Phase 2: we own a matching key slot; now race on the value.
786 if (val_slot == V) return V; // Fast cutout for no-change
788 // Here it tries to resize cause it doesn't want other threads to stop
789 // its progress (eagerly try to resize soon)
790 newkvs = chm->_newkvs.load(memory_order_acquire);
791 if (newkvs == NULL &&
792 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
793 newkvs = chm->resize(topmap, kvs); // Force the copy to start
795 // Finish the copy and then put it in the new table
797 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
798 expVal), key_slot, val_slot, expVal);
800 // Decided to update the existing table
802 MODEL_ASSERT (!is_prime(V));
// Policy check: bail out (returning V unchanged) when the expected-value
// condition of the calling operation is not met.
804 if (expVal != NO_MATCH_OLD &&
806 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
807 !(V == NULL && expVal == TOMBSTONE) &&
808 (expVal == NULL || !valeq(expVal, V))) {
811 @Commit_point_define: expVal == TOMBSTONE
812 @Potential_commit_point_label: Read_Val_Point
813 @Label: PutIfAbsent_Fail_Point
814 # This is a check for the PutIfAbsent() when the value
820 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
821 @Potential_commit_point_label: Read_Val_Point
822 @Label: RemoveIfMatch_Fail_Point
827 @Commit_point_define: !valeq(expVal, V)
828 @Potential_commit_point_label: Read_Val_Point
829 @Label: ReplaceIfMatch_Fail_Point
832 return V; // Do not update!
// The actual write: CAS the value slot from V to val_slot.
835 if (CAS_val(kvs, idx, V, val_slot)) {
838 # The only point where a successful put happens
839 @Commit_point_define: true
840 @Potential_commit_point_label: Write_Val_Point
841 @Label: Write_Success_Point
844 if (expVal != NULL) { // Not called by a table-copy
845 // CAS succeeded, should adjust size
846 // Both normal put's and table-copy calls putIfMatch, but
847 // table-copy does not increase the number of live K/V pairs
848 if ((V == NULL || V == TOMBSTONE) &&
849 val_slot != TOMBSTONE)
850 chm->_size.fetch_add(1, memory_order_relaxed);
851 if (!(V == NULL || V == TOMBSTONE) &&
852 val_slot == TOMBSTONE)
853 chm->_size.fetch_add(-1, memory_order_relaxed);
// Absent-key success is reported as TOMBSTONE so the wrapper maps it
// back to NULL for the caller.
855 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// CAS lost to a copy: finish this slot's copy and retry in the new table.
860 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
861 idx, expVal), key_slot, val_slot, expVal);
865 // Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns 'helper' untouched when no copy is running; otherwise lends one
// chunk of copy work via help_copy_impl (tail of function not visible here).
866 kvs_data* help_copy(kvs_data *helper) {
867 kvs_data *topkvs = _kvs.load(memory_order_acquire);
868 CHM *topchm = get_chm(topkvs);
869 // No copy in progress
870 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
871 topchm->help_copy_impl(this, topkvs, false);