1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
12 This header file declares and defines a simplified version of Cliff Click's
13 NonblockingHashMap. It contains all the necessary structures and main
14 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
18 template<typename TypeK, typename TypeV>
19 class cliffc_hashtable;
22 Corresponding to the Object[] array in Cliff Click's Java implementation.
23 It keeps the first two slots for CHM (Hashtable control unit) and the hash
24 records (an array of hash used for fast negative key-equality check).
// kvs_data constructor body (fragment): allocate the backing array of
// atomic slots plus a per-bucket hash cache, then publish the cache.
32 int real_size = sizeof(atomic<void*>) * 2 + 2;
// NOTE(review): the element count is presumably _size * 2 + 2 (a key and
// a value slot per bucket plus the 2 header slots); using
// sizeof(atomic<void*>) here looks unintended -- confirm.
33 _data = new atomic<void*>[real_size];
34 // The control block (slot 0, the CHM) should be initialized in resize()
35 // Init the hash record array kept in header slot 1
36 int *hashes = new int[_size];
38 for (i = 0; i < _size; i++) {
41 // Init the data slots (index 2 onward) to the NULL slot
42 for (i = 2; i < real_size; i++) {
43 _data[i].store(NULL, memory_order_relaxed);
// Release store publishes the fully-initialized hash array to readers
// that acquire-load header slot 1.
45 _data[1].store(hashes, memory_order_release);
// (destructor fragment) reload the hash-array pointer so it can be freed;
// relaxed suffices since nothing may race with destruction.
49 int *hashes = (int*) _data[1].load(memory_order_relaxed);
59 slot(bool prime, void *ptr) {
67 TypeK must have defined function "int hashCode()" which returns the hash
68 code for its object, and "int equals(TypeK anotherKey)" which is
69 used to judge equality.
70 TypeK and TypeV should define their own copy constructor.
72 template<typename TypeK, typename TypeV>
73 class cliffc_hashtable {
75 # The synchronization we have for the hashtable gives us the property of
76 # serializability, so we should have a sequential hashtable when we check the
77 # correctness. The key thing is to identify all the commit point.
82 CLASS = cliffc_hashtable;
85 spec_hashtable<TypeK, TypeV*> map;
86 spec_hashtable<TypeK, Tag> id_map;
89 map = spec_hashtable<TypeK, TypeV*>();
90 id_map = spec_hashtable<TypeK, Tag>();
93 bool equals_val(TypeV *ptr1, TypeV *ptr2) {
98 # Update the tag for the current key slot if the corresponding tag
99 # is NULL, otherwise just return that tag. It will update the next
100 # available tag too if it requires a new tag for that key slot.
101 Tag getKeyTag(TypeK &key) {
102 if (id_map.get(key) == NULL) {
103 Tag cur_tag = tag.current();
104 id_map.put(key, cur_tag);
108 return id_map.get(key);
124 PutIfAbsent(COND_PutIfAbsentSucc),
126 RemoveIfMatch(COND_RemoveIfMatchSucc),
128 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
131 Write_interface -> Read_interface
137 The control structure for the hashtable
// CHM: per-table control block (fragment). Holds the in-progress copy
// target and the counters driving the resize/copy heuristics.
141 friend class cliffc_hashtable;
// Next table being copied into during a resize; NULL when no copy runs.
143 atomic<kvs_data*> _newkvs;
145 // Size of active K,V pairs
148 // Count of used slots
151 // The next part of the table to copy
152 atomic_int _copy_idx;
154 // Work-done reporting
155 atomic_int _copy_done;
// (constructor fragment) relaxed initialization of all counters; the
// final release store presumably publishes the CHM as a whole -- confirm.
159 _newkvs.store(NULL, memory_order_relaxed);
160 _size.store(size, memory_order_relaxed);
161 _slots.store(0, memory_order_relaxed);
163 _copy_idx.store(0, memory_order_relaxed);
164 _copy_done.store(0, memory_order_release);
171 // Heuristic to decide if the table is too full
// True only when this probe chain already hit the fixed REPROBE_LIMIT
// AND the used-slot count reached the length-scaled limit. The relaxed
// load is fine: this is only a heuristic, never a correctness decision.
172 bool table_full(int reprobe_cnt, int len) {
174 reprobe_cnt >= REPROBE_LIMIT &&
175 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Install (or return the already-installed) next table for a resize.
// Many threads may race here; the CAS on _newkvs picks a single winner
// and losers adopt the winner's table.
178 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
179 kvs_data *newkvs = _newkvs.load(memory_order_acquire)
183 // No copy in-progress, start one; Only double the table size
184 int oldlen = kvs->_size;
185 int sz = _size.load(memory_order_relaxed);
188 // Just follow Cliff Click's heuristic to decide the new size
189 if (sz >= (oldlen >> 2)) { // If we are 25% full
190 newsz = oldlen << 1; // Double size
191 if (sz >= (oldlen >> 1))
192 newsz = oldlen << 2; // Double double size
195 // We do not record the resize timestamp (unlike the Java original)
196 if (newsz <= oldlen) newsz = oldlen << 1;
197 // Do not shrink ever
198 if (newsz < oldlen) newsz = oldlen;
200 // Last check cause the 'new' below is expensive
201 newkvs = _newkvs.load(memory_order_acquire);
202 if (newkvs != NULL) return newkvs;
204 newkvs = new kvs_data(newsz);
205 void *chm = (void*) new CHM(sz);
206 newkvs->_data[0].store(chm, memory_order_relaxed);
208 kvs_data *cur_newkvs;
209 // Another check after the slow allocation
210 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
212 // CAS the _newkvs to the allocated table
// NOTE(review): the two locals below are named backwards -- 'desired'
// holds the EXPECTED value (NULL) and 'expected' holds the DESIRED new
// table. The argument order of compare_exchange_strong(expected, desired)
// is functionally right, but the names should be swapped.
213 kvs_data *desired = (kvs_data*) NULL;
214 kvs_data *expected = (kvs_data*) newkvs;
// NOTE(review): memory_order_release is not a legal FAILURE order for
// compare_exchange in ISO C++ -- confirm whether the model checker
// tolerates it or it should be memory_order_relaxed.
215 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
216 memory_order_release)) {
217 // Should clean the allocated area
// Lost the race: adopt the winner's table.
219 newkvs = _newkvs.load(memory_order_acquire);
// Copy a chunk of slots from oldkvs into _newkvs, claiming work via CAS
// on _copy_idx; when copy_all is set, keep looping until the whole old
// table has been copied and (possibly) promoted.
224 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
226 assert (get_chm(oldkvs) == this);
227 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
228 int oldlen = oldkvs->_size;
// Cap each claimed chunk at 1024 slots.
229 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
231 // Just follow Cliff Click's code here
232 int panic_start = -1;
234 while (_copy_done.load(memory_order_acquire) < oldlen) {
235 copyidx = _copy_idx.load(memory_order_acquire);
236 if (panic_start == -1) { // No panic
237 copyidx = _copy_idx.load(memory_order_acquire);
// Claim [copyidx, copyidx + min_copy_work); an index at or beyond
// 2*oldlen means every chunk is claimed, so enter panic mode and
// finish the rest ourselves.
238 while (copyidx < (oldlen << 1) &&
239 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
240 min_copy_work, memory_order_release, memory_order_release))
241 copyidx = _copy_idx.load(memory_order_relaxed);
242 if (!(copyidx < (oldlen << 1)))
243 panic_start = copyidx;
246 // Now copy the chunk of work we claimed
// Mask wraps the index; copy_slot is idempotent so overlap is safe.
248 for (int i = 0; i < min_copy_work; i++)
249 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
253 copy_check_and_promote(topmap, oldkvs, workdone);
255 copyidx += min_copy_work;
// Mere helpers stop after a single chunk.
256 if (!copy_all && panic_start == -1)
257 return; // We are done with the work we claim
259 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy a single slot (the caller just saw a Prime there), credit the
// done-count, and optionally keep helping the overall table copy.
262 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
263 *oldkvs, int idx, void *should_help) {
264 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
265 // We're only here cause the caller saw a Prime
266 if (copy_slot(topmap, idx, oldkvs, newkvs))
267 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// NULL should_help marks a copy-internal caller; otherwise keep helping.
268 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Add 'workdone' slots to the copy-done count and, once the entire old
// table is copied, CAS the top-level _kvs from oldkvs to the new table.
271 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
272 oldkvs, int workdone) {
273 int oldlen = oldkvs->_size;
274 int copyDone = _copy_done.load(memory_order_relaxed);
277 copyDone = _copy_done.load(memory_order_relaxed);
278 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
279 workdone, memory_order_relaxed, memory_order_relaxed))
284 // Promote the new table to the current table
285 if (copyDone + workdone == oldlen &&
286 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
287 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// Only the thread observing exact completion promotes; a failed CAS
// just means another thread already swapped _kvs.
288 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
289 memory_order_release);
// Copy one key/value pair from oldkvs into newkvs. Returns true iff this
// thread performed the copy work (the caller then credits the done-count).
// Note the mixed bool/int returns ('return 1' vs 'return false'),
// preserved from the original.
293 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Blindly tombstone an empty key slot so no new value can appear here.
296 while ((key_slot = key(oldkvs, idx)) == NULL)
297 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
299 // First CAS old to Prime
300 slot *oldval = val(oldkvs, idx);
301 while (!is_prime(oldval)) {
// Dead slots box straight to TOMBPRIME; live values get a primed copy.
302 slot *box = (oldval == NULL || oldval == TOMBSTONE)
303 ? TOMBPRIME : new slot(true, oldval->_ptr);
304 if (CAS_val(oldkvs, idx, oldval, box)) {
305 if (box == TOMBPRIME)
306 return 1; // Copy done
307 // Otherwise we CAS'd the box
308 oldval = box; // Record updated oldval
311 oldval = val(oldkvs, idx); // Else re-try
314 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Un-prime the value and try to install it in the new table; the NULL
// expVal marks this as a copy-internal putIfMatch call.
316 slot *old_unboxed = new slot(false, oldval->_ptr);
317 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
320 // Old value is exposed in the new table
// Finally kill the old slot with TOMBPRIME so readers move to newkvs.
321 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
322 oldval = val(oldkvs, idx);
324 return copied_into_new;
331 static const int Default_Init_Size = 8; // Initial table size
// Sentinel slot pointers (defined out-of-line): wildcard markers for
// putIfMatch plus the tombstone family used by the copy machinery.
333 static slot* const MATCH_ANY;
334 static slot* const NO_MATCH_OLD;
336 static slot* const TOMBPRIME;
337 static slot* const TOMBSTONE;
339 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table.
341 atomic<kvs_data*> _kvs;
// (default constructor fragment)
345 // Should initialize the CHM for the construction of the table
346 // For other CHM in kvs_data, they should be initialized in resize()
347 // because the size is determined dynamically
348 kvs_data *kvs = new kvs_data(Default_Init_Size);
349 void *chm = (void*) new CHM(0);
350 kvs->_data[0].store(chm, memory_order_relaxed);
// Release store publishes the fully-built table to other threads.
351 _kvs.store(kvs, memory_order_release);
// Sized constructor: same as the default one, with a caller-chosen
// initial capacity (presumably a power of two -- confirm; the probe
// masks 'fullhash & (len - 1)' elsewhere require it).
354 cliffc_hashtable(int init_size) {
355 // Should initialize the CHM for the construction of the table
356 // For other CHM in kvs_data, they should be initialized in resize()
357 // because the size is determined dynamically
358 kvs_data *kvs = new kvs_data(init_size);
359 void *chm = (void*) new CHM(0);
360 kvs->_data[0].store(chm, memory_order_relaxed);
361 _kvs.store(kvs, memory_order_release);
367 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
368 @ID: __sequential.getKeyTag(key)
370 TypeV *_Old_Val = __sequential.map.get(key)
372 __sequential.equals_val(_Old_Val, __RET__)
// Lookup: box the key into a slot, probe via get_impl, and unwrap the
// result. Returns NULL on a miss (or a TOMBSTONE hit inside get_impl).
375 TypeV* get(TypeK& key) {
376 void *key_ptr = (void*) new TypeK(key);
377 slot *key_slot = new slot(false, key_ptr);
378 int fullhash = hash(key_slot);
379 kvs_data *kvs = _kvs.load(memory_order_acquire);
380 slot *V = get_impl(this, kvs, key_slot, fullhash);
381 if (V == NULL) return NULL;
// Primed (mid-copy) values never escape get_impl.
382 assert (!is_prime(V));
383 return (TypeV*) V->_ptr;
389 @Commit_point_set: Write_Val_Point
390 @ID: __sequential.getKeyTag(key)
392 # Remember this old value at checking point
393 TypeV *_Old_Val = __sequential.map.get(key)
394 __sequential.map.put(key, &val);
396 __sequential.equals_val(__RET__, _Old_Val)
// Unconditional put: NO_MATCH_OLD makes putIfMatch always install.
// Returns the previous value (NULL if none).
399 TypeV* put(TypeK& key, TypeV& val) {
400 return putIfMatch(key, val, NO_MATCH_OLD);
405 @Interface: PutIfAbsent
407 Write_Val_Point | PutIfAbsent_Fail_Point
408 @Condition: __sequential.map.get(key) == NULL
410 COND_PutIfAbsentSucc :: __RET__ == NULL
411 @ID: __sequential.getKeyTag(key)
413 TypeV *_Old_Val = __sequential.map.get(key)
415 __sequential.map.put(key, &value);
417 __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
// Insert only when the key has no live mapping; returns the previous
// value (NULL when the insert succeeded).
// Fix: the parameter is named 'value' -- the original body referenced an
// undeclared identifier 'val' and could not compile.
420 TypeV* putIfAbsent(TypeK& key, TypeV& value) {
421 return putIfMatch(key, value, TOMBSTONE);
426 @Interface: RemoveAny
427 @Commit_point_set: Write_Val_Point
428 @ID: __sequential.getKeyTag(key)
430 TypeV *_Old_Val = __sequential.map.get(key)
431 __sequential.map.put(key, NULL);
433 __sequential.equals_val(__RET__, _Old_Val)
// Remove any mapping for 'key'; returns the previous value.
// NOTE(review): TOMBSTONE (a slot*) is passed where the wrapper
// putIfMatch takes a TypeV& -- this cannot bind as written; presumably
// an overload or sentinel conversion is intended elsewhere. Confirm.
436 TypeV* remove(TypeK& key) {
437 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
442 @Interface: RemoveIfMatch
444 Write_Val_Point | RemoveIfMatch_Fail_Point
446 __sequential.equals_val(__sequential.map.get(key), &val)
448 COND_RemoveIfMatchSucc :: __RET__ == true
449 @ID: __sequential.getKeyTag(key)
452 __sequential.map.put(key, NULL);
454 __COND_SAT__ ? __RET__ : !__RET__
// Conditional remove: remove only when the current value matches 'val'.
// NOTE(review): several inconsistencies here -- 'val' is a reference so
// 'val == NULL' relies on a TypeV comparison operator; 'val_slot' is
// built but never used; and TOMBSTONE / 'val' are passed where the
// wrapper expects (TypeV&, slot*). Confirm the intended wrapper call.
457 bool remove(TypeK& key, TypeV& val) {
458 slot *val_slot = val == NULL ? NULL : new slot(false, val);
459 return putIfMatch(key, TOMBSTONE, val) == val;
465 @Interface: ReplaceAny
468 @ID: __sequential.getKeyTag(key)
470 TypeV *_Old_Val = __sequential.map.get(key)
472 __sequential.equals_val(__RET__, _Old_Val)
// Replace only if the key already has a live mapping (MATCH_ANY);
// returns the previous value.
475 TypeV* replace(TypeK& key, TypeV& val) {
476 return putIfMatch(key, val, MATCH_ANY);
481 @Interface: ReplaceIfMatch
483 Write_Val_Point | ReplaceIfMatch_Fail_Point
485 __sequential.equals_val(__sequential.map.get(key), &oldval)
487 COND_ReplaceIfMatchSucc :: __RET__ == true
488 @ID: __sequential.getKeyTag(key)
491 __sequential.map.put(key, &newval);
493 __COND_SAT__ ? __RET__ : !__RET__
// Replace only when the current value equals 'oldval'.
// NOTE(review): 'oldval' (TypeV&) is passed where the wrapper takes a
// slot*, and the slot* result is compared to it -- this does not match
// the wrapper's signature as written. Confirm.
496 bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
497 return putIfMatch(key, newval, oldval) == oldval;
// Header slot 0 of every table holds its CHM control block.
501 static CHM* get_chm(kvs_data* kvs) {
502 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
// Header slot 1 holds the cached full-hash array used by keyeq().
505 static int* get_hashes(kvs_data *kvs) {
506 return (int *) kvs->_data[1].load(memory_order_relaxed);
509 // Preserve happens-before semantics on newly inserted keys
510 static inline slot* key(kvs_data *kvs, int idx) {
511 assert (idx >= 0 && idx < kvs->_size);
512 // Corresponding to the volatile read in get_impl() and putIfMatch in
513 // Cliff Click's Java implementation
514 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
518 The atomic operation in val() function is a "potential" commit point,
519 which means in some case it is a real commit point while it is not for
520 some other cases. This so happens because the val() function is such a
521 fundamental function that many internal operation will call. Our
522 strategy is that we label any potential commit points and check if they
523 really are the commit points later.
525 // Preserve happens-before semantics on newly inserted values
// Value slots live at odd data indices (idx*2 + 3). This acquire load
// is the "potential commit point" labeled Read_Val_Point below.
526 static inline slot* val(kvs_data *kvs, int idx) {
527 assert (idx >= 0 && idx < kvs->_size);
528 // Corresponding to the volatile read in get_impl() and putIfMatch in
529 // Cliff Click's Java implementation
530 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
533 # This is a complicated potential commit point since many many functions are
535 @Potential_commit_point_define: true
536 @Label: Read_Val_Point
// Hash the boxed user key via TypeK::hashCode(), then spread its bits
// (fragment: some spreading steps fall outside this view).
544 static int hash(slot *key_slot) {
545 assert(key_slot != NULL && key_slot->_ptr != NULL);
546 TypeK* key = (TypeK*) key_slot->_ptr;
547 int h = key->hashCode();
548 // Spread bits according to Cliff Click's code
549 h += (h << 15) ^ 0xffffcd7d;
553 h += (h << 2) + (h << 14);
554 return h ^ (h >> 16);
557 // Heuristic to decide if reprobed too many times.
558 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
559 // put it triggers a table resize. Several places MUST have exact agreement.
// The limit grows with table length: REPROBE_LIMIT + len/4.
560 static int reprobe_limit(int len) {
561 return REPROBE_LIMIT + (len >> 2);
// A primed slot marks a value that is mid-copy into the new table.
564 static inline bool is_prime(slot *val) {
565 return (val != NULL) && val->_prime;
568 // Check for key equality. Try direct pointer comparison first (fast
569 // negative test) and then the full 'equals' call
570 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
572 // Caller should've checked this.
574 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
// hashes[hash] == 0 means the cached hash was never recorded; any other
// mismatch is a cheap negative. NOTE(review): the 'hash' parameter is
// actually the probe INDEX (call sites pass idx), so the name misleads;
// and equals() is handed a raw void* -- confirm TypeK's contract.
577 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
579 key_ptr->equals(K->_ptr));
// Value equality used by the conditional-update paths; false when either
// side has no live value to compare.
// Fix: the unboxed value is a TypeV, not a TypeK -- the declared pointer
// type must match the cast so 'ptr1->equals(...)' resolves against TypeV.
582 static bool valeq(slot *val_slot1, slot *val_slot2) {
583 assert (val_slot1 != NULL);
584 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
585 if (val_slot2 == NULL || ptr1 == NULL) return false;
586 return ptr1->equals(val_slot2->_ptr);
589 // Together with key() preserve the happens-before relationship on newly
// Publish a key slot with a release CAS. NOTE(review): release is not a
// legal FAILURE order for compare_exchange in ISO C++ -- confirm.
591 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
592 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
593 desired, memory_order_release, memory_order_release);
597 Same as the val() function, we only label the CAS operation as the
598 potential commit point.
600 // Together with val() preserve the happens-before relationship on newly
// Release-CAS a value slot; a successful CAS from an external put is the
// Write_Val_Point commit point labeled below.
602 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
604 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
605 desired, memory_order_release, memory_order_release);
607 # If it is a successful put instead of a copy or any other internal
608 # operations, expected != NULL
610 @Potential_commit_point_define: __ATOMIC_RET__ == true
611 @Label: Write_Val_Point
// Core lookup: probe 'kvs' for key_slot; recurse into the new table when
// a copy is in progress or the reprobe limit is exceeded.
617 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
619 int len = kvs->_size;
620 CHM *chm = get_chm(kvs);
621 int *hashes = get_hashes(kvs);
// Table length is a power of two, so masking picks the home bucket.
623 int idx = fullhash & (len - 1);
626 slot *K = key(kvs, idx);
627 slot *V = val(kvs, idx);
630 @Commit_point_define: V == NULL
631 @Potential_commit_point_label: Read_Val_Point
632 @Label: Get_Success_Point_1
636 if (V == NULL) return NULL; // A miss
638 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
639 // Key hit! Check if table-resize in progress
643 @Commit_point_define: true
644 @Potential_commit_point_label: Read_Val_Point
645 @Label: Get_Success_Point_2
648 return (V == TOMBSTONE) ? NULL : V; // Return this value
650 // Otherwise, finish the copy & retry in the new table
651 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
652 idx, key_slot), key_slot, fullhash);
// NOTE(review): 'key_slot == TOMBSTONE' compares the CALLER's key to the
// tombstone sentinel; the matching check in putIfMatch tests the probed
// key ('K == TOMBSTONE'). This looks like it should be K here -- confirm.
655 if (++reprobe_cnt >= REPROBE_LIMIT ||
656 key_slot == TOMBSTONE) {
657 // Retry in new table
658 // Atomic read (acquire) can be here
659 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
662 @Commit_point_define_check: newkvs == NULL
663 @Label: Get_Success_Point_3
666 return newkvs == NULL ? NULL : get_impl(topmap,
667 topmap->help_copy(newkvs), key_slot, fullhash);
670 idx = (idx + 1) & (len - 1); // Reprobe by 1
674 // A wrapper of the essential function putIfMatch()
// Boxes key/value into slots, runs the table-level putIfMatch, and maps
// a TOMBSTONE result back to NULL for callers.
675 TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
676 // TODO: Should throw an exception rather return NULL
677 if (old_val == NULL) {
680 void *key_ptr = (void*) new TypeK(key);
681 slot *key_slot = new slot(false, key_ptr);
683 void *val_ptr = (void*) new TypeV(value);
684 slot *value_slot = new slot(false, val_ptr);
685 kvs_data *kvs = _kvs.load(memory_order_acquire);
686 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
687 // Only when copy_slot() call putIfMatch() will it return NULL
688 assert (res != NULL);
689 assert (!is_prime(res));
690 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
694 Put, Remove, PutIfAbsent, etc will call this function. Return the old
695 value. If the returned value is equals to the expVal (or expVal is
696 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
697 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
698 NULL if passed a NULL expVal.
700 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
701 *key_slot, slot *val_slot, slot *expVal) {
702 assert (val_slot != NULL);
703 assert (!is_prime(val_slot));
704 assert (!is_prime(expVal));
706 int fullhash = hash(key_slot);
707 int len = kvs->_size;
708 CHM *chm = get_chm(kvs);
709 int *hashes = get_hashes(kvs);
710 int idx = fullhash & (len - 1);
// Phase 1: claim (or find) a key slot for key_slot.
718 while (true) { // Spin till we get a key slot
721 if (K == NULL) { // Get a free slot
// Removing a key that was never present is a no-op.
722 if (val_slot == TOMBSTONE) return val_slot;
723 // Claim the null key-slot
724 if (CAS_key(kvs, idx, NULL, key_slot)) {
725 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
726 hashes[idx] = fullhash; // Memorize full hash
729 K = key(kvs, idx); // CAS failed, get updated value
733 // Key slot not null, there exists a Key here
734 if (keyeq(K, key_slot, hashes, idx, fullhash))
737 // Notice that the logic here should be consistent with that of get.
738 // The first predicate means too many reprobes means nothing in the
// Probe chain exhausted or terminated by a key tombstone: force a
// resize and retry the whole put in the new table.
740 if (++reprobe_cnt >= reprobe_limit(len) ||
741 K == TOMBSTONE) { // Found a Tombstone key, no more keys
742 newkvs = chm->resize(topmap, kvs);
743 // Help along an existing copy
744 if (expVal != NULL) topmap->help_copy(newkvs);
745 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
748 idx = (idx + 1) & (len - 1); // Reprobe
749 } // End of spinning till we get a Key slot
// Phase 2: install the value (or bail per the match rules).
751 if (val_slot == V) return V; // Fast cutout for no-change
753 // Here it tries to resize cause it doesn't want other threads to stop
754 // its progress (eagerly try to resize soon)
755 newkvs = chm->_newkvs.load(memory_order_acquire);
756 if (newkvs == NULL &&
757 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
758 newkvs = chm->resize(topmap, kvs); // Force the copy to start
760 // Finish the copy and then put it in the new table
762 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
763 expVal), key_slot, val_slot, expVal);
765 // Decided to update the existing table
767 assert (!is_prime(V));
// Match rules: NO_MATCH_OLD always updates; MATCH_ANY needs a live
// value; TOMBSTONE (putIfAbsent) needs an absent/dead value; otherwise
// the current value must valeq the expected one.
769 if (expVal != NO_MATCH_OLD &&
771 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
772 !(V == NULL && expVal == TOMBSTONE) &&
773 (expVal == NULL || !valeq(expVal, V))) {
776 @Commit_point_define: expVal == TOMBSTONE
777 @Potential_commit_point_label: Read_Val_Point
778 @Label: PutIfAbsent_Fail_Point
779 # This is a check for the PutIfAbsent() when the value
785 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
786 @Potential_commit_point_label: Read_Val_Point
787 @Label: RemoveIfMatch_Fail_Point
792 @Commit_point_define: !valeq(expVal, V)
793 @Potential_commit_point_label: Read_Val_Point
794 @Label: ReplaceIfMatch_Fail_Point
797 return V; // Do not update!
800 if (CAS_val(kvs, idx, V, val_slot)) {
803 # The only point where a successful put happens
804 @Commit_point_define: true
805 @Potential_commit_point_label: Write_Val_Point
806 @Label: Write_Success_Point
809 if (expVal != NULL) { // Not called by a table-copy
810 // CAS succeeded, should adjust size
811 // Both normal put's and table-copy calls putIfMatch, but
812 // table-copy does not increase the number of live K/V pairs
813 if ((V == NULL || V == TOMBSTONE) &&
814 val_slot != TOMBSTONE)
815 chm->_size.fetch_add(1, memory_order_relaxed);
816 if (!(V == NULL || V == TOMBSTONE) &&
817 val_slot == TOMBSTONE)
818 chm->_size.fetch_add(-1, memory_order_relaxed);
// Callers treat TOMBSTONE as "no previous value".
820 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// CAS failed against a Prime: finish that slot's copy, retry in newkvs.
825 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
826 idx, expVal), key_slot, val_slot, expVal);
830 // Help along an existing table-resize. This is a fast cut-out wrapper.
831 kvs_data* help_copy(kvs_data *helper) {
832 kvs_data *topkvs = _kvs.load(memory_order_acquire);
833 CHM *topchm = get_chm(topkvs);
834 // No copy in progress
835 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
836 topchm->help_copy_impl(this, topkvs, false);