#ifndef CLIFFC_HASHTABLE_H
#define CLIFFC_HASHTABLE_H
This header file declares and defines a simplified version of Cliff Click's
NonblockingHashMap. It contains all the necessary structures and main
functions. The simplified_cliffc_hashtable.cc file contains the definitions for
template<typename TypeK, typename TypeV>
class cliffc_hashtable;
Corresponding to the Object[] array in Cliff Click's Java implementation.
It keeps the first two slots for the CHM (hashtable control unit) and the hash
records (an array of hashes used for fast negative key-equality checks).
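The slot layout implied by the constructor below and by the key()/val()
accessors later in this file is roughly:

    _data[0]         -> CHM (the control block)
    _data[1]         -> int[] of cached key hashes
    _data[2*i + 2]   -> key slot of pair i   (a slot*)
    _data[2*i + 3]   -> value slot of pair i (a slot*)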
// Two header slots (CHM and hashes) plus two slots per key/value pair
int real_size = _size * 2 + 2;
_data = new atomic<void*>[real_size];
// The control block should be initialized in resize()
// Init the hash record array
int *hashes = new int[_size];
for (i = 0; i < _size; i++) {
_data[1].store(hashes, memory_order_relaxed);
// Init the data slots to NULL
for (i = 2; i < real_size; i++) {
_data[i].store(NULL, memory_order_relaxed);
int *hashes = (int*) _data[1].load(memory_order_relaxed);
shared_ptr<void> _ptr;
slot(bool prime, shared_ptr<void> ptr) {
TypeK must define a function "int hashCode()" which returns the hash
code for its object, and "int equals(TypeK anotherKey)" which is
used to test equality.
TypeK and TypeV should define their own copy constructors.
To make the memory management safe and similar to Cliff Click's Java
implementation, we use shared_ptr instead of raw pointers for the
pointers that refer to TypeK and TypeV objects.
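As an illustration only (this class is not part of the hashtable), a key
type satisfying these requirements could look like the following sketch:

    class StringKey {
        std::string _str;
    public:
        StringKey(const std::string &str) : _str(str) { }
        StringKey(const StringKey &other) : _str(other._str) { } // copy constructor
        // Simple polynomial hash; any deterministic hash works here
        int hashCode() {
            int h = 0;
            for (size_t i = 0; i < _str.size(); i++)
                h = h * 31 + _str[i];
            return h;
        }
        int equals(StringKey anotherKey) { return _str == anotherKey._str; }
    };

Values are then passed in by reference and handed back as shared_ptr<TypeV>.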
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
# The synchronization we have for the hashtable gives us the property of
# serializability, so we use a sequential hashtable when we check
# correctness. The key thing is to identify all the commit points.
CLASS = cliffc_hashtable;
spec_hashtable<TypeK, TypeV*> map;
spec_hashtable<TypeK, Tag> id_map;
map = spec_hashtable<TypeK, TypeV*>();
id_map = spec_hashtable<TypeK, Tag>();
bool equals_val(TypeV *ptr1, TypeV *ptr2) {
# Update the tag for the current key slot if the corresponding tag
# is NULL, otherwise just return that tag. It also advances the next
# available tag when a new tag is needed for that key slot.
Tag getKeyTag(TypeK &key) {
if (id_map.get(key) == NULL) {
Tag cur_tag = tag.current();
id_map.put(key, cur_tag);
return id_map.get(key);
PutIfAbsent(COND_PutIfAbsentSucc),
RemoveIfMatch(COND_RemoveIfMatchSucc),
ReplaceIfMatch(COND_ReplaceIfMatchSucc)
Write_interface -> Read_interface
The control structure for the hashtable
friend class cliffc_hashtable;
atomic<kvs_data*> _newkvs;
// Size of active K,V pairs
// Count of used slots
// The next part of the table to copy
atomic_int _copy_idx;
// Work-done reporting
atomic_int _copy_done;
_size.store(size, memory_order_relaxed);
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
_copy_done.store(0, memory_order_release);
// Heuristic to decide if the table is too full
bool table_full(int reprobe_cnt, int len) {
reprobe_cnt >= REPROBE_LIMIT &&
_slots.load(memory_order_relaxed) >= reprobe_limit(len);
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// No copy in progress, so start one; first compute the new table size
int oldlen = kvs->_size;
int sz = _size.load(memory_order_relaxed);
// Just follow Cliff Click's heuristic to decide the new size
if (sz >= (oldlen >> 2)) { // If we are 25% full
newsz = oldlen << 1; // Double size
if (sz >= (oldlen >> 1))
newsz = oldlen << 2; // Double double size
// We do not record the resize timestamp
if (newsz <= oldlen) newsz = oldlen << 1;
// Do not shrink, ever
if (newsz < oldlen) newsz = oldlen;
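// A worked example of the sizing heuristic above (numbers are
// illustrative): with oldlen = 32 and sz = 10 live pairs (at least 25%
// full), newsz becomes 64; with sz = 20 (at least 50% full), newsz
// becomes 128. The two guards above only ever grow or keep the table size.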
// Last check, because the 'new' below is expensive
newkvs = _newkvs.load(memory_order_acquire);
if (newkvs != NULL) return newkvs;
newkvs = new kvs_data(newsz);
void *chm = (void*) new CHM(sz);
newkvs->_data[0].store(chm, memory_order_relaxed);
kvs_data *cur_newkvs;
// Another check after the slow allocation
if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
// CAS _newkvs from NULL to the newly allocated table
kvs_data *expected = (kvs_data*) NULL;
kvs_data *desired = (kvs_data*) newkvs;
if (!_newkvs.compare_exchange_strong(expected, desired, memory_order_release,
memory_order_release)) {
// Should clean up the allocated area
newkvs = _newkvs.load(memory_order_acquire);
void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
assert (get_chm(oldkvs) == this);
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
int oldlen = oldkvs->_size;
int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
// Just follow Cliff Click's code here
int panic_start = -1;
while (_copy_done.load(memory_order_acquire) < oldlen) {
copyidx = _copy_idx.load(memory_order_acquire);
if (panic_start == -1) { // No panic
copyidx = _copy_idx.load(memory_order_acquire);
while (copyidx < (oldlen << 1) &&
!_copy_idx.compare_exchange_strong(copyidx, copyidx +
min_copy_work, memory_order_release, memory_order_release))
copyidx = _copy_idx.load(memory_order_relaxed);
if (!(copyidx < (oldlen << 1)))
panic_start = copyidx;
// Now copy the chunk of work we claimed
for (int i = 0; i < min_copy_work; i++)
if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
copy_check_and_promote(topmap, oldkvs, workdone);
copyidx += min_copy_work;
if (!copy_all && panic_start == -1)
return; // We are done with the work we claimed
copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
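// An illustrative walk-through of the claiming protocol above: with
// oldlen = 4096, min_copy_work is 1024, so each helper CASes _copy_idx
// forward by 1024 and copies the 1024 slots it claimed. Once _copy_idx
// reaches 2 * oldlen, a thread stops claiming (the "panic" case) and, if
// copy_all is set or a panic started, it keeps looping until _copy_done
// reaches oldlen.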
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
*oldkvs, int idx, void *should_help) {
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// We're only here because the caller saw a Prime
if (copy_slot(topmap, idx, oldkvs, newkvs))
copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
oldkvs, int workdone) {
int oldlen = oldkvs->_size;
int copyDone = _copy_done.load(memory_order_relaxed);
copyDone = _copy_done.load(memory_order_relaxed);
if (_copy_done.compare_exchange_weak(copyDone, copyDone +
workdone, memory_order_relaxed, memory_order_relaxed))
// Promote the new table to the current table
if (copyDone + workdone == oldlen &&
topmap->_kvs.load(memory_order_acquire) == oldkvs)
topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs, memory_order_release,
memory_order_release);
bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
while ((key_slot = key(oldkvs, idx)) == NULL)
CAS_key(oldkvs, idx, NULL, TOMBSTONE);
// First CAS the old value to a Prime
slot *oldval = val(oldkvs, idx);
while (!is_prime(oldval)) {
slot *box = (oldval == NULL || oldval == TOMBSTONE)
? TOMBPRIME : new slot(true, oldval->_ptr);
if (CAS_val(oldkvs, idx, oldval, box)) {
if (box == TOMBPRIME)
return true; // Copy is done
// Otherwise we CAS'd the box
oldval = box; // Record the updated oldval
oldval = val(oldkvs, idx); // Else re-try
if (oldval == TOMBPRIME) return false; // Copy already completed here
slot *old_unboxed = new slot(false, oldval->_ptr);
int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
// The old value is now exposed in the new table
while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
oldval = val(oldkvs, idx);
return copied_into_new;
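// As a summary sketch of copy_slot() above, an old-table value slot moves
// through roughly these states:
//   NULL / TOMBSTONE  --CAS-->  TOMBPRIME            (dead slot, copy done)
//   V                 --CAS-->  primed [V]  --put into new table-->  TOMBPRIME
// A reader that observes a primed value knows the slot is mid-copy and must
// consult the new table (see copy_slot_and_check()).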
static const int Default_Init_Size = 8; // Initial table size
static slot* const MATCH_ANY;
static slot* const NO_MATCH_OLD;
static slot* const TOMBPRIME;
static slot* const TOMBSTONE;
static const int REPROBE_LIMIT = 10; // Forces a table-resize
atomic<kvs_data*> _kvs;
// Initialize the CHM of this initial table during construction.
// The CHMs of later kvs_data tables are initialized in resize()
// because their sizes are determined dynamically.
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
_kvs.store(kvs, memory_order_release);
cliffc_hashtable(int init_size) {
// Initialize the CHM of this initial table during construction.
// The CHMs of later kvs_data tables are initialized in resize()
// because their sizes are determined dynamically.
kvs_data *kvs = new kvs_data(init_size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
_kvs.store(kvs, memory_order_release);
@Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
@ID: __sequential.getKeyTag(key)
TypeV *_Old_Val = __sequential.map.get(key)
__sequential.equals_val(_Old_Val, __RET__)
shared_ptr<TypeV> get(TypeK& key) {
void *key_ptr = (void*) new TypeK(key);
slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
int fullhash = hash(key_slot);
slot *V = get_impl(this, _kvs, key_slot, fullhash);
if (V == NULL) return NULL; // A miss
assert (!is_prime(V));
return static_pointer_cast<TypeV>(V->_ptr);
@Commit_point_set: Write_Val_Point
@ID: __sequential.getKeyTag(key)
# Remember this old value at the checking point
TypeV *_Old_Val = __sequential.map.get(key)
__sequential.map.put(key, &val);
__sequential.equals_val(__RET__, _Old_Val)
shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
return putIfMatch(key, val, NO_MATCH_OLD);
@Interface: PutIfAbsent
Write_Val_Point | PutIfAbsent_Fail_Point
@Condition: __sequential.map.get(key) == NULL
COND_PutIfAbsentSucc :: __RET__ == NULL
@ID: __sequential.getKeyTag(key)
TypeV *_Old_Val = __sequential.map.get(key)
__sequential.map.put(key, &value);
__COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
return putIfMatch(key, value, TOMBSTONE);
@Interface: RemoveAny
@Commit_point_set: Write_Val_Point
@ID: __sequential.getKeyTag(key)
TypeV *_Old_Val = __sequential.map.get(key)
__sequential.map.put(key, NULL);
__sequential.equals_val(__RET__, _Old_Val)
shared_ptr<TypeV> remove(TypeK& key) {
return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
@Interface: RemoveIfMatch
Write_Val_Point | RemoveIfMatch_Fail_Point
__sequential.equals_val(__sequential.map.get(key), &val)
COND_RemoveIfMatchSucc :: __RET__ == true
@ID: __sequential.getKeyTag(key)
__sequential.map.put(key, NULL);
__COND_SAT__ ? __RET__ : !__RET__
bool remove(TypeK& key, TypeV& val) {
slot *val_slot = val == NULL ? NULL : new slot(false, val);
return putIfMatch(key, TOMBSTONE, val) == val;
@Interface: ReplaceAny
@ID: __sequential.getKeyTag(key)
TypeV *_Old_Val = __sequential.map.get(key)
__sequential.equals_val(__RET__, _Old_Val)
shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
return putIfMatch(key, val, MATCH_ANY);
@Interface: ReplaceIfMatch
Write_Val_Point | ReplaceIfMatch_Fail_Point
__sequential.equals_val(__sequential.map.get(key), &oldval)
COND_ReplaceIfMatchSucc :: __RET__ == true
@ID: __sequential.getKeyTag(key)
__sequential.map.put(key, &newval);
__COND_SAT__ ? __RET__ : !__RET__
bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
return putIfMatch(key, newval, oldval) == oldval;
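/*
An illustrative usage sketch of the public interface above. MyKey and MyVal
are placeholder types assumed to provide hashCode(), equals() and a copy
constructor as described earlier, and make_key()/make_val() are hypothetical
helpers that produce them; the initial size should be a power of two because
indices are computed with "fullhash & (len - 1)":

    cliffc_hashtable<MyKey, MyVal> table(32);
    MyKey k = make_key();
    MyVal v1 = make_val(1), v2 = make_val(2);

    table.put(k, v1);                        // k now maps to a copy of v1
    shared_ptr<MyVal> cur = table.get(k);    // NULL shared_ptr on a miss
    table.putIfAbsent(k, v2);                // no-op, k is already present
    table.replace(k, v1, v2);                // succeeds only if k maps to v1
    table.remove(k);                         // returns the previous value
*/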
static CHM* get_chm(kvs_data* kvs) {
return (CHM*) kvs->_data[0].load(memory_order_relaxed);
static int* get_hashes(kvs_data *kvs) {
return (int *) kvs->_data[1].load(memory_order_relaxed);
// Preserve happens-before semantics on newly inserted keys
static inline slot* key(kvs_data *kvs, int idx) {
assert (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
The atomic operation in the val() function is a "potential" commit point,
which means that in some cases it is a real commit point while in other
cases it is not. This happens because val() is such a fundamental
function that many internal operations call it. Our strategy is to label
all potential commit points and check later whether they really are
commit points.
// Preserve happens-before semantics on newly inserted values
static inline slot* val(kvs_data *kvs, int idx) {
assert (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
# This is a complicated potential commit point since many functions are
@Potential_commit_point_define: true
@Label: Read_Val_Point
static int hash(slot *key_slot) {
assert(key_slot != NULL && key_slot->_ptr != NULL);
shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
int h = key->hashCode();
// Spread bits according to Cliff Click's code
h += (h << 15) ^ 0xffffcd7d;
h += (h << 2) + (h << 14);
return h ^ (h >> 16);
// Heuristic to decide if we have reprobed too many times.
// Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
// put it triggers a table resize. Several places MUST have exact agreement.
static int reprobe_limit(int len) {
return REPROBE_LIMIT + (len >> 2);
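// For example, with the default REPROBE_LIMIT of 10, a table of len = 32
// allows 10 + 8 = 18 probes and a table of len = 1024 allows 10 + 256 = 266
// probes before a get treats the key as missing or a put forces a resize.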
static inline bool is_prime(slot *val) {
return (val != NULL) && val->_prime;
// Check for key equality. Try direct pointer comparison first (fast
// negative test) and then the full 'equals' call
static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
// Caller should've checked this.
shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
((hashes[hash] == 0 || hashes[hash] == fullhash) &&
key_ptr->equals(K->_ptr));
static bool valeq(slot *val_slot1, slot *val_slot2) {
assert (val_slot1 != NULL);
shared_ptr<TypeV> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
if (val_slot2 == NULL || ptr1 == NULL) return false;
return ptr1->equals(val_slot2->_ptr);
// Together with key(), preserve the happens-before relationship on newly
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
desired, memory_order_release, memory_order_release);
As with the val() function, we only label the CAS operation as the
potential commit point.
// Together with val(), preserve the happens-before relationship on newly
static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
desired, memory_order_release, memory_order_release);
# If it is a successful put instead of a copy or any other internal
# operation, expected != NULL
@Potential_commit_point_define: __ATOMIC_RET__ == true
@Label: Write_Val_Point
slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
int len = kvs->_size;
CHM *chm = get_chm(kvs);
int *hashes = get_hashes(kvs);
int idx = fullhash & (len - 1);
slot *K = key(kvs, idx);
slot *V = val(kvs, idx);
@Commit_point_define: V == NULL
@Potential_commit_point_label: Read_Val_Point
@Label: Get_Success_Point_1
if (V == NULL) return NULL; // A miss
if (keyeq(K, key_slot, hashes, idx, fullhash)) {
// Key hit! Check if table-resize in progress
@Commit_point_define: true
@Potential_commit_point_label: Read_Val_Point
@Label: Get_Success_Point_2
return (V == TOMBSTONE) ? NULL : V; // Return this value
// Otherwise, finish the copy & retry in the new table
return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
idx, key_slot), key_slot, fullhash);
if (++reprobe_cnt >= REPROBE_LIMIT ||
K == TOMBSTONE) { // Found a Tombstone key, no more keys
// Retry in the new table
// Atomic read (acquire) can be here
kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
@Commit_point_define_check: newkvs == NULL
@Label: Get_Success_Point_3
return newkvs == NULL ? NULL : get_impl(topmap,
topmap->help_copy(newkvs), key_slot, fullhash);
idx = (idx + 1) & (len - 1); // Reprobe by 1
// A wrapper around the core putIfMatch() function
shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
// TODO: Should throw an exception rather than return NULL
if (old_val == NULL) {
void *key_ptr = (void*) new TypeK(key);
slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
void *val_ptr = (void*) new TypeV(value);
slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
// Only when copy_slot() calls putIfMatch() will it return NULL
assert (res != NULL);
assert (!is_prime(res));
return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
Put, Remove, PutIfAbsent, etc. call this function. It returns the old
value. If the returned value is equal to expVal (or expVal is
NO_MATCH_OLD), then this function has stored val_slot into the table 'kvs'.
Only copy_slot will pass a NULL expVal, and putIfMatch only returns
NULL if passed a NULL expVal.
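For reference, the public wrappers above reach this function with the
following (value, expVal) sentinels, as can be read off their definitions:

    put(key, val)           -> (val_slot,   NO_MATCH_OLD)
    putIfAbsent(key, value) -> (value_slot, TOMBSTONE)
    remove(key)             -> (TOMBSTONE,  NO_MATCH_OLD)
    replace(key, val)       -> (val_slot,   MATCH_ANY)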
static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
*key_slot, slot *val_slot, slot *expVal) {
assert (val_slot != NULL);
assert (!is_prime(val_slot));
assert (!is_prime(expVal));
int fullhash = hash(key_slot);
int len = kvs->_size;
CHM *chm = get_chm(kvs);
int *hashes = get_hashes(kvs);
int idx = fullhash & (len - 1);
while (true) { // Spin till we get a key slot
if (K == NULL) { // Got a free slot
if (val_slot == TOMBSTONE) return val_slot;
// Claim the null key-slot
if (CAS_key(kvs, idx, NULL, key_slot)) {
chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
hashes[idx] = fullhash; // Memorize full hash
K = key(kvs, idx); // CAS failed, get the updated value
// Key slot is not null, so a key already exists here
if (keyeq(K, key_slot, hashes, idx, fullhash))
// Note that the logic here should be consistent with that of get.
// The first predicate means that too many reprobes imply nothing is in the
if (++reprobe_cnt >= reprobe_limit(len) ||
K == TOMBSTONE) { // Found a Tombstone key, no more keys
newkvs = chm->resize(topmap, kvs);
// Help along an existing copy
if (expVal != NULL) topmap->help_copy(newkvs);
return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
idx = (idx + 1) & (len - 1); // Reprobe
} // End of spinning till we get a key slot
if (val_slot == V) return V; // Fast cutout for no-change
// Here it tries to resize because it doesn't want other threads to stall
// its progress (eagerly try to resize soon)
newkvs = chm->_newkvs.load(memory_order_acquire);
if (newkvs == NULL &&
((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
newkvs = chm->resize(topmap, kvs); // Force the copy to start
// Finish the copy and then put it in the new table
return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
expVal), key_slot, val_slot, expVal);
// Decided to update the existing table
assert (!is_prime(V));
if (expVal != NO_MATCH_OLD &&
(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
!(V == NULL && expVal == TOMBSTONE) &&
(expVal == NULL || !valeq(expVal, V))) {
@Commit_point_define: expVal == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: PutIfAbsent_Fail_Point
# This is a check for PutIfAbsent() when the value
@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: RemoveIfMatch_Fail_Point
@Commit_point_define: !valeq(expVal, V)
@Potential_commit_point_label: Read_Val_Point
@Label: ReplaceIfMatch_Fail_Point
return V; // Do not update!
if (CAS_val(kvs, idx, V, val_slot)) {
# The only point where a successful put happens
@Commit_point_define: true
@Potential_commit_point_label: Write_Val_Point
@Label: Write_Success_Point
if (expVal != NULL) { // Not called by a table-copy
// CAS succeeded, so adjust the size
// Both normal puts and table-copy call putIfMatch, but
// table-copy does not increase the number of live K/V pairs
if ((V == NULL || V == TOMBSTONE) &&
val_slot != TOMBSTONE)
chm->_size.fetch_add(1, memory_order_relaxed);
if (!(V == NULL || V == TOMBSTONE) &&
val_slot == TOMBSTONE)
chm->_size.fetch_add(-1, memory_order_relaxed);
return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
idx, expVal), key_slot, val_slot, expVal);
// Help along an existing table-resize. This is a fast cut-out wrapper.
kvs_data* help_copy(kvs_data *helper) {
kvs_data *topkvs = _kvs.load(memory_order_acquire);
CHM *topchm = get_chm(topkvs);
// No copy in progress
if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
topchm->help_copy_impl(this, topkvs, false);