1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
12 This header file declares and defines a simplified version of Cliff Click's
13 NonblockingHashMap. It contains all the necessary structures and main
14 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
18 template<typename TypeK, typename TypeV>
19 class cliffc_hashtable;
22 Corresponding to the Object[] array in Cliff Click's Java implementation.
23 It keeps the first two slots for CHM (Hashtable control unit) and the hash
24 records (an array of hash used for fast negative key-equality check).
32 int real_size = sizeof(atomic<void*>) * 2 + 2;
33 _data = new atomic<void*>[real_size];
34 // The control block should be initialized in resize()
35 // Init the hash record array
36 int *hashes = new int[_size];
38 for (i = 0; i < _size; i++) {
41 // Init the data to Null slot
42 for (i = 2; i < real_size; i++) {
43 _data[i].store(NULL, memory_order_relaxed);
45 _data[1].store(hashes, memory_order_release);
49 int *hashes = (int*) _data[1].load(memory_order_relaxed);
59 slot(bool prime, void *ptr) {
67 TypeK must have defined function "int hashCode()" which returns the hash
68 code for its object, and "int equals(TypeK anotherKey)" which is
69 used to judge equality.
70 TypeK and TypeV should define their own copy constructor.
72 template<typename TypeK, typename TypeV>
73 class cliffc_hashtable {
75 # The synchronization we have for the hashtable gives us the property of
76 # serializability, so we should have a sequential hashtable when we check the
77 # correctness. The key thing is to identify all the commit point.
82 CLASS = cliffc_hashtable;
85 spec_hashtable<TypeK, TypeV*> map;
86 spec_hashtable<TypeK, Tag> id_map;
89 map = spec_hashtable<TypeK, TypeV*>();
90 id_map = spec_hashtable<TypeK, TypeV*>();
93 bool equals_val(TypeV *ptr1, TypeV *ptr2) {
98 # Update the tag for the current key slot if the corresponding tag
99 # is NULL, otherwise just return that tag. It will update the next
100 # available tag too if it requires a new tag for that key slot.
101 Tag getKeyTag(TypeK &key) {
102 if (id_map.get(key) == NULL) {
103 Tag cur_tag = tag.current();
104 id_map.put(key, cur_tag);
108 return id_map.get(key);
124 PutIfAbsent(COND_PutIfAbsentSucc),
126 RemoveIfMatch(COND_RemoveIfMatchSucc),
128 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
131 Write_interface -> Read_interface
137 The control structure for the hashtable
141 friend class cliffc_hashtable;
143 atomic<kvs_data*> _newkvs;
145 // Size of active K,V pairs
148 // Count of used slots
151 // The next part of the table to copy
152 atomic_int _copy_idx;
154 // Work-done reporting
155 atomic_int _copy_done;
159 _size.store(size, memory_order_relaxed);
160 _slots.store(0, memory_order_relaxed);
162 _copy_idx.store(0, memory_order_relaxed);
163 _copy_done.store(0, memory_order_release);
170 // Heuristic to decide if the table is too full
171 bool table_full(int reprobe_cnt, int len) {
173 reprobe_cnt >= REPROBE_LIMIT &&
174 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
177 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
178 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
182 // No copy in-progress, start one; Only double the table size
183 int oldlen = kvs->_size;
184 int sz = _size.load(memory_order_relaxed);
187 // Just follow Cliff Click's heuristic to decide the new size
188 if (sz >= (oldlen >> 2)) { // If we are 25% full
189 newsz = oldlen << 1; // Double size
190 if (sz >= (oldlen >> 1))
191 newsz = oldlen << 2; // Double double size
194 // We do not record the record timestamp
195 if (newsz <= oldlen) newsz = oldlen << 1;
196 // Do not shrink ever
197 if (newsz < oldlen) newsz = oldlen;
199 // Last check cause the 'new' below is expensive
200 newkvs = _newkvs.load(memory_order_acquire);
201 if (newkvs != NULL) return newkvs;
203 newkvs = new kvs_data(newsz);
204 void *chm = (void*) new CHM(sz);
205 newkvs->_data[0].store(chm, memory_order_relaxed);
207 kvs_data *cur_newkvs;
208 // Another check after the slow allocation
209 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
211 // CAS the _newkvs to the allocated table
// NOTE(review): the names below are inverted relative to their CAS roles —
// 'desired' holds the *expected* value (NULL) and 'expected' holds the
// *desired* value (newkvs). The call is functionally a CAS from NULL to
// newkvs, but the naming is misleading; consider swapping the names.
212 kvs_data *desired = (kvs_data*) NULL;
213 kvs_data *expected = (kvs_data*) newkvs;
// NOTE(review): memory_order_release is not a valid *failure* ordering for
// compare_exchange (the failure order may not be release/acq_rel); the
// second ordering argument should be relaxed or acquire — confirm and fix.
214 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
215 memory_order_release)) {
// NOTE(review): on a lost race the freshly allocated kvs_data and CHM appear
// to be leaked ("should clean" is only a comment) — verify cleanup exists in
// the lines not visible in this excerpt.
216 // Should clean the allocated area
218 newkvs = _newkvs.load(memory_order_acquire);
223 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
225 assert (get_chm(oldkvs) == this);
226 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
227 int oldlen = oldkvs->_size;
228 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
230 // Just follow Cliff Click's code here
231 int panic_start = -1;
233 while (_copy_done.load(memory_order_acquire) < oldlen) {
234 copyidx = _copy_idx.load(memory_order_acquire);
235 if (panic_start == -1) { // No painc
236 copyidx = _copy_idx.load(memory_order_acquire);
237 while (copyidx < (oldlen << 1) &&
238 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
239 min_copy_work, memory_order_release, memory_order_release))
240 copyidx = _copy_idx.load(memory_order_relaxed);
241 if (!(copyidx < (oldlen << 1)))
242 panic_start = copyidx;
245 // Now copy the chunk of work we claimed
247 for (int i = 0; i < min_copy_work; i++)
248 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
252 copy_check_and_promote(topmap, oldkvs, workdone);
254 copyidx += min_copy_work;
255 if (!copy_all && panic_start == -1)
256 return; // We are done with the work we claim
258 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
261 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
262 *oldkvs, int idx, void *should_help) {
263 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
264 // We're only here cause the caller saw a Prime
265 if (copy_slot(topmap, idx, oldkvs, newkvs))
266 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
267 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
270 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
271 oldkvs, int workdone) {
272 int oldlen = oldkvs->_size;
273 int copyDone = _copy_done.load(memory_order_relaxed);
276 copyDone = _copy_done.load(memory_order_relaxed);
277 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
278 workdone, memory_order_relaxed, memory_order_relaxed))
283 // Promote the new table to the current table
// Last copier: once all oldlen slots are reported copied and the top-level
// table still points at oldkvs, swing topmap->_kvs to the new table.
284 if (copyDone + workdone == oldlen &&
285 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
286 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// NOTE(review): memory_order_release is not a valid *failure* ordering for
// compare_exchange; the failure order should be relaxed or acquire.
287 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
288 memory_order_release);
292 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
295 while ((key_slot = key(oldkvs, idx)) == NULL)
296 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
298 // First CAS old to Prime
299 slot *oldval = val(oldkvs, idx);
300 while (!is_prime(oldval)) {
301 slot *box = (oldval == NULL || oldval == TOMBSTONE)
302 ? TOMBPRIME : new slot(true, oldval->_ptr);
303 if (CAS_val(oldkvs, idx, oldval, box)) {
304 if (box == TOMBPRIME)
305 return 1; // Copy done
306 // Otherwise we CAS'd the box
307 oldval = box; // Record updated oldval
310 oldval = val(oldkvs, idx); // Else re-try
313 if (oldval == TOMBPRIME) return false; // Copy already completed here
315 slot *old_unboxed = new slot(false, oldval->_ptr);
316 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
319 // Old value is exposed in the new table
320 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
321 oldval = val(oldkvs, idx);
323 return copied_into_new;
330 static const int Default_Init_Size = 8; // Intial table size
332 static slot* const MATCH_ANY;
333 static slot* const NO_MATCH_OLD;
335 static slot* const TOMBPRIME;
336 static slot* const TOMBSTONE;
338 static const int REPROBE_LIMIT = 10; // Forces a table-resize
340 atomic<kvs_data*> _kvs;
344 // Should initialize the CHM for the construction of the table
345 // For other CHM in kvs_data, they should be initialized in resize()
346 // because the size is determined dynamically
347 kvs_data *kvs = new kvs_data(Default_Init_Size);
348 void *chm = (void*) new CHM(0);
349 kvs->_data[0].store(chm, memory_order_relaxed);
350 _kvs.store(kvs, memory_order_release);
353 cliffc_hashtable(int init_size) {
354 // Should initialize the CHM for the construction of the table
355 // For other CHM in kvs_data, they should be initialized in resize()
356 // because the size is determined dynamically
357 kvs_data *kvs = new kvs_data(init_size);
358 void *chm = (void*) new CHM(0);
359 kvs->_data[0].store(chm, memory_order_relaxed);
360 _kvs.store(kvs, memory_order_release);
366 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
367 @ID: __sequential.getKeyTag(key)
369 TypeV *_Old_Val = __sequential.map.get(key)
371 __sequential.equals_val(_Old_Val, __RET__)
374 TypeV* get(TypeK& key) {
375 void *key_ptr = (void*) new TypeK(key);
376 slot *key_slot = new slot(false, key_ptr);
377 int fullhash = hash(key_slot);
378 kvs_data *kvs = _kvs.load(memory_order_acquire);
379 slot *V = get_impl(this, kvs, key_slot, fullhash);
380 if (V == NULL) return NULL;
381 assert (!is_prime(V));
382 return (TypeV*) V->_ptr;
388 @Commit_point_set: Write_Val_Point
389 @ID: __sequential.getKeyTag(key)
391 # Remember this old value at checking point
392 TypeV *_Old_Val = __sequential.map.get(key)
393 __sequential.map.put(key, &val);
395 __sequential.equals_val(__RET__, _Old_Val)
398 TypeV* put(TypeK& key, TypeV& val) {
399 return putIfMatch(key, val, NO_MATCH_OLD);
404 @Interface: PutIfAbsent
406 Write_Val_Point | PutIfAbsent_Fail_Point
407 @Condition: __sequential.map.get(key) == NULL
409 COND_PutIfAbsentSucc :: __RET__ == NULL
410 @ID: __sequential.getKeyTag(key)
412 TypeV *_Old_Val = __sequential.map.get(key)
414 __sequential.map.put(key, &value);
416 __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
// Insert the mapping only when the key is currently absent; returns the
// existing value (NULL indicates the insert succeeded).
419 TypeV* putIfAbsent(TypeK& key, TypeV& value) {
// BUG(review): the parameter is named 'value' but 'val' is passed here —
// 'val' is not declared in this scope; this should read
// putIfMatch(key, value, TOMBSTONE).
420 return putIfMatch(key, val, TOMBSTONE);
425 @Interface: RemoveAny
426 @Commit_point_set: Write_Val_Point
427 @ID: __sequential.getKeyTag(key)
429 TypeV *_Old_Val = __sequential.map.get(key)
430 __sequential.map.put(key, NULL);
432 __sequential.equals_val(__RET__, _Old_Val)
435 TypeV* remove(TypeK& key) {
436 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
441 @Interface: RemoveIfMatch
443 Write_Val_Point | RemoveIfMatch_Fail_Point
445 __sequential.equals_val(__sequential.map.get(key), &val)
447 COND_RemoveIfMatchSucc :: __RET__ == true
448 @ID: __sequential.getKeyTag(key)
451 __sequential.map.put(key, NULL);
453 __COND_SAT__ ? __RET__ : !__RET__
// Conditional remove: delete the mapping only if the current value matches
// 'val'; returns true on success.
456 bool remove(TypeK& key, TypeV& val) {
// NOTE(review): several problems in this fragment — 'val' is a TypeV&, so
// 'val == NULL' relies on TypeV defining such a comparison; 'val_slot' is
// constructed but never used; and line 458 passes raw 'val' where the
// slot*-taking putIfMatch presumably expects 'val_slot'. Confirm against the
// original implementation before relying on this path.
457 slot *val_slot = val == NULL ? NULL : new slot(false, val);
458 return putIfMatch(key, TOMBSTONE, val) == val;
464 @Interface: ReplaceAny
467 @ID: __sequential.getKeyTag(key)
469 TypeV *_Old_Val = __sequential.map.get(key)
471 __sequential.equals_val(__RET__, _Old_Val)
474 TypeV* replace(TypeK& key, TypeV& val) {
475 return putIfMatch(key, val, MATCH_ANY);
480 @Interface: ReplaceIfMatch
482 Write_Val_Point | ReplaceIfMatch_Fail_Point
484 __sequential.equals_val(__sequential.map.get(key), &oldval)
486 COND_ReplaceIfMatchSucc :: __RET__ == true
487 @ID: __sequential.getKeyTag(key)
490 __sequential.map.put(key, &newval);
492 __COND_SAT__ ? __RET__ : !__RET__
495 bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
496 return putIfMatch(key, newval, oldval) == oldval;
500 static CHM* get_chm(kvs_data* kvs) {
501 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
504 static int* get_hashes(kvs_data *kvs) {
505 return (int *) kvs->_data[1].load(memory_order_relaxed);
508 // Preserve happens-before semantics on newly inserted keys
509 static inline slot* key(kvs_data *kvs, int idx) {
510 assert (idx >= 0 && idx < kvs->_size);
511 // Corresponding to the volatile read in get_impl() and putIfMatch in
512 // Cliff Click's Java implementation
513 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
517 The atomic operation in val() function is a "potential" commit point,
518 which means in some case it is a real commit point while it is not for
519 some other cases. This so happens because the val() function is such a
520 fundamental function that many internal operation will call. Our
521 strategy is that we label any potential commit points and check if they
522 really are the commit points later.
524 // Preserve happens-before semantics on newly inserted values
525 static inline slot* val(kvs_data *kvs, int idx) {
526 assert (idx >= 0 && idx < kvs->_size);
527 // Corresponding to the volatile read in get_impl() and putIfMatch in
528 // Cliff Click's Java implementation
529 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
532 # This is a complicated potential commit point since many many functions are
534 @Potential_commit_point_define: true
535 @Label: Read_Val_Point
543 static int hash(slot *key_slot) {
544 assert(key_slot != NULL && key_slot->_ptr != NULL);
545 TypeK* key = (TypeK*) key_slot->_ptr;
546 int h = key->hashCode();
547 // Spread bits according to Cliff Click's code
548 h += (h << 15) ^ 0xffffcd7d;
552 h += (h << 2) + (h << 14);
553 return h ^ (h >> 16);
556 // Heuristic to decide if reprobed too many times.
557 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
558 // put it triggers a table resize. Several places MUST have exact agreement.
559 static int reprobe_limit(int len) {
560 return REPROBE_LIMIT + (len >> 2);
563 static inline bool is_prime(slot *val) {
564 return (val != NULL) && val->_prime;
567 // Check for key equality. Try direct pointer comparison first (fast
568 // negative test) and then the full 'equals' call
569 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
571 // Caller should've checked this.
573 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
576 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
578 key_ptr->equals(K->_ptr));
// Value equality for two value slots: val_slot1 must be non-NULL (asserted);
// returns false when either side carries no underlying value, otherwise
// defers to the value type's equals().
581 static bool valeq(slot *val_slot1, slot *val_slot2) {
582 assert (val_slot1 != NULL);
// BUG(review): declared as TypeK* but cast to TypeV* — the types disagree;
// this should presumably be 'TypeV* ptr1 = (TypeV*) val_slot1->_ptr;'.
583 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
584 if (val_slot2 == NULL || ptr1 == NULL) return false;
585 return ptr1->equals(val_slot2->_ptr);
588 // Together with key() preserve the happens-before relationship on newly
// Atomically install a key slot at index idx (key slots live at data[2*idx+2]).
590 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
// NOTE(review): memory_order_release is not a valid *failure* ordering for
// compare_exchange (failure order may not be release/acq_rel); the second
// ordering argument should be relaxed or acquire.
591 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
592 desired, memory_order_release, memory_order_release);
596 Same as the val() function, we only label the CAS operation as the
597 potential commit point.
599 // Together with val() preserve the happens-before relationship on newly
// Atomically install a value slot at index idx (value slots live at data[2*idx+3]).
601 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
// NOTE(review): memory_order_release is not a valid *failure* ordering for
// compare_exchange; the failure order should be relaxed or acquire.
603 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
604 desired, memory_order_release, memory_order_release);
606 # If it is a successful put instead of a copy or any other internal
607 # operations, expected != NULL
609 @Potential_commit_point_define: __ATOMIC_RET__ == true
610 @Label: Write_Val_Point
616 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
618 int len = kvs->_size;
619 CHM *chm = get_chm(kvs);
620 int *hashes = get_hashes(kvs);
622 int idx = fullhash & (len - 1);
625 slot *K = key(kvs, idx);
626 slot *V = val(kvs, idx);
629 @Commit_point_define: V == NULL
630 @Potential_commit_point_label: Read_Val_Point
631 @Label: Get_Success_Point_1
635 if (V == NULL) return NULL; // A miss
637 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
638 // Key hit! Check if table-resize in progress
642 @Commit_point_define: true
643 @Potential_commit_point_label: Read_Val_Point
644 @Label: Get_Success_Point_2
647 return (V == TOMBSTONE) ? NULL : V; // Return this value
649 // Otherwise, finish the copy & retry in the new table
650 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
651 idx, key_slot), key_slot, fullhash);
654 if (++reprobe_cnt >= REPROBE_LIMIT ||
655 key_slot == TOMBSTONE) {
656 // Retry in new table
657 // Atomic read (acquire) can be here
658 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
661 @Commit_point_define_check: newkvs == NULL
662 @Label: Get_Success_Point_3
665 return newkvs == NULL ? NULL : get_impl(topmap,
666 topmap->help_copy(newkvs), key_slot, fullhash);
669 idx = (idx + 1) & (len - 1); // Reprobe by 1
673 // A wrapper of the essential function putIfMatch()
674 TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
675 // TODO: Should throw an exception rather return NULL
676 if (old_val == NULL) {
679 void *key_ptr = (void*) new TypeK(key);
680 slot *key_slot = new slot(false, key_ptr);
682 void *val_ptr = (void*) new TypeV(value);
683 slot *value_slot = new slot(false, val_ptr);
684 kvs_data *kvs = _kvs.load(memory_order_acquire);
685 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
686 // Only when copy_slot() call putIfMatch() will it return NULL
687 assert (res != NULL);
688 assert (!is_prime(res));
689 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
693 Put, Remove, PutIfAbsent, etc will call this function. Return the old
694 value. If the returned value is equals to the expVal (or expVal is
695 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
696 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
697 NULL if passed a NULL expVal.
699 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
700 *key_slot, slot *val_slot, slot *expVal) {
701 assert (val_slot != NULL);
702 assert (!is_prime(val_slot));
703 assert (!is_prime(expVal));
705 int fullhash = hash(key_slot);
706 int len = kvs->_size;
707 CHM *chm = get_chm(kvs);
708 int *hashes = get_hashes(kvs);
709 int idx = fullhash & (len - 1);
717 while (true) { // Spin till we get a key slot
720 if (K == NULL) { // Get a free slot
721 if (val_slot == TOMBSTONE) return val_slot;
722 // Claim the null key-slot
723 if (CAS_key(kvs, idx, NULL, key_slot)) {
724 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
725 hashes[idx] = fullhash; // Memorize full hash
728 K = key(kvs, idx); // CAS failed, get updated value
732 // Key slot not null, there exists a Key here
733 if (keyeq(K, key_slot, hashes, idx, fullhash))
736 // Notice that the logic here should be consistent with that of get.
737 // The first predicate means too many reprobes means nothing in the
739 if (++reprobe_cnt >= reprobe_limit(len) ||
740 K == TOMBSTONE) { // Found a Tombstone key, no more keys
741 newkvs = chm->resize(topmap, kvs);
742 // Help along an existing copy
743 if (expVal != NULL) topmap->help_copy(newkvs);
744 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
747 idx = (idx + 1) & (len - 1); // Reprobe
748 } // End of spinning till we get a Key slot
750 if (val_slot == V) return V; // Fast cutout for no-change
752 // Here it tries to resize cause it doesn't want other threads to stop
753 // its progress (eagerly try to resize soon)
754 newkvs = chm->_newkvs.load(memory_order_acquire);
755 if (newkvs == NULL &&
756 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
757 newkvs = chm->resize(topmap, kvs); // Force the copy to start
759 // Finish the copy and then put it in the new table
761 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
762 expVal), key_slot, val_slot, expVal);
764 // Decided to update the existing table
766 assert (!is_prime(V));
768 if (expVal != NO_MATCH_OLD &&
770 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
771 !(V == NULL && expVal == TOMBSTONE) &&
772 (expVal == NULL || !valeq(expVal, V))) {
775 @Commit_point_define: expVal == TOMBSTONE
776 @Potential_commit_point_label: Read_Val_Point
777 @Label: PutIfAbsent_Fail_Point
778 # This is a check for the PutIfAbsent() when the value
784 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
785 @Potential_commit_point_label: Read_Val_Point
786 @Label: RemoveIfMatch_Fail_Point
791 @Commit_point_define: !valeq(expVal, V)
792 @Potential_commit_point_label: Read_Val_Point
793 @Label: ReplaceIfMatch_Fail_Point
796 return V; // Do not update!
799 if (CAS_val(kvs, idx, V, val_slot)) {
802 # The only point where a successful put happens
803 @Commit_point_define: true
804 @Potential_commit_point_label: Write_Val_Point
805 @Label: Write_Success_Point
808 if (expVal != NULL) { // Not called by a table-copy
809 // CAS succeeded, should adjust size
810 // Both normal put's and table-copy calls putIfMatch, but
811 // table-copy does not increase the number of live K/V pairs
812 if ((V == NULL || V == TOMBSTONE) &&
813 val_slot != TOMBSTONE)
814 chm->_size.fetch_add(1, memory_order_relaxed);
815 if (!(V == NULL || V == TOMBSTONE) &&
816 val_slot == TOMBSTONE)
817 chm->_size.fetch_add(-1, memory_order_relaxed);
819 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
824 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
825 idx, expVal), key_slot, val_slot, expVal);
829 // Help along an existing table-resize. This is a fast cut-out wrapper.
830 kvs_data* help_copy(kvs_data *helper) {
831 kvs_data *topkvs = _kvs.load(memory_order_acquire);
832 CHM *topchm = get_chm(topkvs);
833 // No copy in progress
834 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
835 topchm->help_copy_impl(this, topkvs, false);