1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
23 This header file declares and defines a simplified version of Cliff Click's
24 NonblockingHashMap. It contains all the necessary structrues and main
25 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
29 template<typename TypeK, typename TypeV>
30 class cliffc_hashtable;
33 Corresponding the the Object[] array in Cliff Click's Java implementation.
34 It keeps the first two slots for CHM (Hashtable control unit) and the hash
35 records (an array of hash used for fast negative key-equality check).
43 int real_size = sz * 2 + 2;
44 _data = new atomic<void*>[real_size];
45 // The control block should be initialized in resize()
46 // Init the hash record array
47 int *hashes = new int[_size];
49 for (i = 0; i < _size; i++) {
52 // Init the data to Null slot
53 for (i = 2; i < real_size; i++) {
54 _data[i].store(NULL, memory_order_relaxed);
56 _data[1].store(hashes, memory_order_release);
60 int *hashes = (int*) _data[1].load(memory_order_relaxed);
70 slot(bool prime, void *ptr) {
78 TypeK must have defined function "int hashCode()" which return the hash
79 code for the its object, and "int equals(TypeK anotherKey)" which is
80 used to judge equality.
81 TypeK and TypeV should define their own copy constructor.
88 template<typename TypeK, typename TypeV>
89 class cliffc_hashtable {
91 # The synchronization we have for the hashtable gives us the property of
92 # serializability, so we should have a sequential hashtable when we check the
93 # correctness. The key thing is to identify all the commit point.
98 CLASS = cliffc_hashtable;
105 map = new_spec_table_default(equals_key);
106 id_map = new_spec_table_default(equals_id);
110 bool equals_key(void *ptr1, void *ptr2) {
111 TypeK *key1 = (TypeK*) ptr1,
112 *key2 = (TypeK*) ptr2;
113 if (key1 == NULL || key2 == NULL)
115 return key1->equals(key2);
119 bool equals_val(void *ptr1, void *ptr2) {
120 TypeV *val1 = (TypeV*) ptr1,
121 *val2 = (TypeV*) ptr2;
122 if (val1 == NULL || val2 == NULL)
124 return val1->equals(val2);
128 bool equals_id(void *ptr1, void *ptr2) {
129 id_tag_t *id1 = (id_tag_t*) ptr1,
130 *id2 = (id_tag_t*) ptr2;
131 if (id1 == NULL || id2 == NULL)
137 # Update the tag for the current key slot if the corresponding tag
138 # is NULL, otherwise just return that tag. It will update the next
139 # available tag too if it requires a new tag for that key slot.
140 id_tag_t getKeyTag(TypeK *key) {
141 if (spec_table_contains(id_map, key)) {
142 id_tag_t *cur_tag = MODEL_MALLOC(sizeof(id_tag_t));
143 *cur_tag = current(tag);
144 spec_table_put(id_map, key, cur_tag);
148 id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
165 PutIfAbsent(COND_PutIfAbsentSucc),
167 RemoveIfMatch(COND_RemoveIfMatchSucc),
169 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
172 //Write_interface -> Read_interface
180 The control structure for the hashtable
184 friend class cliffc_hashtable;
186 atomic<kvs_data*> _newkvs;
188 // Size of active K,V pairs
191 // Count of used slots
194 // The next part of the table to copy
195 atomic_int _copy_idx;
197 // Work-done reporting
198 atomic_int _copy_done;
202 _newkvs.store(NULL, memory_order_relaxed);
203 _size.store(size, memory_order_relaxed);
204 _slots.store(0, memory_order_relaxed);
206 _copy_idx.store(0, memory_order_relaxed);
207 _copy_done.store(0, memory_order_release);
214 // Heuristic to decide if the table is too full
215 bool table_full(int reprobe_cnt, int len) {
217 reprobe_cnt >= REPROBE_LIMIT &&
218 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
221 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
222 //model_print("resizing...\n");
223 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
227 // No copy in-progress, start one; Only double the table size
228 int oldlen = kvs->_size;
229 int sz = _size.load(memory_order_relaxed);
232 // Just follow Cliff Click's heuristic to decide the new size
233 if (sz >= (oldlen >> 2)) { // If we are 25% full
234 newsz = oldlen << 1; // Double size
235 if (sz >= (oldlen >> 1))
236 newsz = oldlen << 2; // Double double size
239 // We do not record the record timestamp
240 if (newsz <= oldlen) newsz = oldlen << 1;
241 // Do not shrink ever
242 if (newsz < oldlen) newsz = oldlen;
244 // Last check cause the 'new' below is expensive
245 newkvs = _newkvs.load(memory_order_acquire);
246 if (newkvs != NULL) return newkvs;
248 newkvs = new kvs_data(newsz);
249 void *chm = (void*) new CHM(sz);
250 newkvs->_data[0].store(chm, memory_order_release);
252 kvs_data *cur_newkvs;
253 // Another check after the slow allocation
254 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
256 // CAS the _newkvs to the allocated table
257 kvs_data *desired = (kvs_data*) NULL;
258 kvs_data *expected = (kvs_data*) newkvs;
259 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
260 memory_order_release)) {
261 // Should clean the allocated area
263 newkvs = _newkvs.load(memory_order_acquire);
268 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
270 MODEL_ASSERT (get_chm(oldkvs) == this);
271 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
272 int oldlen = oldkvs->_size;
273 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
275 // Just follow Cliff Click's code here
276 int panic_start = -1;
278 while (_copy_done.load(memory_order_acquire) < oldlen) {
279 copyidx = _copy_idx.load(memory_order_acquire);
280 if (panic_start == -1) { // No painc
281 copyidx = _copy_idx.load(memory_order_acquire);
282 while (copyidx < (oldlen << 1) &&
283 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
284 min_copy_work, memory_order_release, memory_order_release))
285 copyidx = _copy_idx.load(memory_order_relaxed);
286 if (!(copyidx < (oldlen << 1)))
287 panic_start = copyidx;
290 // Now copy the chunk of work we claimed
292 for (int i = 0; i < min_copy_work; i++)
293 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
297 copy_check_and_promote(topmap, oldkvs, workdone);
299 copyidx += min_copy_work;
300 if (!copy_all && panic_start == -1)
301 return; // We are done with the work we claim
303 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
306 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
307 *oldkvs, int idx, void *should_help) {
308 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
309 // We're only here cause the caller saw a Prime
310 if (copy_slot(topmap, idx, oldkvs, newkvs))
311 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
312 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
315 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
316 oldkvs, int workdone) {
317 int oldlen = oldkvs->_size;
318 int copyDone = _copy_done.load(memory_order_relaxed);
321 copyDone = _copy_done.load(memory_order_relaxed);
322 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
323 workdone, memory_order_relaxed, memory_order_relaxed))
328 // Promote the new table to the current table
329 if (copyDone + workdone == oldlen &&
330 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
331 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
332 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
333 memory_order_release);
337 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
340 while ((key_slot = key(oldkvs, idx)) == NULL)
341 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
343 // First CAS old to Prime
344 slot *oldval = val(oldkvs, idx);
345 while (!is_prime(oldval)) {
346 slot *box = (oldval == NULL || oldval == TOMBSTONE)
347 ? TOMBPRIME : new slot(true, oldval->_ptr);
348 if (CAS_val(oldkvs, idx, oldval, box)) {
349 if (box == TOMBPRIME)
350 return 1; // Copy done
351 // Otherwise we CAS'd the box
352 oldval = box; // Record updated oldval
355 oldval = val(oldkvs, idx); // Else re-try
358 if (oldval == TOMBPRIME) return false; // Copy already completed here
360 slot *old_unboxed = new slot(false, oldval->_ptr);
361 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
364 // Old value is exposed in the new table
365 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
366 oldval = val(oldkvs, idx);
368 return copied_into_new;
375 static const int Default_Init_Size = 4; // Intial table size
377 static slot* const MATCH_ANY;
378 static slot* const NO_MATCH_OLD;
380 static slot* const TOMBPRIME;
381 static slot* const TOMBSTONE;
383 static const int REPROBE_LIMIT = 10; // Forces a table-resize
385 atomic<kvs_data*> _kvs;
389 // Should initialize the CHM for the construction of the table
390 // For other CHM in kvs_data, they should be initialzed in resize()
391 // because the size is determined dynamically
392 kvs_data *kvs = new kvs_data(Default_Init_Size);
393 void *chm = (void*) new CHM(0);
394 kvs->_data[0].store(chm, memory_order_relaxed);
395 _kvs.store(kvs, memory_order_release);
398 cliffc_hashtable(int init_size) {
399 // Should initialize the CHM for the construction of the table
400 // For other CHM in kvs_data, they should be initialzed in resize()
401 // because the size is determined dynamically
402 kvs_data *kvs = new kvs_data(init_size);
403 void *chm = (void*) new CHM(0);
404 kvs->_data[0].store(chm, memory_order_relaxed);
405 _kvs.store(kvs, memory_order_release);
411 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
414 void *_Old_Val = spec_table_get(map, key);
416 equals_val(_Old_Val, __RET__)
419 TypeV* get(TypeK *key) {
420 slot *key_slot = new slot(false, key);
421 int fullhash = hash(key_slot);
422 kvs_data *kvs = _kvs.load(memory_order_acquire);
423 slot *V = get_impl(this, kvs, key_slot, fullhash);
424 if (V == NULL) return NULL;
425 MODEL_ASSERT (!is_prime(V));
426 return (TypeV*) V->_ptr;
432 @Commit_point_set: Write_Success_Point
435 # Remember this old value at checking point
436 void *_Old_Val = spec_table_get(map, key);
437 spec_table_put(map, key, val);
439 equals_val(__RET__, _Old_Val)
442 TypeV* put(TypeK *key, TypeV *val) {
443 return putIfMatch(key, val, NO_MATCH_OLD);
448 @Interface: PutIfAbsent
450 Write_Success_Point | PutIfAbsent_Fail_Point
451 @Condition: !spec_table_contains(map, key)
453 COND_PutIfAbsentSucc :: __RET__ == NULL
456 void *_Old_Val = spec_table_get(map, key);
458 spec_table_put(map, key, value);
460 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
463 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
464 return putIfMatch(key, val, TOMBSTONE);
469 @Interface: RemoveAny
470 @Commit_point_set: Write_Success_Point
473 void *_Old_Val = spec_table_get(map, key);
474 spec_table_put(map, key, NULL);
476 equals_val(__RET__, _Old_Val)
479 TypeV* remove(TypeK *key) {
480 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
485 @Interface: RemoveIfMatch
487 Write_Success_Point | RemoveIfMatch_Fail_Point
489 equals_val(spec_table_get(map, key), val)
491 COND_RemoveIfMatchSucc :: __RET__ == true
495 spec_table_put(map, key, NULL);
497 __COND_SAT__ ? __RET__ : !__RET__
500 bool remove(TypeK *key, TypeV *val) {
501 slot *val_slot = val == NULL ? NULL : new slot(false, val);
502 return putIfMatch(key, TOMBSTONE, val) == val;
508 @Interface: ReplaceAny
513 void *_Old_Val = spec_table_get(map, key);
515 equals_val(__RET__, _Old_Val)
518 TypeV* replace(TypeK *key, TypeV *val) {
519 return putIfMatch(key, val, MATCH_ANY);
524 @Interface: ReplaceIfMatch
526 Write_Success_Point | ReplaceIfMatch_Fail_Point
528 equals_val(spec_table_get(map, key), oldval)
530 COND_ReplaceIfMatchSucc :: __RET__ == true
534 spec_table_put(map, key, newval);
536 __COND_SAT__ ? __RET__ : !__RET__
539 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
540 return putIfMatch(key, newval, oldval) == oldval;
544 static CHM* get_chm(kvs_data* kvs) {
545 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
549 static int* get_hashes(kvs_data *kvs) {
550 return (int *) kvs->_data[1].load(memory_order_relaxed);
553 // Preserve happens-before semantics on newly inserted keys
554 static inline slot* key(kvs_data *kvs, int idx) {
555 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
556 // Corresponding to the volatile read in get_impl() and putIfMatch in
557 // Cliff Click's Java implementation
558 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
563 The atomic operation in val() function is a "potential" commit point,
564 which means in some case it is a real commit point while it is not for
565 some other cases. This so happens because the val() function is such a
566 fundamental function that many internal operation will call. Our
567 strategy is that we label any potential commit points and check if they
568 really are the commit points later.
570 // Preserve happens-before semantics on newly inserted values
571 static inline slot* val(kvs_data *kvs, int idx) {
572 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
573 // Corresponding to the volatile read in get_impl() and putIfMatch in
574 // Cliff Click's Java implementation
575 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
578 # This is a complicated potential commit point since many many functions are
580 @Potential_commit_point_define: true
581 @Label: Read_Val_Point
589 static int hash(slot *key_slot) {
590 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
591 TypeK* key = (TypeK*) key_slot->_ptr;
592 int h = key->hashCode();
593 // Spread bits according to Cliff Click's code
594 h += (h << 15) ^ 0xffffcd7d;
598 h += (h << 2) + (h << 14);
599 return h ^ (h >> 16);
602 // Heuristic to decide if reprobed too many times.
603 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
604 // put it triggers a table resize. Several places MUST have exact agreement.
605 static int reprobe_limit(int len) {
606 return REPROBE_LIMIT + (len >> 2);
609 static inline bool is_prime(slot *val) {
610 return (val != NULL) && val->_prime;
613 // Check for key equality. Try direct pointer comparison first (fast
614 // negative teset) and then the full 'equals' call
615 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
617 // Caller should've checked this.
618 MODEL_ASSERT (K != NULL);
619 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
622 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
624 key_ptr->equals(K->_ptr));
627 static bool valeq(slot *val_slot1, slot *val_slot2) {
628 MODEL_ASSERT (val_slot1 != NULL);
629 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
630 if (val_slot2 == NULL || ptr1 == NULL) return false;
631 return ptr1->equals(val_slot2->_ptr);
634 // Together with key() preserve the happens-before relationship on newly
636 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
637 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
638 desired, memory_order_release, memory_order_release);
642 Same as the val() function, we only label the CAS operation as the
643 potential commit point.
645 // Together with val() preserve the happens-before relationship on newly
647 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
649 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
650 desired, memory_order_release, memory_order_release);
652 # If it is a successful put instead of a copy or any other internal
653 # operantions, expected != NULL
655 @Potential_commit_point_define: res == true
656 @Label: Write_Val_Point
662 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
664 int len = kvs->_size;
665 CHM *chm = get_chm(kvs);
666 int *hashes = get_hashes(kvs);
668 int idx = fullhash & (len - 1);
671 slot *K = key(kvs, idx);
672 slot *V = val(kvs, idx);
675 @Commit_point_define: V == NULL
676 @Potential_commit_point_label: Read_Val_Point
677 @Label: Get_Success_Point_1
681 if (K == NULL) return NULL; // A miss
683 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
684 // Key hit! Check if table-resize in progress
688 @Commit_point_define: true
689 @Potential_commit_point_label: Read_Val_Point
690 @Label: Get_Success_Point_2
693 return (V == TOMBSTONE) ? NULL : V; // Return this value
695 // Otherwise, finish the copy & retry in the new table
696 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
697 idx, key_slot), key_slot, fullhash);
700 if (++reprobe_cnt >= REPROBE_LIMIT ||
701 key_slot == TOMBSTONE) {
702 // Retry in new table
703 // Atomic read (acquire) can be here
704 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
707 @Commit_point_define_check: newkvs == NULL
708 @Label: Get_Success_Point_3
711 return newkvs == NULL ? NULL : get_impl(topmap,
712 topmap->help_copy(newkvs), key_slot, fullhash);
715 idx = (idx + 1) & (len - 1); // Reprobe by 1
719 // A wrapper of the essential function putIfMatch()
720 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
721 // TODO: Should throw an exception rather return NULL
722 if (old_val == NULL) {
725 slot *key_slot = new slot(false, key);
727 slot *value_slot = new slot(false, value);
728 kvs_data *kvs = _kvs.load(memory_order_acquire);
729 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
730 // Only when copy_slot() call putIfMatch() will it return NULL
731 MODEL_ASSERT (res != NULL);
732 MODEL_ASSERT (!is_prime(res));
733 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
737 Put, Remove, PutIfAbsent, etc will call this function. Return the old
738 value. If the returned value is equals to the expVal (or expVal is
739 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
740 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
741 NULL if passed a NULL expVal.
743 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
744 *key_slot, slot *val_slot, slot *expVal) {
745 MODEL_ASSERT (val_slot != NULL);
746 MODEL_ASSERT (!is_prime(val_slot));
747 MODEL_ASSERT (!is_prime(expVal));
749 int fullhash = hash(key_slot);
750 int len = kvs->_size;
751 CHM *chm = get_chm(kvs);
752 int *hashes = get_hashes(kvs);
753 int idx = fullhash & (len - 1);
761 while (true) { // Spin till we get a key slot
764 if (K == NULL) { // Get a free slot
765 if (val_slot == TOMBSTONE) return val_slot;
766 // Claim the null key-slot
767 if (CAS_key(kvs, idx, NULL, key_slot)) {
768 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
769 hashes[idx] = fullhash; // Memorize full hash
772 K = key(kvs, idx); // CAS failed, get updated value
773 MODEL_ASSERT (K != NULL);
776 // Key slot not null, there exists a Key here
777 if (keyeq(K, key_slot, hashes, idx, fullhash))
780 // Notice that the logic here should be consistent with that of get.
781 // The first predicate means too many reprobes means nothing in the
783 if (++reprobe_cnt >= reprobe_limit(len) ||
784 K == TOMBSTONE) { // Found a Tombstone key, no more keys
785 newkvs = chm->resize(topmap, kvs);
786 // Help along an existing copy
787 if (expVal != NULL) topmap->help_copy(newkvs);
788 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
791 idx = (idx + 1) & (len - 1); // Reprobe
792 } // End of spinning till we get a Key slot
794 if (val_slot == V) return V; // Fast cutout for no-change
796 // Here it tries to resize cause it doesn't want other threads to stop
797 // its progress (eagerly try to resize soon)
798 newkvs = chm->_newkvs.load(memory_order_acquire);
799 if (newkvs == NULL &&
800 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
801 newkvs = chm->resize(topmap, kvs); // Force the copy to start
803 // Finish the copy and then put it in the new table
805 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
806 expVal), key_slot, val_slot, expVal);
808 // Decided to update the existing table
810 MODEL_ASSERT (!is_prime(V));
812 if (expVal != NO_MATCH_OLD &&
814 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
815 !(V == NULL && expVal == TOMBSTONE) &&
816 (expVal == NULL || !valeq(expVal, V))) {
819 @Commit_point_define: expVal == TOMBSTONE
820 @Potential_commit_point_label: Read_Val_Point
821 @Label: PutIfAbsent_Fail_Point
822 # This is a check for the PutIfAbsent() when the value
828 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
829 @Potential_commit_point_label: Read_Val_Point
830 @Label: RemoveIfMatch_Fail_Point
835 @Commit_point_define: !valeq(expVal, V)
836 @Potential_commit_point_label: Read_Val_Point
837 @Label: ReplaceIfMatch_Fail_Point
840 return V; // Do not update!
843 if (CAS_val(kvs, idx, V, val_slot)) {
846 # The only point where a successful put happens
847 @Commit_point_define: true
848 @Potential_commit_point_label: Write_Val_Point
849 @Label: Write_Success_Point
852 if (expVal != NULL) { // Not called by a table-copy
853 // CAS succeeded, should adjust size
854 // Both normal put's and table-copy calls putIfMatch, but
855 // table-copy does not increase the number of live K/V pairs
856 if ((V == NULL || V == TOMBSTONE) &&
857 val_slot != TOMBSTONE)
858 chm->_size.fetch_add(1, memory_order_relaxed);
859 if (!(V == NULL || V == TOMBSTONE) &&
860 val_slot == TOMBSTONE)
861 chm->_size.fetch_add(-1, memory_order_relaxed);
863 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
868 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
869 idx, expVal), key_slot, val_slot, expVal);
873 // Help along an existing table-resize. This is a fast cut-out wrapper.
874 kvs_data* help_copy(kvs_data *helper) {
875 kvs_data *topkvs = _kvs.load(memory_order_acquire);
876 CHM *topchm = get_chm(topkvs);
877 // No cpy in progress
878 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
879 topchm->help_copy_impl(this, topkvs, false);