1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
18 This header file declares and defines a simplified version of Cliff Click's
19 NonblockingHashMap. It contains all the necessary structures and main
20 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
// Forward declaration: kvs_data/slot and CHM below refer back to the table type.
24 template<typename TypeK, typename TypeV>
25 class cliffc_hashtable;
28 Corresponding to the Object[] array in Cliff Click's Java implementation.
29 It keeps the first two slots for CHM (Hashtable control unit) and the hash
30 records (an array of hash used for fast negative key-equality check).
// kvs_data constructor fragment (listing has gaps): allocates 2*sz+2 atomic
// slots — slot 0 is reserved for the CHM control block, slot 1 for the int
// hash-cache array, and key/value pairs occupy indices 2..real_size-1.
38 int real_size = sz * 2 + 2;
39 _data = new atomic<void*>[real_size];
40 // The control block should be initialized in resize()
41 // Init the hash record array
42 int *hashes = new int[_size];
44 for (i = 0; i < _size; i++) {
47 // Init the data to Null slot
// Relaxed stores are sufficient here: the array is not yet published.
48 for (i = 2; i < real_size; i++) {
49 _data[i].store(NULL, memory_order_relaxed);
51 _data[1].store(hashes, memory_order_relaxed);
// Destructor fragment: reclaims the hash-cache array stored in slot 1.
55 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: boxed (prime-flag, payload) pair used for both keys and values;
// the prime flag marks a value as mid-copy during a table resize.
65 slot(bool prime, void *ptr) {
73 TypeK must have defined function "int hashCode()" which returns the hash
74 code for its object, and "int equals(TypeK anotherKey)" which is
75 used to judge equality.
76 TypeK and TypeV should define their own copy constructor.
83 template<typename TypeK, typename TypeV>
84 class cliffc_hashtable {
86 # The synchronization we have for the hashtable gives us the property of
87 # serializability, so we should have a sequential hashtable when we check the
88 # correctness. The key thing is to identify all the commit point.
89 # (Spec-annotation block consumed by the CDSChecker specification compiler.)
93 CLASS = cliffc_hashtable;
// Sequential spec state: a reference table keyed by TypeK equality, plus an
// id table used to assign call-id tags per key.
100 map = new_spec_table_default(equals_key);
101 id_map = new_spec_table_default(equals_id);
// Spec helper: key equality via TypeK::equals; NULL compares unequal.
105 bool equals_key(void *ptr1, void *ptr2) {
106 TypeK *key1 = (TypeK*) ptr1,
107 *key2 = (TypeK*) ptr2;
108 if (key1 == NULL || key2 == NULL)
110 return key1->equals(key2);
// Spec helper: value equality via TypeV::equals; NULL compares unequal.
114 bool equals_val(void *ptr1, void *ptr2) {
117 TypeV *val1 = (TypeV*) ptr1,
118 *val2 = (TypeV*) ptr2;
119 if (val1 == NULL || val2 == NULL)
121 return val1->equals(val2);
// Spec helper: identity-tag equality (compares the .tag fields).
125 bool equals_id(void *ptr1, void *ptr2) {
126 id_tag_t *id1 = (id_tag_t*) ptr1,
127 *id2 = (id_tag_t*) ptr2;
128 if (id1 == NULL || id2 == NULL)
130 return (*id1).tag == (*id2).tag;
134 # Update the tag for the current key slot if the corresponding tag
135 # is NULL, otherwise just return that tag. It will update the next
136 # available tag too if it requires a new tag for that key slot.
137 call_id_t getKeyTag(TypeK *key) {
138 if (!spec_table_contains(id_map, key)) {
139 call_id_t cur_id = current(tag);
140 spec_table_put(id_map, key, (void*) cur_id);
144 call_id_t res = (call_id_t) spec_table_get(id_map, key);
156 The control structure for the hashtable
// CHM: one per kvs_data table; tracks live-pair count, used key slots, and
// the resize/copy bookkeeping (next table, copy cursor, copy progress).
160 friend class cliffc_hashtable;
// The next (larger) table being copied into; NULL while no resize is running.
162 atomic<kvs_data*> _newkvs;
164 // Size of active K,V pairs
167 // Count of used slots
170 // The next part of the table to copy
171 atomic_int _copy_idx;
173 // Work-done reporting
174 atomic_int _copy_done;
// Constructor fragment: all counters start relaxed — the CHM is published
// together with its table, so no ordering is needed during init.
178 _newkvs.store(NULL, memory_order_relaxed);
179 _size.store(size, memory_order_relaxed);
180 _slots.store(0, memory_order_relaxed);
182 _copy_idx.store(0, memory_order_relaxed);
183 _copy_done.store(0, memory_order_relaxed);
190 // Heuristic to decide if the table is too full
// True only when the probe chain is long AND used slots exceed the reprobe
// limit for this table length (visible expression is partial in the listing).
191 bool table_full(int reprobe_cnt, int len) {
193 reprobe_cnt >= REPROBE_LIMIT &&
194 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// resize: create (or join) the next, larger table. Classic check / allocate /
// re-check / CAS-publish pattern — only one thread's table wins the CAS, the
// losers adopt the winner's table. The acquire loads pair with the release
// CAS so readers see a fully initialized new table.
197 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
198 //model_print("resizing...\n");
199 /**** FIXME: miss ****/
200 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
204 // No copy in-progress, start one; Only double the table size
205 int oldlen = kvs->_size;
206 int sz = _size.load(memory_order_relaxed);
209 // Just follow Cliff Click's heuristic to decide the new size
210 if (sz >= (oldlen >> 2)) { // If we are 25% full
211 newsz = oldlen << 1; // Double size
212 if (sz >= (oldlen >> 1))
213 newsz = oldlen << 2; // Double double size
216 // We do not record the record timestamp
217 if (newsz <= oldlen) newsz = oldlen << 1;
218 // Do not shrink ever
219 if (newsz < oldlen) newsz = oldlen;
221 // Last check cause the 'new' below is expensive
222 /**** FIXME: miss ****/
223 newkvs = _newkvs.load(memory_order_acquire);
224 //model_print("hey1\n");
225 if (newkvs != NULL) return newkvs;
// Allocate the new table and its control block before publishing it.
227 newkvs = new kvs_data(newsz);
228 void *chm = (void*) new CHM(sz);
229 //model_print("hey2\n");
230 newkvs->_data[0].store(chm, memory_order_relaxed);
232 kvs_data *cur_newkvs;
233 // Another check after the slow allocation
234 /**** FIXME: miss ****/
235 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
237 // CAS the _newkvs to the allocated table
// NOTE(review): 'desired'/'expected' names look swapped relative to their
// values here (desired=NULL, expected=newkvs) — verify against the full
// source before relying on these names.
238 kvs_data *desired = (kvs_data*) NULL;
239 kvs_data *expected = (kvs_data*) newkvs;
240 /**** FIXME: miss ****/
241 //model_print("release in resize!\n");
242 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
243 memory_order_relaxed)) {
244 // Should clean the allocated area
246 /**** FIXME: miss ****/
247 newkvs = _newkvs.load(memory_order_acquire);
// help_copy_impl: claim chunks of the old table (min_copy_work slots at a
// time) via CAS on _copy_idx and copy them into the new table; loops until
// all oldlen slots are reported copied, then tries to promote the new table.
252 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
254 MODEL_ASSERT (get_chm(oldkvs) == this);
255 /**** FIXME: miss ****/
256 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
257 int oldlen = oldkvs->_size;
// Cap each claimed chunk at 1024 slots, per Cliff Click's original code.
258 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
260 // Just follow Cliff Click's code here
261 int panic_start = -1;
263 while (_copy_done.load(memory_order_relaxed) < oldlen) {
264 copyidx = _copy_idx.load(memory_order_relaxed);
265 if (panic_start == -1) { // No panic
266 copyidx = _copy_idx.load(memory_order_relaxed);
// CAS-claim the next chunk; on failure re-read the cursor and retry.
267 while (copyidx < (oldlen << 1) &&
268 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
269 min_copy_work, memory_order_relaxed, memory_order_relaxed))
270 copyidx = _copy_idx.load(memory_order_relaxed);
// Cursor ran past 2*oldlen: switch to panic mode and sweep everything.
271 if (!(copyidx < (oldlen << 1)))
272 panic_start = copyidx;
275 // Now copy the chunk of work we claimed
277 for (int i = 0; i < min_copy_work; i++)
278 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
282 copy_check_and_promote(topmap, oldkvs, workdone);
284 copyidx += min_copy_work;
285 if (!copy_all && panic_start == -1)
286 return; // We are done with the work we claim
288 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// copy_slot_and_check: copy one slot (caller saw a Prime there), record the
// progress, and optionally help finish the whole copy before returning the
// new table for the caller to retry against.
291 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
292 *oldkvs, int idx, void *should_help) {
293 /**** FIXME: miss ****/
294 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
295 // We're only here cause the caller saw a Prime
296 if (copy_slot(topmap, idx, oldkvs, newkvs))
297 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
298 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// copy_check_and_promote: add 'workdone' slots to the copy-progress counter
// (CAS loop), and once all oldlen slots are copied, CAS the top-level _kvs
// pointer from the old table to the new one (release pairs with readers'
// acquire of _kvs).
301 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
302 oldkvs, int workdone) {
303 int oldlen = oldkvs->_size;
304 int copyDone = _copy_done.load(memory_order_relaxed);
307 copyDone = _copy_done.load(memory_order_relaxed);
308 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
309 workdone, memory_order_relaxed, memory_order_relaxed))
314 // Promote the new table to the current table
315 if (copyDone + workdone == oldlen &&
316 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
317 /**** FIXME: miss ****/
318 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
319 /**** CDSChecker error ****/
320 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
321 memory_order_relaxed);
// copy_slot: migrate one key/value pair from oldkvs to newkvs. Protocol:
// (1) close an empty key slot with TOMBSTONE, (2) box the old value in a
// Prime so late writers in the old table lose, (3) putIfMatch the unboxed
// value into the new table, (4) stamp the old slot TOMBPRIME (copy done).
// Returns true iff this thread did the copy.
325 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
328 while ((key_slot = key(oldkvs, idx)) == NULL)
329 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
331 // First CAS old to Prime
332 slot *oldval = val(oldkvs, idx);
333 while (!is_prime(oldval)) {
334 slot *box = (oldval == NULL || oldval == TOMBSTONE)
335 ? TOMBPRIME : new slot(true, oldval->_ptr);
336 if (CAS_val(oldkvs, idx, oldval, box)) {
337 if (box == TOMBPRIME)
338 return 1; // Copy done
339 // Otherwise we CAS'd the box
340 oldval = box; // Record updated oldval
343 oldval = val(oldkvs, idx); // Else re-try
346 if (oldval == TOMBPRIME) return false; // Copy already completed here
348 slot *old_unboxed = new slot(false, oldval->_ptr);
349 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
352 // Old value is exposed in the new table
353 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
354 oldval = val(oldkvs, idx);
356 return copied_into_new;
// Sentinel slots and tuning constants (definitions live outside this header
// fragment). TOMBSTONE marks deleted; TOMBPRIME marks deleted-and-copied.
363 static const int Default_Init_Size = 4; // Initial table size
365 static slot* const MATCH_ANY;
366 static slot* const NO_MATCH_OLD;
368 static slot* const TOMBPRIME;
369 static slot* const TOMBSTONE;
371 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current table; swapped by copy_check_and_promote() on resize.
373 atomic<kvs_data*> _kvs;
377 // Should initialize the CHM for the construction of the table
378 // For other CHM in kvs_data, they should be initialized in resize()
379 // because the size is determined dynamically
// Default constructor body: table of Default_Init_Size with a fresh CHM in
// slot 0; relaxed stores are fine pre-publication.
385 kvs_data *kvs = new kvs_data(Default_Init_Size);
386 void *chm = (void*) new CHM(0);
387 kvs->_data[0].store(chm, memory_order_relaxed);
388 _kvs.store(kvs, memory_order_relaxed);
391 cliffc_hashtable(int init_size) {
392 // Should initialize the CHM for the construction of the table
393 // For other CHM in kvs_data, they should be initialized in resize()
394 // because the size is determined dynamically
395 // NOTE(review): init_size is presumably required to be a power of two
396 // (index masks use len - 1) — confirm against callers.
401 kvs_data *kvs = new kvs_data(init_size);
402 void *chm = (void*) new CHM(0);
403 kvs->_data[0].store(chm, memory_order_relaxed);
404 _kvs.store(kvs, memory_order_relaxed);
// get(key): lookup via get_impl against the current table; returns the
// mapped TypeV* or NULL on a miss. The spec block above pins the linearizing
// commit points checked by CDSChecker.
410 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
411 @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
412 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
415 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
416 //bool passed = equals_val(_Old_Val, __RET__);
419 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
420 int ret = __RET__ == NULL ? 0 : __RET__->_val;
421 //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
422 //key->_val, old, ret);
425 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
426 equals_val(_Old_Val, __RET__)
429 TypeV* get(TypeK *key) {
430 slot *key_slot = new slot(false, key);
431 int fullhash = hash(key_slot);
432 /**** CDSChecker error ****/
433 kvs_data *kvs = _kvs.load(memory_order_acquire);
436 @Commit_point_define_check: true
440 slot *V = get_impl(this, kvs, key_slot, fullhash);
441 if (V == NULL) return NULL;
// get_impl never returns a mid-copy (Prime) value to callers.
442 MODEL_ASSERT (!is_prime(V));
443 return (TypeV*) V->_ptr;
// put(key, val): unconditional insert/update; returns the previous value
// (NULL if the key was absent). Thin wrapper over putIfMatch(NO_MATCH_OLD).
449 //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
450 @Commit_point_set: Put_Point | Put_WriteKey
453 # Remember this old value at checking point
454 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
455 spec_table_put(map, key, val);
456 //bool passed = equals_val(__RET__, _Old_Val);
459 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
460 int ret = __RET__ == NULL ? 0 : __RET__->_val;
461 //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
462 // key->_val, val->_val, old, ret);
465 equals_val(__RET__, _Old_Val)
468 TypeV* put(TypeK *key, TypeV *val) {
469 return putIfMatch(key, val, NO_MATCH_OLD);
// putIfAbsent(key, value): insert only when the key has no live mapping;
// returns NULL on success, or the existing value on failure.
474 @Interface: PutIfAbsent
476 Write_Success_Point | PutIfAbsent_Fail_Point
477 @Condition: !spec_table_contains(map, key)
479 COND_PutIfAbsentSucc :: __RET__ == NULL
482 void *_Old_Val = spec_table_get(map, key);
484 spec_table_put(map, key, value);
486 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
489 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
// FIX: was `putIfMatch(key, val, TOMBSTONE)` — `val` is not declared in this
// scope; the parameter is named `value`.
490 return putIfMatch(key, value, TOMBSTONE);
// remove(key): unconditional delete — stores TOMBSTONE; returns the value
// previously mapped (NULL if none).
495 @Interface: RemoveAny
496 @Commit_point_set: Write_Success_Point
499 void *_Old_Val = spec_table_get(map, key);
500 spec_table_put(map, key, NULL);
502 equals_val(__RET__, _Old_Val)
505 TypeV* remove(TypeK *key) {
506 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
// remove(key, val): delete only if the current mapping equals val; returns
// true on success.
511 @Interface: RemoveIfMatch
513 Write_Success_Point | RemoveIfMatch_Fail_Point
515 equals_val(spec_table_get(map, key), val)
517 COND_RemoveIfMatchSucc :: __RET__ == true
521 spec_table_put(map, key, NULL);
523 __COND_SAT__ ? __RET__ : !__RET__
526 bool remove(TypeK *key, TypeV *val) {
// NOTE(review): val_slot appears unused in the visible lines — the raw val
// pointer is passed to putIfMatch instead; confirm against the full source.
527 slot *val_slot = val == NULL ? NULL : new slot(false, val);
528 return putIfMatch(key, TOMBSTONE, val) == val;
// replace(key, val): update only if the key already has a mapping (MATCH_ANY);
// returns the previous value.
534 @Interface: ReplaceAny
539 void *_Old_Val = spec_table_get(map, key);
541 equals_val(__RET__, _Old_Val)
544 TypeV* replace(TypeK *key, TypeV *val) {
545 return putIfMatch(key, val, MATCH_ANY);
// replace(key, oldval, newval): compare-and-replace; true iff the current
// mapping equaled oldval and was swapped to newval.
550 @Interface: ReplaceIfMatch
552 Write_Success_Point | ReplaceIfMatch_Fail_Point
554 equals_val(spec_table_get(map, key), oldval)
556 COND_ReplaceIfMatchSucc :: __RET__ == true
560 spec_table_put(map, key, newval);
562 __COND_SAT__ ? __RET__ : !__RET__
565 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
566 return putIfMatch(key, newval, oldval) == oldval;
// Accessors for the two reserved header slots of a kvs_data table:
// slot 0 = CHM control block, slot 1 = hash-cache array.
570 static CHM* get_chm(kvs_data* kvs) {
571 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
575 static int* get_hashes(kvs_data *kvs) {
576 return (int *) kvs->_data[1].load(memory_order_relaxed);
579 // Preserve happens-before semantics on newly inserted keys
// key(kvs, idx): read the key slot for pair idx (keys live at even data
// indices, 2*idx + 2).
580 static inline slot* key(kvs_data *kvs, int idx) {
581 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
582 // Corresponding to the volatile read in get_impl() and putIfMatch in
583 // Cliff Click's Java implementation
584 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
587 # This is a complicated potential commit point since many many functions are
588 # calling it; labeled here, confirmed later per call site.
589 @Potential_commit_point_define: true
590 @Label: Read_Key_Point
597 The atomic operation in val() function is a "potential" commit point,
598 which means in some case it is a real commit point while it is not for
599 some other cases. This so happens because the val() function is such a
600 fundamental function that many internal operation will call. Our
601 strategy is that we label any potential commit points and check if they
602 really are the commit points later.
604 // Preserve happens-before semantics on newly inserted values
// val(kvs, idx): read the value slot for pair idx (values live at odd data
// indices, 2*idx + 3). Acquire pairs with the release in CAS_val.
605 static inline slot* val(kvs_data *kvs, int idx) {
606 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
607 // Corresponding to the volatile read in get_impl() and putIfMatch in
608 // Cliff Click's Java implementation
609 /**** CDSChecker error & hb violation ****/
610 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
613 # This is a complicated potential commit point since many many functions are
614 # calling it; labeled here, confirmed later per call site.
615 @Potential_commit_point_define: true
616 @Label: Read_Val_Point
// hash(key_slot): TypeK::hashCode() followed by Cliff Click's bit-spreading
// mix so that the low bits (used for masking into the table) are well mixed.
624 static int hash(slot *key_slot) {
625 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
626 TypeK* key = (TypeK*) key_slot->_ptr;
627 int h = key->hashCode();
628 // Spread bits according to Cliff Click's code
629 h += (h << 15) ^ 0xffffcd7d;
633 h += (h << 2) + (h << 14);
634 return h ^ (h >> 16);
637 // Heuristic to decide if reprobed too many times.
638 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
639 // put it triggers a table resize. Several places MUST have exact agreement.
640 static int reprobe_limit(int len) {
641 return REPROBE_LIMIT + (len >> 2);
// A slot is "prime" (mid-copy) iff it is non-NULL and its flag is set.
644 static inline bool is_prime(slot *val) {
645 return (val != NULL) && val->_prime;
648 // Check for key equality. Try direct pointer comparison first (fast
649 // negative test) and then the full 'equals' call
// The cached hash array lets a mismatched hash reject without calling
// TypeK::equals; hashes[hash] == 0 means "not yet memorized".
650 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
652 // Caller should've checked this.
653 MODEL_ASSERT (K != NULL);
654 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
657 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
659 key_ptr->equals(K->_ptr));
// valeq: value equality between two value slots via TypeV::equals;
// NULL slot or NULL payload compares unequal.
662 static bool valeq(slot *val_slot1, slot *val_slot2) {
663 MODEL_ASSERT (val_slot1 != NULL);
// FIX: was `TypeK* ptr1 = (TypeV*) ...` — a value slot's payload is a TypeV,
// and a TypeV* does not convert to TypeK*; the declared type must be TypeV*.
664 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
665 if (val_slot2 == NULL || ptr1 == NULL) return false;
666 return ptr1->equals(val_slot2->_ptr);
669 // Together with key() preserve the happens-before relationship on newly
670 // inserted keys
// CAS_key: claim/replace the key slot of pair idx (data index 2*idx + 2).
// Relaxed on both orders here; key publication relies on the value CAS.
671 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
672 bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
673 desired, memory_order_relaxed, memory_order_relaxed);
675 # If it is a successful put instead of a copy or any other internal
676 # operations, expected != NULL
678 @Potential_commit_point_define: res
679 @Label: Write_Key_Point
689 // Together with val() preserve the happens-before relationship on newly
691 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
693 /**** CDSChecker error & HB violation ****/
694 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
695 desired, memory_order_acq_rel, memory_order_relaxed);
697 # If it is a successful put instead of a copy or any other internal
698 # operantions, expected != NULL
700 @Potential_commit_point_define: res
701 @Label: Write_Val_Point
// get_impl: core lookup. Linear reprobe from fullhash & (len-1); a NULL key
// is a definite miss, a key hit returns the value (helping finish a copy if
// the value is Prime), and too many reprobes retries in the new table.
707 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
709 int len = kvs->_size;
710 CHM *chm = get_chm(kvs);
711 int *hashes = get_hashes(kvs);
// len is assumed to be a power of two so the mask is a cheap modulo.
713 int idx = fullhash & (len - 1);
716 slot *K = key(kvs, idx);
719 @Commit_point_define: K == NULL
720 @Potential_commit_point_label: Read_Key_Point
724 slot *V = val(kvs, idx);
727 //model_print("Key is null\n");
728 return NULL; // A miss
731 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
732 // Key hit! Check if table-resize in progress
736 @Commit_point_clear: true
743 @Commit_point_define: true
744 @Potential_commit_point_label: Read_Val_Point
748 return (V == TOMBSTONE) ? NULL : V; // Return this value
750 // Otherwise, finish the copy & retry in the new table
751 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
752 idx, key_slot), key_slot, fullhash);
755 if (++reprobe_cnt >= REPROBE_LIMIT ||
756 key_slot == TOMBSTONE) {
757 // Retry in new table
758 // Atomic read can be here
759 /**** FIXME: miss ****/
760 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
763 @Commit_point_define_check: true
764 @Label: Get_ReadNewKVS
// No new table means a definite miss; otherwise help the copy and retry.
767 return newkvs == NULL ? NULL : get_impl(topmap,
768 topmap->help_copy(newkvs), key_slot, fullhash);
771 idx = (idx + 1) & (len - 1); // Reprobe by 1
775 // A wrapper of the essential function putIfMatch()
// Boxes key/value into slots, reads the current table, and delegates to the
// static putIfMatch. Maps the internal TOMBSTONE result back to NULL.
776 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
777 // TODO: Should throw an exception rather return NULL
778 if (old_val == NULL) {
781 slot *key_slot = new slot(false, key);
783 slot *value_slot = new slot(false, value);
784 /**** FIXME: miss ****/
785 kvs_data *kvs = _kvs.load(memory_order_acquire);
788 @Commit_point_define_check: true
792 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
793 // Only when copy_slot() call putIfMatch() will it return NULL
794 MODEL_ASSERT (res != NULL);
795 MODEL_ASSERT (!is_prime(res));
796 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
800 Put, Remove, PutIfAbsent, etc will call this function. Return the old
801 value. If the returned value is equals to the expVal (or expVal is
802 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
803 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
804 NULL if passed a NULL expVal.
// Core CAS-based update: (1) spin to claim/find the key slot, reprobing and
// resizing as needed; (2) decide whether expVal permits the update; (3) CAS
// the value slot, adjusting the live-pair count for user calls.
806 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
807 *key_slot, slot *val_slot, slot *expVal) {
808 MODEL_ASSERT (val_slot != NULL);
809 MODEL_ASSERT (!is_prime(val_slot));
810 MODEL_ASSERT (!is_prime(expVal));
812 int fullhash = hash(key_slot);
813 int len = kvs->_size;
814 CHM *chm = get_chm(kvs);
815 int *hashes = get_hashes(kvs);
816 int idx = fullhash & (len - 1);
824 while (true) { // Spin till we get a key slot
827 if (K == NULL) { // Get a free slot
// Removing an absent key is a no-op: never install a TOMBSTONE key.
828 if (val_slot == TOMBSTONE) return val_slot;
829 // Claim the null key-slot
830 if (CAS_key(kvs, idx, NULL, key_slot)) {
833 @Commit_point_define: true
834 @Potential_commit_point_label: Write_Key_Point
838 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
839 hashes[idx] = fullhash; // Memorize full hash
842 K = key(kvs, idx); // CAS failed, get updated value
843 MODEL_ASSERT (K != NULL);
846 // Key slot not null, there exists a Key here
847 if (keyeq(K, key_slot, hashes, idx, fullhash))
850 // Notice that the logic here should be consistent with that of get.
851 // The first predicate means too many reprobes means nothing in the
853 if (++reprobe_cnt >= reprobe_limit(len) ||
854 K == TOMBSTONE) { // Found a Tombstone key, no more keys
855 newkvs = chm->resize(topmap, kvs);
856 //model_print("resize1\n");
857 // Help along an existing copy
858 if (expVal != NULL) topmap->help_copy(newkvs);
859 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
862 idx = (idx + 1) & (len - 1); // Reprobe
863 } // End of spinning till we get a Key slot
865 if (val_slot == V) return V; // Fast cutout for no-change
867 // Here it tries to resize cause it doesn't want other threads to stop
868 // its progress (eagerly try to resize soon)
869 /**** FIXME: miss ****/
870 newkvs = chm->_newkvs.load(memory_order_acquire);
873 @Commit_point_define_check: true
874 @Label: Put_ReadNewKVS
877 if (newkvs == NULL &&
878 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
879 //model_print("resize2\n");
880 newkvs = chm->resize(topmap, kvs); // Force the copy to start
883 // Finish the copy and then put it in the new table
885 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
886 expVal), key_slot, val_slot, expVal);
888 // Decided to update the existing table
890 MODEL_ASSERT (!is_prime(V));
// Expected-value gate: NO_MATCH_OLD always updates; MATCH_ANY requires a
// live value; otherwise the current value must equal expVal.
892 if (expVal != NO_MATCH_OLD &&
894 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
895 !(V == NULL && expVal == TOMBSTONE) &&
896 (expVal == NULL || !valeq(expVal, V))) {
899 @Commit_point_define: expVal == TOMBSTONE
900 @Potential_commit_point_label: Read_Val_Point
901 @Label: PutIfAbsent_Fail_Point
902 # This is a check for the PutIfAbsent() when the value
908 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
909 @Potential_commit_point_label: Read_Val_Point
910 @Label: RemoveIfMatch_Fail_Point
915 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
916 @Potential_commit_point_label: Read_Val_Point
917 @Label: ReplaceIfMatch_Fail_Point
920 return V; // Do not update!
923 if (CAS_val(kvs, idx, V, val_slot)) {
926 # The only point where a successful put happens
927 @Commit_point_define: true
928 @Potential_commit_point_label: Write_Val_Point
932 if (expVal != NULL) { // Not called by a table-copy
933 // CAS succeeded, should adjust size
934 // Both normal put's and table-copy calls putIfMatch, but
935 // table-copy does not increase the number of live K/V pairs
936 if ((V == NULL || V == TOMBSTONE) &&
937 val_slot != TOMBSTONE)
938 chm->_size.fetch_add(1, memory_order_relaxed);
939 if (!(V == NULL || V == TOMBSTONE) &&
940 val_slot == TOMBSTONE)
941 chm->_size.fetch_add(-1, memory_order_relaxed);
// NULL-old-value on a user call means "was absent": report TOMBSTONE so the
// wrapper maps it back to NULL for the caller.
943 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// CAS lost to a copy: finish this slot's copy and retry in the new table.
948 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
949 idx, expVal), key_slot, val_slot, expVal);
953 // Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns 'helper' unchanged when no copy is running; otherwise contributes
// one chunk of copy work via help_copy_impl (non-blocking helping).
954 kvs_data* help_copy(kvs_data *helper) {
955 /**** FIXME: miss ****/
956 kvs_data *topkvs = _kvs.load(memory_order_acquire);
957 CHM *topchm = get_chm(topkvs);
958 // No cpy in progress
959 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
960 topchm->help_copy_impl(this, topkvs, false);