1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
17 #include <cdsannotate.h>
18 #include <specannotation.h>
19 #include <model_memory.h>
25 This header file declares and defines a simplified version of Cliff Click's
26 NonblockingHashMap. It contains all the necessary structures and main
27 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
31 template<typename TypeK, typename TypeV>
32 class cliffc_hashtable;
35 Corresponding to the Object[] array in Cliff Click's Java implementation.
36 It keeps the first two slots for CHM (Hashtable control unit) and the hash
37 records (an array of hashes used for fast negative key-equality checks).
// kvs_data layout: _data[0] = CHM control block, _data[1] = hash cache,
// then one (key, value) slot pair per bucket — hence sz * 2 + 2 below.
45 int real_size = sz * 2 + 2;
46 _data = new atomic<void*>[real_size];
47 // The control block should be initialized in resize()
48 // Init the hash record array
// NOTE(review): the hash array is sized by the member _size while the data
// array uses the local real_size — confirm _size == sz at this point.
49 int *hashes = new int[_size];
51 for (i = 0; i < _size; i++) {
54 // Init the data to Null slot
55 for (i = 2; i < real_size; i++) {
56 _data[i].store(NULL, memory_order_relaxed);
// Publish the hash cache pointer in data slot 1.
58 _data[1].store(hashes, memory_order_relaxed);
// Destructor side: reclaim the hash cache that data slot 1 points to.
62 int *hashes = (int*) _data[1].load(memory_order_relaxed);
72 slot(bool prime, void *ptr) {
80 TypeK must have defined function "int hashCode()" which returns the hash
81 code for its object, and "int equals(TypeK anotherKey)" which is
82 used to judge equality.
83 TypeK and TypeV should define their own copy constructor.
90 template<typename TypeK, typename TypeV>
91 class cliffc_hashtable {
93 # The synchronization we have for the hashtable gives us the property of
94 # serializability, so we should have a sequential hashtable when we check the
95 # correctness. The key thing is to identify all the commit points.
100 CLASS = cliffc_hashtable;
// Spec-annotation state: reference tables used by the checker, keyed by
// key-equality and id-equality respectively.
107 map = new_spec_table_default(equals_key);
108 id_map = new_spec_table_default(equals_id);
111 // model_print("cleaning up\n");
113 bool equals_key(void *ptr1, void *ptr2) {
// Spec helper: key equality via TypeK::equals, with a NULL guard below.
114 TypeK *key1 = (TypeK*) ptr1,
115 *key2 = (TypeK*) ptr2;
116 if (key1 == NULL || key2 == NULL)
118 return key1->equals(key2);
122 bool equals_val(void *ptr1, void *ptr2) {
// Spec helper: value equality via TypeV::equals, with a NULL guard below.
125 TypeV *val1 = (TypeV*) ptr1,
126 *val2 = (TypeV*) ptr2;
127 if (val1 == NULL || val2 == NULL)
129 return val1->equals(val2);
133 bool equals_id(void *ptr1, void *ptr2) {
// Spec helper: two call ids are equal iff their tags match.
134 id_tag_t *id1 = (id_tag_t*) ptr1,
135 *id2 = (id_tag_t*) ptr2;
136 if (id1 == NULL || id2 == NULL)
138 return (*id1).tag == (*id2).tag;
142 # Update the tag for the current key slot if the corresponding tag
143 # is NULL, otherwise just return that tag. It will update the next
144 # available tag too if it requires a new tag for that key slot.
145 call_id_t getKeyTag(TypeK *key) {
// Lazily assign a call id to this key in id_map; subsequent calls reuse it.
146 if (!spec_table_contains(id_map, key)) {
147 call_id_t cur_id = current(tag);
148 spec_table_put(id_map, key, (void*) cur_id);
152 call_id_t res = (call_id_t) spec_table_get(id_map, key);
164 The control structure for the hashtable
168 friend class cliffc_hashtable;
// The next-generation table while a resize/copy is in flight (NULL otherwise).
170 atomic<kvs_data*> _newkvs;
172 // Size of active K,V pairs
175 // Count of used slots
178 // The next part of the table to copy
179 atomic_int _copy_idx;
181 // Work-done reporting
182 atomic_int _copy_done;
// Constructor: all counters start at zero; no copy is in progress.
186 _newkvs.store(NULL, memory_order_relaxed);
187 _size.store(size, memory_order_relaxed);
188 _slots.store(0, memory_order_relaxed);
190 _copy_idx.store(0, memory_order_relaxed);
191 _copy_done.store(0, memory_order_relaxed);
198 // Heuristic to decide if the table is too full
199 bool table_full(int reprobe_cnt, int len) {
// Full when this probe ran past REPROBE_LIMIT and the used-slot count
// exceeds the length-scaled limit (see reprobe_limit()).
201 reprobe_cnt >= REPROBE_LIMIT &&
202 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
205 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
// Install (or return the already-installed) next-generation table in _newkvs.
206 //model_print("resizing...\n");
207 /**** FIXME: miss ****/
208 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
212 // No copy in-progress, start one; Only double the table size
213 int oldlen = kvs->_size;
214 int sz = _size.load(memory_order_relaxed);
217 // Just follow Cliff Click's heuristic to decide the new size
218 if (sz >= (oldlen >> 2)) { // If we are 25% full
219 newsz = oldlen << 1; // Double size
220 if (sz >= (oldlen >> 1))
221 newsz = oldlen << 2; // Double double size
224 // We do not record the record timestamp
225 if (newsz <= oldlen) newsz = oldlen << 1;
226 // Do not shrink ever
227 if (newsz < oldlen) newsz = oldlen;
229 // Last check cause the 'new' below is expensive
230 /**** FIXME: miss ****/
231 newkvs = _newkvs.load(memory_order_acquire);
232 //model_print("hey1\n");
233 if (newkvs != NULL) return newkvs;
235 newkvs = new kvs_data(newsz);
236 void *chm = (void*) new CHM(sz);
237 //model_print("hey2\n");
238 newkvs->_data[0].store(chm, memory_order_relaxed);
240 kvs_data *cur_newkvs;
241 // Another check after the slow allocation
242 /**** FIXME: miss ****/
243 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
245 // CAS the _newkvs to the allocated table
// NOTE(review): these two variables are named backwards —
// compare_exchange_strong takes (expected, desired), so the call below
// expects NULL and installs newkvs, which is the intended behavior; only
// the local names are misleading.
246 kvs_data *desired = (kvs_data*) NULL;
247 kvs_data *expected = (kvs_data*) newkvs;
248 /**** FIXME: miss ****/
249 //model_print("release in resize!\n");
250 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
251 memory_order_relaxed)) {
252 // Should clean the allocated area
// Lost the race: adopt the winner's table.
254 /**** FIXME: miss ****/
255 newkvs = _newkvs.load(memory_order_acquire);
260 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
// Claim chunks of min_copy_work slots via _copy_idx and copy them into
// _newkvs; loops until the old table is fully copied (or, when !copy_all,
// until this thread's claimed chunk is done).
262 MODEL_ASSERT (get_chm(oldkvs) == this);
263 /**** FIXME: miss ****/
264 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
265 int oldlen = oldkvs->_size;
266 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
268 // Just follow Cliff Click's code here
269 int panic_start = -1;
271 while (_copy_done.load(memory_order_relaxed) < oldlen) {
272 copyidx = _copy_idx.load(memory_order_relaxed);
273 if (panic_start == -1) { // No panic
274 copyidx = _copy_idx.load(memory_order_relaxed);
275 while (copyidx < (oldlen << 1) &&
276 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
277 min_copy_work, memory_order_relaxed, memory_order_relaxed))
278 copyidx = _copy_idx.load(memory_order_relaxed);
// Past 2*oldlen worth of claims: switch to panic mode and sweep everything.
279 if (!(copyidx < (oldlen << 1)))
280 panic_start = copyidx;
283 // Now copy the chunk of work we claimed
285 for (int i = 0; i < min_copy_work; i++)
286 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
290 copy_check_and_promote(topmap, oldkvs, workdone);
292 copyidx += min_copy_work;
293 if (!copy_all && panic_start == -1)
294 return; // We are done with the work we claim
296 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
299 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
300 *oldkvs, int idx, void *should_help) {
301 /**** FIXME: miss ****/
302 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
303 // We're only here cause the caller saw a Prime
304 if (copy_slot(topmap, idx, oldkvs, newkvs))
305 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// Internal copy paths pass NULL for should_help; user-facing callers pass
// non-NULL so they also help along the table-wide copy.
306 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
309 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
310 oldkvs, int workdone) {
// Fold our finished work into _copy_done; if the whole old table is copied,
// CAS the top-level _kvs pointer from the old table to the new one.
311 int oldlen = oldkvs->_size;
312 int copyDone = _copy_done.load(memory_order_relaxed);
315 copyDone = _copy_done.load(memory_order_relaxed);
316 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
317 workdone, memory_order_relaxed, memory_order_relaxed))
322 // Promote the new table to the current table
323 if (copyDone + workdone == oldlen &&
324 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
325 /**** FIXME: miss ****/
326 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
327 /**** CDSChecker error ****/
328 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
329 memory_order_relaxed);
333 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Copy one slot from the old table into the new one. Returns nonzero iff
// this thread performed the copy (so the caller may count the work as done).
336 while ((key_slot = key(oldkvs, idx)) == NULL)
337 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
339 // First CAS old to Prime
340 slot *oldval = val(oldkvs, idx);
341 while (!is_prime(oldval)) {
// Dead or empty slots get TOMBPRIME; live values get boxed in a prime slot.
342 slot *box = (oldval == NULL || oldval == TOMBSTONE)
343 ? TOMBPRIME : new slot(true, oldval->_ptr);
344 if (CAS_val(oldkvs, idx, oldval, box)) {
345 if (box == TOMBPRIME)
346 return 1; // Copy done
347 // Otherwise we CAS'd the box
348 oldval = box; // Record updated oldval
351 oldval = val(oldkvs, idx); // Else re-try
354 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Unbox and attempt to install the old value into the new table.
356 slot *old_unboxed = new slot(false, oldval->_ptr);
357 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
360 // Old value is exposed in the new table
361 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
362 oldval = val(oldkvs, idx);
364 return copied_into_new;
371 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slots, defined out of line elsewhere in the project.
373 static slot* const MATCH_ANY;
374 static slot* const NO_MATCH_OLD;
376 static slot* const TOMBPRIME;
377 static slot* const TOMBSTONE;
379 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table; swapped during copy_check_and_promote().
381 atomic<kvs_data*> _kvs;
385 // Should initialize the CHM for the construction of the table
386 // For other CHM in kvs_data, they should be initialized in resize()
387 // because the size is determined dynamically
// Default construction: table of Default_Init_Size with a fresh CHM in slot 0.
393 kvs_data *kvs = new kvs_data(Default_Init_Size);
394 void *chm = (void*) new CHM(0);
395 kvs->_data[0].store(chm, memory_order_relaxed);
396 _kvs.store(kvs, memory_order_relaxed);
399 cliffc_hashtable(int init_size) {
// Same as the default constructor but with a caller-chosen initial capacity.
400 // Should initialize the CHM for the construction of the table
401 // For other CHM in kvs_data, they should be initialized in resize()
402 // because the size is determined dynamically
409 kvs_data *kvs = new kvs_data(init_size);
410 void *chm = (void*) new CHM(0);
411 kvs->_data[0].store(chm, memory_order_relaxed);
412 _kvs.store(kvs, memory_order_relaxed);
418 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
419 @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
420 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
423 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
424 //bool passed = equals_val(_Old_Val, __RET__);
427 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
428 int ret = __RET__ == NULL ? 0 : __RET__->_val;
429 //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
430 //key->_val, old, ret);
433 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
434 equals_val(_Old_Val, __RET__)
437 TypeV* get(TypeK *key) {
// Lookup: returns the value currently mapped to key, or NULL on a miss.
438 slot *key_slot = new slot(false, key);
439 int fullhash = hash(key_slot);
440 /**** CDSChecker error ****/
441 kvs_data *kvs = _kvs.load(memory_order_acquire);
444 @Commit_point_define_check: true
448 slot *V = get_impl(this, kvs, key_slot, fullhash);
449 if (V == NULL) return NULL;
450 MODEL_ASSERT (!is_prime(V));
451 return (TypeV*) V->_ptr;
457 //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
458 @Commit_point_set: Put_Point | Put_WriteKey
461 # Remember this old value at checking point
462 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
463 spec_table_put(map, key, val);
464 //bool passed = equals_val(__RET__, _Old_Val);
467 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
468 int ret = __RET__ == NULL ? 0 : __RET__->_val;
469 //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
470 // key->_val, val->_val, old, ret);
473 equals_val(__RET__, _Old_Val)
476 TypeV* put(TypeK *key, TypeV *val) {
// Unconditional put: maps key to val, returns the previous value (or NULL).
477 return putIfMatch(key, val, NO_MATCH_OLD);
482 @Interface: PutIfAbsent
484 Write_Success_Point | PutIfAbsent_Fail_Point
485 @Condition: !spec_table_contains(map, key)
487 COND_PutIfAbsentSucc :: __RET__ == NULL
490 void *_Old_Val = spec_table_get(map, key);
492 spec_table_put(map, key, value);
494 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
497 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
// Insert only if the key is absent; returns NULL on success, otherwise the
// value already mapped to key.
// BUGFIX: the parameter is named 'value'; the original body passed the
// undeclared identifier 'val' to putIfMatch.
498 return putIfMatch(key, value, TOMBSTONE);
503 @Interface: RemoveAny
504 @Commit_point_set: Write_Success_Point
507 void *_Old_Val = spec_table_get(map, key);
508 spec_table_put(map, key, NULL);
510 equals_val(__RET__, _Old_Val)
513 TypeV* remove(TypeK *key) {
// Unconditional remove: returns the value previously mapped to key (or NULL).
514 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
519 @Interface: RemoveIfMatch
521 Write_Success_Point | RemoveIfMatch_Fail_Point
523 equals_val(spec_table_get(map, key), val)
525 COND_RemoveIfMatchSucc :: __RET__ == true
529 spec_table_put(map, key, NULL);
531 __COND_SAT__ ? __RET__ : !__RET__
534 bool remove(TypeK *key, TypeV *val) {
// Remove only if the current value matches val.
// NOTE(review): val_slot is built but never used below, and the raw TypeV*
// 'val' is passed where other call sites pass a slot* — confirm against the
// unsampled source whether this should read putIfMatch(key, TOMBSTONE,
// val_slot).
535 slot *val_slot = val == NULL ? NULL : new slot(false, val);
536 return putIfMatch(key, TOMBSTONE, val) == val;
542 @Interface: ReplaceAny
547 void *_Old_Val = spec_table_get(map, key);
549 equals_val(__RET__, _Old_Val)
552 TypeV* replace(TypeK *key, TypeV *val) {
// Replace only if the key is already mapped to some value (MATCH_ANY).
553 return putIfMatch(key, val, MATCH_ANY);
558 @Interface: ReplaceIfMatch
560 Write_Success_Point | ReplaceIfMatch_Fail_Point
562 equals_val(spec_table_get(map, key), oldval)
564 COND_ReplaceIfMatchSucc :: __RET__ == true
568 spec_table_put(map, key, newval);
570 __COND_SAT__ ? __RET__ : !__RET__
573 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
// Replace key's value with newval only if it currently equals oldval.
// NOTE(review): success is detected by raw pointer comparison with oldval —
// confirm this is intended rather than valeq()-style equality.
574 return putIfMatch(key, newval, oldval) == oldval;
578 static CHM* get_chm(kvs_data* kvs) {
// The control block is stored in data slot 0 by the constructors/resize().
579 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
583 static int* get_hashes(kvs_data *kvs) {
// The hash cache is stored in data slot 1 by the kvs_data constructor.
584 return (int *) kvs->_data[1].load(memory_order_relaxed);
587 // Preserve happens-before semantics on newly inserted keys
588 static inline slot* key(kvs_data *kvs, int idx) {
589 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
590 // Corresponding to the volatile read in get_impl() and putIfMatch in
591 // Cliff Click's Java implementation
// Keys live at even data indices starting at 2 (idx * 2 + 2).
592 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
595 # This is a complicated potential commit point since many many functions are
597 @Potential_commit_point_define: true
598 @Label: Read_Key_Point
605 The atomic operation in val() function is a "potential" commit point,
606 which means in some cases it is a real commit point while it is not for
607 some other cases. This so happens because the val() function is such a
608 fundamental function that many internal operations call it. Our
609 strategy is that we label any potential commit points and check if they
610 really are the commit points later.
612 // Preserve happens-before semantics on newly inserted values
613 static inline slot* val(kvs_data *kvs, int idx) {
614 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
615 // Corresponding to the volatile read in get_impl() and putIfMatch in
616 // Cliff Click's Java implementation
617 /**** CDSChecker error & hb violation ****/
// Values live at odd data indices starting at 3 (idx * 2 + 3).
618 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
621 # This is a complicated potential commit point since many many functions are
623 @Potential_commit_point_define: true
624 @Label: Read_Val_Point
632 static int hash(slot *key_slot) {
// Mix the user-supplied hashCode() (Cliff Click's bit-spreader) so that
// power-of-two index masking also depends on the high bits.
633 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
634 TypeK* key = (TypeK*) key_slot->_ptr;
635 int h = key->hashCode();
636 // Spread bits according to Cliff Click's code
637 h += (h << 15) ^ 0xffffcd7d;
641 h += (h << 2) + (h << 14);
642 return h ^ (h >> 16);
645 // Heuristic to decide if reprobed too many times.
646 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
647 // put it triggers a table resize. Several places MUST have exact agreement.
648 static int reprobe_limit(int len) {
649 return REPROBE_LIMIT + (len >> 2);
652 static inline bool is_prime(slot *val) {
// A "prime" slot is the boxed form installed by copy_slot() while a value
// is being migrated to the new table.
653 return (val != NULL) && val->_prime;
656 // Check for key equality. Try direct pointer comparison first (fast
657 // negative test) and then the full 'equals' call
658 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
660 // Caller should've checked this.
661 MODEL_ASSERT (K != NULL);
662 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
// Use the cached hash (0 = unset) to skip the expensive equals() whenever
// the full hashes cannot possibly match.
665 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
667 key_ptr->equals(K->_ptr));
670 static bool valeq(slot *val_slot1, slot *val_slot2) {
// Value equality between two slots via TypeV::equals; false if either side
// is missing.
671 MODEL_ASSERT (val_slot1 != NULL);
// BUGFIX: ptr1 was declared TypeK* while being cast to TypeV* — the slot
// holds a value, so the pointer must be TypeV*.
672 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
673 if (val_slot2 == NULL || ptr1 == NULL) return false;
674 return ptr1->equals(val_slot2->_ptr);
677 // Together with key() preserve the happens-before relationship on newly
679 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
// CAS the key slot (even data index 2*idx+2); returns whether it succeeded.
680 bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
681 desired, memory_order_relaxed, memory_order_relaxed);
683 # If it is a successful put instead of a copy or any other internal
684 # operations, expected != NULL
686 @Potential_commit_point_define: res
687 @Label: Write_Key_Point
694 Same as the val() function, we only label the CAS operation as the
695 potential commit point.
697 // Together with val() preserve the happens-before relationship on newly
699 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
701 /**** CDSChecker error & HB violation ****/
// CAS the value slot (odd data index 2*idx+3); returns whether it succeeded.
702 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
703 desired, memory_order_acq_rel, memory_order_relaxed);
705 # If it is a successful put instead of a copy or any other internal
706 # operations, expected != NULL
708 @Potential_commit_point_define: res
709 @Label: Write_Val_Point
715 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
// Core lookup: linear reprobe from the hashed index; falls through to the
// new table when a copy is in progress.
717 int len = kvs->_size;
718 CHM *chm = get_chm(kvs);
719 int *hashes = get_hashes(kvs);
721 int idx = fullhash & (len - 1);
724 slot *K = key(kvs, idx);
727 @Commit_point_define: K == NULL
728 @Potential_commit_point_label: Read_Key_Point
732 slot *V = val(kvs, idx);
735 //model_print("Key is null\n");
736 return NULL; // A miss
739 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
740 // Key hit! Check if table-resize in progress
744 @Commit_point_clear: true
751 @Commit_point_define: true
752 @Potential_commit_point_label: Read_Val_Point
756 return (V == TOMBSTONE) ? NULL : V; // Return this value
758 // Otherwise, finish the copy & retry in the new table
759 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
760 idx, key_slot), key_slot, fullhash);
// NOTE(review): this uses the raw REPROBE_LIMIT and compares the *probe*
// slot (key_slot) against TOMBSTONE, whereas putIfMatch uses
// reprobe_limit(len) and compares the table key K — per the comment at
// reprobe_limit() these paths must agree; confirm against the original.
763 if (++reprobe_cnt >= REPROBE_LIMIT ||
764 key_slot == TOMBSTONE) {
765 // Retry in new table
766 // Atomic read can be here
767 /**** FIXME: miss ****/
768 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
771 @Commit_point_define_check: true
772 @Label: Get_ReadNewKVS
775 return newkvs == NULL ? NULL : get_impl(topmap,
776 topmap->help_copy(newkvs), key_slot, fullhash);
779 idx = (idx + 1) & (len - 1); // Reprobe by 1
783 // A wrapper of the essential function putIfMatch()
784 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
// Boxes key/value into slots and delegates to the static putIfMatch();
// translates the TOMBSTONE result back to NULL for callers.
785 // TODO: Should throw an exception rather return NULL
786 if (old_val == NULL) {
789 slot *key_slot = new slot(false, key);
791 slot *value_slot = new slot(false, value);
792 /**** FIXME: miss ****/
793 kvs_data *kvs = _kvs.load(memory_order_acquire);
796 @Commit_point_define_check: true
800 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
801 // Only when copy_slot() call putIfMatch() will it return NULL
802 MODEL_ASSERT (res != NULL);
803 MODEL_ASSERT (!is_prime(res));
804 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
808 Put, Remove, PutIfAbsent, etc will call this function. Return the old
809 value. If the returned value is equal to the expVal (or expVal is
810 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
811 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
812 NULL if passed a NULL expVal.
814 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
815 *key_slot, slot *val_slot, slot *expVal) {
816 MODEL_ASSERT (val_slot != NULL);
817 MODEL_ASSERT (!is_prime(val_slot));
818 MODEL_ASSERT (!is_prime(expVal));
820 int fullhash = hash(key_slot);
821 int len = kvs->_size;
822 CHM *chm = get_chm(kvs);
823 int *hashes = get_hashes(kvs);
824 int idx = fullhash & (len - 1);
// Phase 1: probe until we own (or find) a key slot for key_slot.
832 while (true) { // Spin till we get a key slot
835 if (K == NULL) { // Get a free slot
// Removing a key that was never inserted: nothing to do.
836 if (val_slot == TOMBSTONE) return val_slot;
837 // Claim the null key-slot
838 if (CAS_key(kvs, idx, NULL, key_slot)) {
841 @Commit_point_define: true
842 @Potential_commit_point_label: Write_Key_Point
846 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
847 hashes[idx] = fullhash; // Memorize full hash
850 K = key(kvs, idx); // CAS failed, get updated value
851 MODEL_ASSERT (K != NULL);
854 // Key slot not null, there exists a Key here
855 if (keyeq(K, key_slot, hashes, idx, fullhash))
858 // Notice that the logic here should be consistent with that of get.
859 // The first predicate means too many reprobes means nothing in the
861 if (++reprobe_cnt >= reprobe_limit(len) ||
862 K == TOMBSTONE) { // Found a Tombstone key, no more keys
863 newkvs = chm->resize(topmap, kvs);
864 //model_print("resize1\n");
865 // Help along an existing copy
866 if (expVal != NULL) topmap->help_copy(newkvs);
867 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
870 idx = (idx + 1) & (len - 1); // Reprobe
871 } // End of spinning till we get a Key slot
// Phase 2: update the value slot (or chase the copy into the new table).
873 if (val_slot == V) return V; // Fast cutout for no-change
875 // Here it tries to resize cause it doesn't want other threads to stop
876 // its progress (eagerly try to resize soon)
877 /**** FIXME: miss ****/
878 newkvs = chm->_newkvs.load(memory_order_acquire);
881 @Commit_point_define_check: true
882 @Label: Put_ReadNewKVS
885 if (newkvs == NULL &&
886 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
887 //model_print("resize2\n");
888 newkvs = chm->resize(topmap, kvs); // Force the copy to start
891 // Finish the copy and then put it in the new table
// expVal is forwarded as should_help: non-NULL callers help the copy along.
893 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
894 expVal), key_slot, val_slot, expVal);
896 // Decided to update the existing table
898 MODEL_ASSERT (!is_prime(V));
// Match failure cases: wrong expected value, MATCH_ANY on an empty slot, etc.
900 if (expVal != NO_MATCH_OLD &&
902 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
903 !(V == NULL && expVal == TOMBSTONE) &&
904 (expVal == NULL || !valeq(expVal, V))) {
907 @Commit_point_define: expVal == TOMBSTONE
908 @Potential_commit_point_label: Read_Val_Point
909 @Label: PutIfAbsent_Fail_Point
910 # This is a check for the PutIfAbsent() when the value
916 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
917 @Potential_commit_point_label: Read_Val_Point
918 @Label: RemoveIfMatch_Fail_Point
923 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
924 @Potential_commit_point_label: Read_Val_Point
925 @Label: ReplaceIfMatch_Fail_Point
928 return V; // Do not update!
931 if (CAS_val(kvs, idx, V, val_slot)) {
934 # The only point where a successful put happens
935 @Commit_point_define: true
936 @Potential_commit_point_label: Write_Val_Point
940 if (expVal != NULL) { // Not called by a table-copy
941 // CAS succeeded, should adjust size
942 // Both normal put's and table-copy calls putIfMatch, but
943 // table-copy does not increase the number of live K/V pairs
944 if ((V == NULL || V == TOMBSTONE) &&
945 val_slot != TOMBSTONE)
946 chm->_size.fetch_add(1, memory_order_relaxed);
947 if (!(V == NULL || V == TOMBSTONE) &&
948 val_slot == TOMBSTONE)
949 chm->_size.fetch_add(-1, memory_order_relaxed);
951 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// Lost the value CAS to a copy: finish the copy and retry in the new table.
956 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
957 idx, expVal), key_slot, val_slot, expVal);
961 // Help along an existing table-resize. This is a fast cut-out wrapper.
962 kvs_data* help_copy(kvs_data *helper) {
963 /**** FIXME: miss ****/
964 kvs_data *topkvs = _kvs.load(memory_order_acquire);
965 CHM *topchm = get_chm(topkvs);
966 // No copy in progress
967 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
// Contribute a bounded chunk of copy work (copy_all == false).
968 topchm->help_copy_impl(this, topkvs, false);