1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
/* Forward declaration so kvs_data/CHM/slot can name the table type. */
template<typename TypeK, typename TypeV>
class cliffc_hashtable;
/* kvs_data constructor body (partial view — surrounding lines elided).
   Layout: _data[0] = CHM*, _data[1] = int* cached-hash array, then
   _size {key,value} pairs, hence real_size = 2*sz + 2. */
int real_size = sz * 2 + 2;
_data = new atomic<void*>[real_size];
/* Side array caching each key slot's full hash (see keyeq()). */
int *hashes = new int[_size];
/* Zero the hash cache (loop body elided from this view). */
for (i = 0; i < _size; i++) {
/* All key/value slots start empty; relaxed stores are sufficient
   before the table is published to other threads. */
for (i = 2; i < real_size; i++) {
_data[i].store(NULL, memory_order_relaxed);
_data[1].store(hashes, memory_order_relaxed);
/* Destructor side: reclaim the hash cache parked at _data[1]. */
int *hashes = (int*) _data[1].load(memory_order_relaxed);
/* slot: boxed table entry; `prime` marks a value that is mid-copy
   during a resize (body elided from this view). */
slot(bool prime, void *ptr) {
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
/* All other user-defined structs */
/* Sequential-spec state shared by the checker: `map` mirrors the
   table's K->V contents, `id_map` records a call-id tag per key,
   `tag` is the tag allocator. */
static spec_table * map;
static spec_table * id_map;
static id_tag_t * tag;
/* All other user-defined functions */
75 inline static bool equals_key ( void * ptr1 , void * ptr2 ) {
76 TypeK * key1 = ( TypeK * ) ptr1 , * key2 = ( TypeK * ) ptr2 ;
77 if ( key1 == NULL || key2 == NULL ) return false ;
78 return key1 -> equals ( key2 ) ;
81 inline static bool equals_val ( void * ptr1 , void * ptr2 ) {
82 if ( ptr1 == ptr2 ) return true ;
83 TypeV * val1 = ( TypeV * ) ptr1 , * val2 = ( TypeV * ) ptr2 ;
84 if ( val1 == NULL || val2 == NULL ) return false ;
85 return val1 -> equals ( val2 ) ;
88 inline static bool equals_id ( void * ptr1 , void * ptr2 ) {
89 id_tag_t * id1 = ( id_tag_t * ) ptr1 , * id2 = ( id_tag_t * ) ptr2 ;
90 if ( id1 == NULL || id2 == NULL ) return false ;
91 return ( * id1 ) . tag == ( * id2 ) . tag ;
/* Returns the stable call_id tag for a key, allocating and recording
   a fresh tag on first sight (lines between the branches are elided
   from this view). */
inline static call_id_t getKeyTag ( TypeK * key ) {
if ( ! spec_table_contains ( id_map , key ) ) {
call_id_t cur_id = current ( tag ) ;
spec_table_put ( id_map , key , ( void * ) cur_id ) ;
/* Key already tagged: hand back the recorded id. */
call_id_t res = ( call_id_t ) spec_table_get ( id_map , key ) ;
/* Definition of interface info struct: Put */
/* Captures one Put invocation for later checking: its return value
   plus the key/val arguments (field declarations elided from view). */
typedef struct Put_info {
/* End of info struct definition: Put */
/* ID function of interface: Put */
/* Commit-id for a Put: the tag of its key, so all operations on the
   same key share one ordering domain. */
inline static call_id_t Put_id(void *info, thread_id_t __TID__) {
Put_info* theInfo = (Put_info*)info;
TypeV * __RET__ = theInfo->__RET__;
TypeK * key = theInfo->key;
TypeV * val = theInfo->val;
call_id_t __ID__ = getKeyTag ( key );
/* End of ID function: Put */
/* Check action function of interface: Put */
/* Sequential check for Put: the returned old value must equal what
   the spec map held before this Put, and the spec map is updated to
   reflect the new mapping. */
inline static bool Put_check_action(void *info, call_id_t __ID__, thread_id_t __TID__) {
Put_info* theInfo = (Put_info*)info;
TypeV * __RET__ = theInfo->__RET__;
TypeK * key = theInfo->key;
TypeV * val = theInfo->val;
/* Read the old mapping, then apply this Put to the mirror map. */
TypeV * _Old_Val = ( TypeV * ) spec_table_get ( map , key ) ;
spec_table_put ( map , key , val ) ;
bool passed = false ;
/* NOTE(review): old/ret look like debug views of a `_val` int field
   on TypeV — confirm against the full file. */
int old = _Old_Val == NULL ? 0 : _Old_Val -> _val ;
int ret = __RET__ == NULL ? 0 : __RET__ -> _val ;
check_passed = equals_val ( __RET__ , _Old_Val );
/* End of check action function: Put */
/* Definition of interface info struct: Get */
/* Captures one Get invocation: its return value plus the key
   argument (field declarations elided from this view). */
typedef struct Get_info {
/* End of info struct definition: Get */
/* ID function of interface: Get */
/* Commit-id for a Get: the tag of its key (same domain as Put_id). */
inline static call_id_t Get_id(void *info, thread_id_t __TID__) {
Get_info* theInfo = (Get_info*)info;
TypeV * __RET__ = theInfo->__RET__;
TypeK * key = theInfo->key;
call_id_t __ID__ = getKeyTag ( key );
/* End of ID function: Get */
/* Check action function of interface: Get */
/* Sequential check for Get: the returned value must equal the spec
   map's current mapping for the key (read-only; map not modified). */
inline static bool Get_check_action(void *info, call_id_t __ID__, thread_id_t __TID__) {
Get_info* theInfo = (Get_info*)info;
TypeV * __RET__ = theInfo->__RET__;
TypeK * key = theInfo->key;
TypeV * _Old_Val = ( TypeV * ) spec_table_get ( map , key ) ;
bool passed = false ;
/* NOTE(review): old/ret look like debug views of a `_val` int field
   on TypeV — confirm against the full file. */
int old = _Old_Val == NULL ? 0 : _Old_Val -> _val ;
int ret = __RET__ == NULL ? 0 : __RET__ -> _val ;
check_passed = equals_val ( _Old_Val , __RET__ );
/* End of check action function: Get */
/* Two annotated interfaces: 0 = Get, 1 = Put. */
#define INTERFACE_SIZE 2
/* Per-interface {id_func, check_action} pairs and the happens-before
   rule table; both are filled in by __sequential_init(). */
static void** func_ptr_table;
static anno_hb_init** hb_init_table;
191 /* Initialization of sequential varialbes */
192 static void __SPEC_INIT__() {
193 map = new_spec_table_default ( equals_key ) ;
194 id_map = new_spec_table_default ( equals_id ) ;
195 tag = new_id_tag ( ) ;
/* Cleanup routine of sequential variables */
/* Teardown hook paired with __SPEC_INIT__ (body elided from view). */
static void __SPEC_CLEANUP__() {
/* Define function for sequential code initialization */
/* Registers the specification with the model checker: the per-
   interface function table, the happens-before rules Put->Put and
   Put->Get (same key tag), and the init/cleanup hooks. */
inline static void __sequential_init() {
/* Init func_ptr_table */
/* Two entries per interface: [2*i] = id func, [2*i+1] = check. */
func_ptr_table = (void**) malloc(sizeof(void*) * 2 * 2);
func_ptr_table[2 * 1] = (void*) &Put_id;
func_ptr_table[2 * 1 + 1] = (void*) &Put_check_action;
func_ptr_table[2 * 0] = (void*) &Get_id;
func_ptr_table[2 * 0 + 1] = (void*) &Get_check_action;
/* Put(true) -> Put(true) */
struct anno_hb_init *hbConditionInit0 = (struct anno_hb_init*) malloc(sizeof(struct anno_hb_init));
hbConditionInit0->interface_num_before = 1; // Put
hbConditionInit0->hb_condition_num_before = 0; //
hbConditionInit0->interface_num_after = 1; // Put
hbConditionInit0->hb_condition_num_after = 0; //
/* Put(true) -> Get(true) */
struct anno_hb_init *hbConditionInit1 = (struct anno_hb_init*) malloc(sizeof(struct anno_hb_init));
hbConditionInit1->interface_num_before = 1; // Put
hbConditionInit1->hb_condition_num_before = 0; //
hbConditionInit1->interface_num_after = 0; // Get
hbConditionInit1->hb_condition_num_after = 0; //
/* Init hb_init_table */
hb_init_table = (anno_hb_init**) malloc(sizeof(anno_hb_init*) * 2);
#define HB_INIT_TABLE_SIZE 2
hb_init_table[0] = hbConditionInit0;
hb_init_table[1] = hbConditionInit1;
/* Pass init info, including function table info & HB rules */
struct anno_init *anno_init = (struct anno_init*) malloc(sizeof(struct anno_init));
anno_init->init_func = (void*) __SPEC_INIT__;
anno_init->cleanup_func = (void*) __SPEC_CLEANUP__;
anno_init->func_table = func_ptr_table;
anno_init->func_table_size = INTERFACE_SIZE;
anno_init->hb_init_table = hb_init_table;
anno_init->hb_init_table_size = HB_INIT_TABLE_SIZE;
/* Hand the bundle to CDSChecker (the line setting init->type is
   elided from this view). */
struct spec_annotation *init = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
init->annotation = anno_init;
cdsannotate(SPEC_ANALYSIS, init);
242 /* End of Global construct generation in class */
/* CHM: per-table control block holding resize/copy state
   (class header and some members elided from this view). */
friend class cliffc_hashtable;
/* The next table during a resize; NULL when no resize in flight. */
atomic<kvs_data*> _newkvs;
/* Next chunk index handed out to copier threads. */
atomic_int _copy_idx;
/* Count of old-table slots whose copy has completed. */
atomic_int _copy_done;
/* Constructor body: all counters start at zero; relaxed stores are
   fine before the CHM is published. */
_newkvs.store(NULL, memory_order_relaxed);
_size.store(size, memory_order_relaxed);
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
_copy_done.store(0, memory_order_relaxed);
/* Heuristic fullness test: the table counts as full once a probe
   chain has hit REPROBE_LIMIT and the used-slot count reaches
   reprobe_limit(len). (The `return` line is elided from this view.) */
bool table_full(int reprobe_cnt, int len) {
reprobe_cnt >= REPROBE_LIMIT &&
_slots.load(memory_order_relaxed) >= reprobe_limit(len);
/* Install (or discover) the next, larger table. Racing resizers may
   each allocate; exactly one wins the CAS on _newkvs and losers adopt
   the winner's table (several lines elided from this view). */
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
int oldlen = kvs->_size;
int sz = _size.load(memory_order_relaxed);
/* Size policy: double when >= 1/4 full, quadruple when >= 1/2. */
if (sz >= (oldlen >> 2)) { newsz = oldlen << 1; if (sz >= (oldlen >> 1))
newsz = oldlen << 2; }
/* Always grow by at least 2x, never shrink. */
if (newsz <= oldlen) newsz = oldlen << 1;
if (newsz < oldlen) newsz = oldlen;
/* Re-check: another thread may have installed a new table already. */
newkvs = _newkvs.load(memory_order_acquire);
if (newkvs != NULL) return newkvs;
newkvs = new kvs_data(newsz);
void *chm = (void*) new CHM(sz);
newkvs->_data[0].store(chm, memory_order_relaxed);
kvs_data *cur_newkvs;
if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
/* NOTE(review): the names below are inverted relative to their CAS
   roles — `desired` (NULL) is the comparand and `expected` (newkvs)
   is the value being installed. Behavior is correct; naming is not. */
kvs_data *desired = (kvs_data*) NULL;
kvs_data *expected = (kvs_data*) newkvs;
if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
memory_order_relaxed)) {
/* Lost the race: adopt the winner's table. */
newkvs = _newkvs.load(memory_order_acquire);
/* Cooperative copy driver: claim chunks of up to 1024 old-table slots
   via CAS on _copy_idx and copy them into the new table; `panic`
   mode rescans from a fixed start once claiming wraps (several lines
   elided from this view). */
void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
MODEL_ASSERT (get_chm(oldkvs) == this);
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
int oldlen = oldkvs->_size;
int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
int panic_start = -1;
while (_copy_done.load(memory_order_relaxed) < oldlen) {
copyidx = _copy_idx.load(memory_order_relaxed);
/* Normal mode: CAS-claim the next chunk of work. */
if (panic_start == -1) { copyidx = _copy_idx.load(memory_order_relaxed);
while (copyidx < (oldlen << 1) &&
!_copy_idx.compare_exchange_strong(copyidx, copyidx +
min_copy_work, memory_order_relaxed, memory_order_relaxed))
copyidx = _copy_idx.load(memory_order_relaxed);
/* Claim index ran past 2*oldlen: switch to panic (rescan) mode. */
if (!(copyidx < (oldlen << 1)))
panic_start = copyidx;
/* Copy the claimed chunk; indices wrap via the len-1 mask. */
for (int i = 0; i < min_copy_work; i++)
if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
copy_check_and_promote(topmap, oldkvs, workdone);
copyidx += min_copy_work;
/* Helpers that only owe a chunk can stop early outside panic mode. */
if (!copy_all && panic_start == -1)
copy_check_and_promote(topmap, oldkvs, 0); }
/* Copy a single slot someone tripped over mid-resize, credit the work,
   then optionally keep helping with the rest of the copy. */
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
*oldkvs, int idx, void *should_help) {
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
if (copy_slot(topmap, idx, oldkvs, newkvs))
copy_check_and_promote(topmap, oldkvs, 1); return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
/* Credit `workdone` copied slots to _copy_done and, when the whole old
   table is copied, promote the new table by CAS-ing topmap->_kvs
   (several lines elided from this view). */
void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
oldkvs, int workdone) {
int oldlen = oldkvs->_size;
int copyDone = _copy_done.load(memory_order_relaxed);
/* CAS-retry loop folding our work into the done counter. */
copyDone = _copy_done.load(memory_order_relaxed);
if (_copy_done.compare_exchange_weak(copyDone, copyDone +
workdone, memory_order_relaxed, memory_order_relaxed))
/* Last copier promotes: swing the top-level table pointer. */
if (copyDone + workdone == oldlen &&
topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
memory_order_relaxed);
/* Copy one {key,value} pair from the old table into the new one.
   Protocol: close an empty key with TOMBSTONE, box the old value as a
   prime to freeze it, replay it into the new table, then stamp the old
   slot TOMBPRIME. Returns whether THIS thread did the copy (several
   lines elided from this view). */
bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
/* Close an empty slot so no new key can be inserted into it. */
while ((key_slot = key(oldkvs, idx)) == NULL)
CAS_key(oldkvs, idx, NULL, TOMBSTONE);
slot *oldval = val(oldkvs, idx);
/* Box the current value as prime to block further old-table writes. */
while (!is_prime(oldval)) {
slot *box = (oldval == NULL || oldval == TOMBSTONE)
? TOMBPRIME : new slot(true, oldval->_ptr);
if (CAS_val(oldkvs, idx, oldval, box)) {
/* Dead slot: nothing to copy into the new table. */
if (box == TOMBPRIME)
return 1; oldval = box; break;
oldval = val(oldkvs, idx); }
if (oldval == TOMBPRIME) return false;
/* Replay the unboxed value into the new table (only if absent). */
slot *old_unboxed = new slot(false, oldval->_ptr);
int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
/* Finally stamp the old slot so readers go to the new table. */
while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
oldval = val(oldkvs, idx);
return copied_into_new;
static const int Default_Init_Size = 4;
/* Sentinel slots controlling putIfMatch semantics:
   MATCH_ANY    = replace only an existing live value,
   NO_MATCH_OLD = unconditional store,
   TOMBSTONE    = deleted value / "expect absent",
   TOMBPRIME    = dead slot, copy finished. */
static slot* const MATCH_ANY;
static slot* const NO_MATCH_OLD;
static slot* const TOMBPRIME;
static slot* const TOMBSTONE;
/* Base probe-chain cutoff; scaled by table size in reprobe_limit(). */
static const int REPROBE_LIMIT = 10;
/* Top-level pointer to the current {key,value} array. */
atomic<kvs_data*> _kvs;
/* Default-constructor body (header elided): allocate the initial
   table, park its CHM in slot 0, publish via _kvs. */
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
_kvs.store(kvs, memory_order_relaxed);
/* Sized constructor: identical setup with a caller-chosen capacity. */
cliffc_hashtable(int init_size) {
kvs_data *kvs = new kvs_data(init_size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
_kvs.store(kvs, memory_order_relaxed);
/* Public Get: wraps __wrapper__get with the checker's INTERFACE_BEGIN
   / HB_CONDITION / INTERFACE_END annotations (interface_num 0 = Get).
   Some lines, including the return, are elided from this view. */
TypeV * get(TypeK * key) {
/* Interface begins */
struct anno_interface_begin *interface_begin = (struct anno_interface_begin*) malloc(sizeof(struct anno_interface_begin));
interface_begin->interface_num = 0; // Get
interface_begin->interface_name = "Get";
struct spec_annotation *annotation_interface_begin = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_interface_begin->type = INTERFACE_BEGIN;
annotation_interface_begin->annotation = interface_begin;
cdsannotate(SPEC_ANALYSIS, annotation_interface_begin);
/* The real lookup. */
TypeV * __RET__ = __wrapper__get(key);
struct anno_hb_condition *hb_condition = (struct anno_hb_condition*) malloc(sizeof(struct anno_hb_condition));
hb_condition->interface_num = 0; // Get
hb_condition->hb_condition_num = 0;
struct spec_annotation *annotation_hb_condition = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_hb_condition->type = HB_CONDITION;
annotation_hb_condition->annotation = hb_condition;
cdsannotate(SPEC_ANALYSIS, annotation_hb_condition);
/* Package the call for Get_id/Get_check_action (key field assignment
   is elided from this view). */
Get_info* info = (Get_info*) malloc(sizeof(Get_info));
info->__RET__ = __RET__;
struct anno_interface_end *interface_end = (struct anno_interface_end*) malloc(sizeof(struct anno_interface_end));
interface_end->interface_num = 0; // Get
interface_end->info = info;
struct spec_annotation *annoation_interface_end = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annoation_interface_end->type = INTERFACE_END;
annoation_interface_end->annotation = interface_end;
cdsannotate(SPEC_ANALYSIS, annoation_interface_end);
479 TypeV * __wrapper__get(TypeK * key) {
480 slot *key_slot = new slot(false, key);
481 int fullhash = hash(key_slot);
483 kvs_data *kvs = _kvs.load(memory_order_acquire);
485 slot *V = get_impl(this, kvs, key_slot, fullhash);
486 if (V == NULL) return NULL;
487 MODEL_ASSERT (!is_prime(V));
488 return (TypeV*) V->_ptr;
/* Public Put: wraps __wrapper__put with the checker's INTERFACE_BEGIN
   / HB_CONDITION / INTERFACE_END annotations (interface_num 1 = Put).
   Some lines, including the return, are elided from this view. */
TypeV * put(TypeK * key, TypeV * val) {
/* Interface begins */
struct anno_interface_begin *interface_begin = (struct anno_interface_begin*) malloc(sizeof(struct anno_interface_begin));
interface_begin->interface_num = 1; // Put
interface_begin->interface_name = "Put";
struct spec_annotation *annotation_interface_begin = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_interface_begin->type = INTERFACE_BEGIN;
annotation_interface_begin->annotation = interface_begin;
cdsannotate(SPEC_ANALYSIS, annotation_interface_begin);
/* The real insert/update. */
TypeV * __RET__ = __wrapper__put(key, val);
struct anno_hb_condition *hb_condition = (struct anno_hb_condition*) malloc(sizeof(struct anno_hb_condition));
hb_condition->interface_num = 1; // Put
hb_condition->hb_condition_num = 0;
struct spec_annotation *annotation_hb_condition = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_hb_condition->type = HB_CONDITION;
annotation_hb_condition->annotation = hb_condition;
cdsannotate(SPEC_ANALYSIS, annotation_hb_condition);
/* Package the call for Put_id/Put_check_action (key/val field
   assignments are elided from this view). */
Put_info* info = (Put_info*) malloc(sizeof(Put_info));
info->__RET__ = __RET__;
struct anno_interface_end *interface_end = (struct anno_interface_end*) malloc(sizeof(struct anno_interface_end));
interface_end->interface_num = 1; // Put
interface_end->info = info;
struct spec_annotation *annoation_interface_end = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annoation_interface_end->type = INTERFACE_END;
annoation_interface_end->annotation = interface_end;
cdsannotate(SPEC_ANALYSIS, annoation_interface_end);
/* Unconstrained put: NO_MATCH_OLD stores regardless of the old value;
   returns the previous mapping (NULL when absent). */
TypeV * __wrapper__put(TypeK * key, TypeV * val) {
return putIfMatch(key, val, NO_MATCH_OLD);
529 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
530 return putIfMatch(key, val, TOMBSTONE);
/* Remove a key by storing TOMBSTONE unconditionally; returns the
   removed value (NULL when absent).
   NOTE(review): TOMBSTONE is a slot* passed where the TypeV* overload
   expects a value — likely relies on a cast or overload in lines not
   visible here; verify against the full file. */
TypeV* remove(TypeK *key) {
return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
/* Conditional remove: delete only if the key currently maps to `val`.
   NOTE(review): `val_slot` is built but never used — the call passes
   raw `val` where the static putIfMatch expects a slot*; the boxed
   `val_slot` was presumably intended. Verify against the full file. */
bool remove(TypeK *key, TypeV *val) {
slot *val_slot = val == NULL ? NULL : new slot(false, val);
return putIfMatch(key, TOMBSTONE, val) == val;
/* Replace only when the key already maps to some live value
   (MATCH_ANY sentinel); returns the previous value. */
TypeV* replace(TypeK *key, TypeV *val) {
return putIfMatch(key, val, MATCH_ANY);
/* CAS-style replace: install newval only if the key maps to oldval.
   NOTE(review): `oldval` (TypeV*) is passed where the expected-value
   parameter is a slot* — likely depends on boxing done in lines not
   visible here; verify against the full file. */
bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
return putIfMatch(key, newval, oldval) == oldval;
/* The CHM control block lives in slot 0 of every kvs_data. */
static CHM* get_chm(kvs_data* kvs) {
CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
561 static int* get_hashes(kvs_data *kvs) {
562 return (int *) kvs->_data[1].load(memory_order_relaxed);
/* Read the key slot at `idx` (keys live at _data[2*idx + 2]); emits
   the Read_Key_Point potential-commit-point annotation. The return is
   elided from this view. */
static inline slot* key(kvs_data *kvs, int idx) {
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
/* Automatically generated code for potential commit point: Read_Key_Point */
struct anno_potential_cp_define *potential_cp_define = (struct anno_potential_cp_define*) malloc(sizeof(struct anno_potential_cp_define));
potential_cp_define->label_num = 0;
potential_cp_define->label_name = "Read_Key_Point";
potential_cp_define->is_additional_point = false;
struct spec_annotation *annotation_potential_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_potential_cp_define->type = POTENTIAL_CP_DEFINE;
annotation_potential_cp_define->annotation = potential_cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_potential_cp_define);
/* Read the value slot at `idx` (values live at _data[2*idx + 3]) with
   acquire ordering; emits the Read_Val_Point potential-commit-point
   annotation. The return is elided from this view. */
static inline slot* val(kvs_data *kvs, int idx) {
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
/* Automatically generated code for potential commit point: Read_Val_Point */
struct anno_potential_cp_define *potential_cp_define = (struct anno_potential_cp_define*) malloc(sizeof(struct anno_potential_cp_define));
potential_cp_define->label_num = 1;
potential_cp_define->label_name = "Read_Val_Point";
potential_cp_define->is_additional_point = false;
struct spec_annotation *annotation_potential_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_potential_cp_define->type = POTENTIAL_CP_DEFINE;
annotation_potential_cp_define->annotation = potential_cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_potential_cp_define);
/* Spread TypeK::hashCode() with shift/xor bit mixing (some mixing
   steps are elided from this view).
   NOTE(review): the additions/shifts on a signed int can overflow,
   which is UB in C++ — an unsigned intermediate would be safer. */
static int hash(slot *key_slot) {
MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
TypeK* key = (TypeK*) key_slot->_ptr;
int h = key->hashCode();
h += (h << 15) ^ 0xffffcd7d;
h += (h << 2) + (h << 14);
return h ^ (h >> 16);
619 static int reprobe_limit(int len) {
620 return REPROBE_LIMIT + (len >> 2);
623 static inline bool is_prime(slot *val) {
624 return (val != NULL) && val->_prime;
/* Key comparison during probing: `hash` indexes the cached-hash array
   (0 meaning "not yet published"), so a mismatched cached hash can
   skip the TypeK::equals call. Several lines are elided from view. */
static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
MODEL_ASSERT (K != NULL);
TypeK* key_ptr = (TypeK*) key_slot->_ptr;
((hashes[hash] == 0 || hashes[hash] == fullhash) &&
key_ptr->equals(K->_ptr));
638 static bool valeq(slot *val_slot1, slot *val_slot2) {
639 MODEL_ASSERT (val_slot1 != NULL);
640 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
641 if (val_slot2 == NULL || ptr1 == NULL) return false;
642 return ptr1->equals(val_slot2->_ptr);
/* CAS the key slot at `idx` (keys live at _data[2*idx + 2]); emits
   the Write_Key_Point potential-commit-point annotation. The return
   is elided from this view. */
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
desired, memory_order_relaxed, memory_order_relaxed);
/* Automatically generated code for potential commit point: Write_Key_Point */
struct anno_potential_cp_define *potential_cp_define = (struct anno_potential_cp_define*) malloc(sizeof(struct anno_potential_cp_define));
potential_cp_define->label_num = 2;
potential_cp_define->label_name = "Write_Key_Point";
potential_cp_define->is_additional_point = false;
struct spec_annotation *annotation_potential_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_potential_cp_define->type = POTENTIAL_CP_DEFINE;
annotation_potential_cp_define->annotation = potential_cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_potential_cp_define);
/* CAS the value slot at `idx` (values live at _data[2*idx + 3]) with
   acq_rel ordering; emits the Write_Val_Point potential-commit-point
   annotation. The return is elided from this view. */
static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
desired, memory_order_acq_rel, memory_order_relaxed);
/* Automatically generated code for potential commit point: Write_Val_Point */
struct anno_potential_cp_define *potential_cp_define = (struct anno_potential_cp_define*) malloc(sizeof(struct anno_potential_cp_define));
potential_cp_define->label_num = 3;
potential_cp_define->label_name = "Write_Val_Point";
potential_cp_define->is_additional_point = false;
struct spec_annotation *annotation_potential_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_potential_cp_define->type = POTENTIAL_CP_DEFINE;
annotation_potential_cp_define->annotation = potential_cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_potential_cp_define);
/* Probe loop for Get: walk the chain from fullhash & (len-1); a key
   match reads the value (chasing primes into the new table), a miss
   reprobes, and an exhausted chain falls through to the next table.
   Commit points Get_Point1/Get_Point2 mark the linearizing reads.
   Many lines are elided from this view. */
slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
int len = kvs->_size;
CHM *chm = get_chm(kvs);
int *hashes = get_hashes(kvs);
int idx = fullhash & (len - 1);
/* Probe: read the key at the current index. */
slot *K = key(kvs, idx);
/* Automatically generated code for commit point define: Get_Point1 */
struct anno_cp_define *cp_define = (struct anno_cp_define*) malloc(sizeof(struct anno_cp_define));
cp_define->label_num = 4;
cp_define->label_name = "Get_Point1";
cp_define->potential_cp_label_num = 0;
cp_define->potential_label_name = "Read_Key_Point";
cp_define->interface_num = 0;
cp_define->is_additional_point = false;
struct spec_annotation *annotation_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_cp_define->type = CP_DEFINE;
annotation_cp_define->annotation = cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_cp_define);
slot *V = val(kvs, idx);
/* Key matches: the value read is the candidate linearization. */
if (keyeq(K, key_slot, hashes, idx, fullhash)) {
/* Automatically generated code for commit point clear: Get_Clear */
struct anno_cp_clear *cp_clear = (struct anno_cp_clear*) malloc(sizeof(struct anno_cp_clear));
struct spec_annotation *annotation_cp_clear = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_cp_clear->type = CP_CLEAR;
annotation_cp_clear->annotation = cp_clear;
cdsannotate(SPEC_ANALYSIS, annotation_cp_clear);
/* Automatically generated code for commit point define: Get_Point2 */
struct anno_cp_define *cp_define = (struct anno_cp_define*) malloc(sizeof(struct anno_cp_define));
cp_define->label_num = 6;
cp_define->label_name = "Get_Point2";
cp_define->potential_cp_label_num = 1;
cp_define->potential_label_name = "Read_Val_Point";
cp_define->interface_num = 0;
cp_define->is_additional_point = false;
struct spec_annotation *annotation_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_cp_define->type = CP_DEFINE;
annotation_cp_define->annotation = cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_cp_define);
return (V == TOMBSTONE) ? NULL : V; }
/* Prime value: finish this slot's copy and retry in the new table. */
return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
idx, key_slot), key_slot, fullhash);
/* NOTE(review): `key_slot` is the caller's probe argument and can
   never be TOMBSTONE — the probed `K` was presumably intended here.
   Verify against the full file before changing. */
if (++reprobe_cnt >= REPROBE_LIMIT ||
key_slot == TOMBSTONE) {
kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
return newkvs == NULL ? NULL : get_impl(topmap,
topmap->help_copy(newkvs), key_slot, fullhash);
/* Reprobe: step to the next index in the chain. */
idx = (idx + 1) & (len - 1); }
/* Typed front-end for all mutating ops: box key/value, run the slot-
   level putIfMatch against the current table, and unbox the result
   (TOMBSTONE maps back to NULL). A few lines are elided from view. */
TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
if (old_val == NULL) {
slot *key_slot = new slot(false, key);
slot *value_slot = new slot(false, value);
kvs_data *kvs = _kvs.load(memory_order_acquire);
slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
/* The slot-level routine always returns a non-prime witness. */
MODEL_ASSERT (res != NULL);
MODEL_ASSERT (!is_prime(res));
return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
/* Slot-level conditional store. Probes for the key (claiming an empty
   slot via CAS_key), then CAS-loops the value in, honoring the expVal
   sentinels (NO_MATCH_OLD / MATCH_ANY / TOMBSTONE). Full tables and
   prime values redirect the operation into the next table. Commit
   points Put_WriteKey/Put_Point mark the linearizing writes. Many
   lines are elided from this view. */
static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
*key_slot, slot *val_slot, slot *expVal) {
MODEL_ASSERT (val_slot != NULL);
MODEL_ASSERT (!is_prime(val_slot));
MODEL_ASSERT (!is_prime(expVal));
int fullhash = hash(key_slot);
int len = kvs->_size;
CHM *chm = get_chm(kvs);
int *hashes = get_hashes(kvs);
int idx = fullhash & (len - 1);
/* Probe loop: find our key or claim an empty slot. */
while (true) { K = key(kvs, idx);
/* Empty slot: a delete of an absent key needs no work at all. */
if (K == NULL) { if (val_slot == TOMBSTONE) return val_slot;
if (CAS_key(kvs, idx, NULL, key_slot)) {
/* Automatically generated code for commit point define: Put_WriteKey */
struct anno_cp_define *cp_define = (struct anno_cp_define*) malloc(sizeof(struct anno_cp_define));
cp_define->label_num = 7;
cp_define->label_name = "Put_WriteKey";
cp_define->potential_cp_label_num = 2;
cp_define->potential_label_name = "Write_Key_Point";
cp_define->interface_num = 1;
cp_define->is_additional_point = false;
struct spec_annotation *annotation_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_cp_define->type = CP_DEFINE;
annotation_cp_define->annotation = cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_cp_define);
/* Claimed the slot: count it and publish the cached hash. */
chm->_slots.fetch_add(1, memory_order_relaxed); hashes[idx] = fullhash; break;
/* Lost the claim race: re-read whoever won. */
K = key(kvs, idx); MODEL_ASSERT (K != NULL);
if (keyeq(K, key_slot, hashes, idx, fullhash))
/* Chain exhausted or dead key: resize and retry in the new table. */
if (++reprobe_cnt >= reprobe_limit(len) ||
K == TOMBSTONE) { newkvs = chm->resize(topmap, kvs);
if (expVal != NULL) topmap->help_copy(newkvs);
return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
idx = (idx + 1) & (len - 1); }
/* Value already what we want to store: done. */
if (val_slot == V) return V;
newkvs = chm->_newkvs.load(memory_order_acquire);
/* Trigger a resize on a full table or when we hit a prime. */
if (newkvs == NULL &&
((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
newkvs = chm->resize(topmap, kvs); }
/* A resize is in flight: move this slot and retry in the new table. */
return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
expVal), key_slot, val_slot, expVal);
MODEL_ASSERT (!is_prime(V));
/* Expected-value filter: bail out when the current V does not match
   what the caller demanded. */
if (expVal != NO_MATCH_OLD &&
(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
!(V == NULL && expVal == TOMBSTONE) &&
(expVal == NULL || !valeq(expVal, V))) {
/* Try to install the value; success is the Put commit point. */
if (CAS_val(kvs, idx, V, val_slot)) {
/* Automatically generated code for commit point define: Put_Point */
struct anno_cp_define *cp_define = (struct anno_cp_define*) malloc(sizeof(struct anno_cp_define));
cp_define->label_num = 8;
cp_define->label_name = "Put_Point";
cp_define->potential_cp_label_num = 3;
cp_define->potential_label_name = "Write_Val_Point";
cp_define->interface_num = 1;
cp_define->is_additional_point = false;
struct spec_annotation *annotation_cp_define = (struct spec_annotation*) malloc(sizeof(struct spec_annotation));
annotation_cp_define->type = CP_DEFINE;
annotation_cp_define->annotation = cp_define;
cdsannotate(SPEC_ANALYSIS, annotation_cp_define);
/* Adjust the live-entry count on insert/delete transitions
   (skipped during table-copy calls where expVal == NULL). */
if (expVal != NULL) { if ((V == NULL || V == TOMBSTONE) &&
val_slot != TOMBSTONE)
chm->_size.fetch_add(1, memory_order_relaxed);
if (!(V == NULL || V == TOMBSTONE) &&
val_slot == TOMBSTONE)
chm->_size.fetch_add(-1, memory_order_relaxed);
return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
/* CAS lost to a copy: chase the slot into the new table. */
return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
idx, expVal), key_slot, val_slot, expVal);
/* Lend a hand to an in-flight resize of the top table, if any; returns
   `helper` for the caller to continue with (tail lines elided). */
kvs_data* help_copy(kvs_data *helper) {
kvs_data *topkvs = _kvs.load(memory_order_acquire);
CHM *topchm = get_chm(topkvs);
/* No resize in progress: nothing to help with. */
if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
topchm->help_copy_impl(this, topkvs, false);
/* Out-of-line definitions for the per-instantiation static spec state
   declared inside cliffc_hashtable. */
template<typename TypeK, typename TypeV>
void** cliffc_hashtable<TypeK, TypeV>::func_ptr_table;
template<typename TypeK, typename TypeV>
anno_hb_init** cliffc_hashtable<TypeK, TypeV>::hb_init_table;
template<typename TypeK, typename TypeV>
spec_table * cliffc_hashtable<TypeK, TypeV>::map;
template<typename TypeK, typename TypeV>
spec_table * cliffc_hashtable<TypeK, TypeV>::id_map;
template<typename TypeK, typename TypeV>
id_tag_t * cliffc_hashtable<TypeK, TypeV>::tag;