for (i = 2; i < real_size; i++) {
_data[i].store(NULL, memory_order_relaxed);
}
- _data[1].store(hashes, memory_order_release);
+ _data[1].store(hashes, memory_order_relaxed);
}
~kvs_data() {
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
- _copy_done.store(0, memory_order_release);
+ _copy_done.store(0, memory_order_relaxed);
}
~CHM() {}
}
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
- //model_print("resizing...\n");
+ model_print("resizing...\n");
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
if (newkvs != NULL)
return newkvs;
// Last check cause the 'new' below is expensive
newkvs = _newkvs.load(memory_order_acquire);
+ model_print("hey1\n");
if (newkvs != NULL) return newkvs;
newkvs = new kvs_data(newsz);
void *chm = (void*) new CHM(sz);
- newkvs->_data[0].store(chm, memory_order_release);
+ model_print("hey2\n");
+ newkvs->_data[0].store(chm, memory_order_relaxed);
kvs_data *cur_newkvs;
// Another check after the slow allocation
// Just follow Cliff Click's code here
int panic_start = -1;
int copyidx;
- while (_copy_done.load(memory_order_acquire) < oldlen) {
- copyidx = _copy_idx.load(memory_order_acquire);
+ while (_copy_done.load(memory_order_relaxed) < oldlen) {
+ copyidx = _copy_idx.load(memory_order_relaxed);
if (panic_start == -1) { // No panic
- copyidx = _copy_idx.load(memory_order_acquire);
+ copyidx = _copy_idx.load(memory_order_relaxed);
while (copyidx < (oldlen << 1) &&
!_copy_idx.compare_exchange_strong(copyidx, copyidx +
min_copy_work, memory_order_release, memory_order_relaxed))
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
*oldkvs, int idx, void *should_help) {
- kvs_data *newkvs = _newkvs.load(memory_order_acquire);
+ kvs_data *newkvs = _newkvs.load(memory_order_relaxed);
// We're only here cause the caller saw a Prime
if (copy_slot(topmap, idx, oldkvs, newkvs))
copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// Promote the new table to the current table
if (copyDone + workdone == oldlen &&
- topmap->_kvs.load(memory_order_acquire) == oldkvs) {
- kvs_data *newkvs = _newkvs.load(memory_order_acquire);
+ topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
+ kvs_data *newkvs = _newkvs.load(memory_order_relaxed);
topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
memory_order_relaxed);
}
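// Only the thread whose CAS succeeds swings topmap->_kvs to the new
// table; a losing thread simply falls through, because another thread
// has already promoted the same newkvs.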
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
cliffc_hashtable(int init_size) {
kvs_data *kvs = new kvs_data(init_size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
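// Both constructors follow the same pattern: allocate the kvs_data
// array, attach its CHM control block at _data[0], then publish the
// table through _kvs so later get()/put() calls can reach it.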
/**
@Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
@ID: getKeyTag(key)
@Action:
- void *_Old_Val = spec_table_get(map, key);
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
+ //bool passed = equals_val(_Old_Val, __RET__);
+ bool passed = false;
+ if (!passed) {
+ int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
+ key->_val, old, ret);
+ }
@Post_check:
- __RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
+ //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
+ equals_val(_Old_Val, __RET__)
@End
*/
TypeV* get(TypeK *key) {
slot *key_slot = new slot(false, key);
int fullhash = hash(key_slot);
- kvs_data *kvs = _kvs.load(memory_order_acquire);
+ kvs_data *kvs = _kvs.load(memory_order_relaxed);
slot *V = get_impl(this, kvs, key_slot, fullhash);
if (V == NULL) return NULL;
MODEL_ASSERT (!is_prime(V));
@ID: getKeyTag(key)
@Action:
# Remember this old value at checking point
- void *_Old_Val = spec_table_get(map, key);
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
spec_table_put(map, key, val);
+ //bool passed = equals_val(__RET__, _Old_Val);
+ bool passed = false;
+ if (!passed) {
+ int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
+ key->_val, val->_val, old, ret);
+ }
@Post_check:
equals_val(__RET__, _Old_Val)
@End
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
- slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
+ slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
+ /**
+ @Begin
+ # This is a complicated potential commit point since many functions
+ # call key().
+ @Potential_commit_point_define: true
+ @Label: Read_Key_Point
+ @End
+ */
return res;
}
// inserted keys
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
- desired, memory_order_release, memory_order_relaxed);
+ desired, memory_order_relaxed, memory_order_relaxed);
}
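// Layout note: key slots sit at the even offsets starting at _data[2];
// _data[0] holds the CHM control block and _data[1] the cached hash
// array (see the kvs_data constructor). A false return here means some
// other thread won the race to claim this key slot.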
/**
# If it is a successful put instead of a copy or any other internal
# operation, expected != NULL
@Begin
- @Potential_commit_point_define: res == true
+ @Potential_commit_point_define: res
@Label: Write_Val_Point
@End
*/
int reprobe_cnt = 0;
while (true) {
slot *K = key(kvs, idx);
- slot *V = val(kvs, idx);
/**
@Begin
@Commit_point_define: K == NULL
- @Potential_commit_point_label: Read_Val_Point
+ @Potential_commit_point_label: Read_Key_Point
@Label: Get_Success_Point_1
@End
*/
+ slot *V = val(kvs, idx);
+
- if (K == NULL) return NULL; // A miss
+ if (K == NULL) {
+ model_print("Key is null\n");
+ return NULL; // A miss
+ }
if (keyeq(K, key_slot, hashes, idx, fullhash)) {
// Key hit! Check if table-resize in progress
if (++reprobe_cnt >= REPROBE_LIMIT ||
key_slot == TOMBSTONE) {
// Retry in new table
- // Atomic read (acquire) can be here
- kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
+ // Atomic read can be here
+ kvs_data *newkvs = chm->_newkvs.load(memory_order_relaxed);
/**
@Begin
@Commit_point_define_check: newkvs == NULL
slot *key_slot = new slot(false, key);
slot *value_slot = new slot(false, value);
- kvs_data *kvs = _kvs.load(memory_order_acquire);
+ kvs_data *kvs = _kvs.load(memory_order_relaxed);
slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
// Only when copy_slot() call putIfMatch() will it return NULL
MODEL_ASSERT (res != NULL);
// Here it tries to resize cause it doesn't want other threads to stop
// its progress (eagerly try to resize soon)
- newkvs = chm->_newkvs.load(memory_order_acquire);
+ newkvs = chm->_newkvs.load(memory_order_relaxed);
if (newkvs == NULL &&
((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
//model_print("resize2\n");
// Help along an existing table-resize. This is a fast cut-out wrapper.
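// It only checks whether the top-level table has a resize in flight
// (_newkvs non-NULL) and, if so, hands the per-slot work to
// help_copy_impl() before returning its argument unchanged.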
kvs_data* help_copy(kvs_data *helper) {
- kvs_data *topkvs = _kvs.load(memory_order_acquire);
+ kvs_data *topkvs = _kvs.load(memory_order_relaxed);
CHM *topchm = get_chm(topkvs);
// No copy in progress
- if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
+ if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
topchm->help_copy_impl(this, topkvs, false);
return helper;
}