#include <iostream>
#include <atomic>
+#include "stdio.h"
//#include <common.h>
#ifdef STANDALONE
#include <assert.h>
for (i = 2; i < real_size; i++) {
_data[i].store(NULL, memory_order_relaxed);
}
- _data[1].store(hashes, memory_order_release);
+ _data[1].store(hashes, memory_order_relaxed);
}
~kvs_data() {
map = new_spec_table_default(equals_key);
id_map = new_spec_table_default(equals_id);
tag = new_id_tag();
-
+ //@Cleanup:
+ // model_print("cleaning up\n");
@DefineFunc:
bool equals_key(void *ptr1, void *ptr2) {
TypeK *key1 = (TypeK*) ptr1,
return res;
}
}
-
- @Interface_cluster:
- Read_interface = {
- Get,
- PutIfAbsent,
- RemoveAny,
- RemoveIfMatch,
- ReplaceAny,
- ReplaceIfMatch
- }
-
- Write_interface = {
- Put,
- PutIfAbsent(COND_PutIfAbsentSucc),
- RemoveAny,
- RemoveIfMatch(COND_RemoveIfMatchSucc),
- ReplaceAny,
- ReplaceIfMatch(COND_ReplaceIfMatchSucc)
- }
@Happens_before:
- //Write_interface -> Read_interface
Put->Get
Put->Put
@End
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
- _copy_done.store(0, memory_order_release);
+ _copy_done.store(0, memory_order_relaxed);
}
// Trivial destructor: CHM performs no cleanup of its own (empty body).
~CHM() {}
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
//model_print("resizing...\n");
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
if (newkvs != NULL)
return newkvs;
if (newsz < oldlen) newsz = oldlen;
// Last check cause the 'new' below is expensive
+ /**** FIXME: miss ****/
newkvs = _newkvs.load(memory_order_acquire);
+ //model_print("hey1\n");
if (newkvs != NULL) return newkvs;
newkvs = new kvs_data(newsz);
void *chm = (void*) new CHM(sz);
- newkvs->_data[0].store(chm, memory_order_release);
+ //model_print("hey2\n");
+ newkvs->_data[0].store(chm, memory_order_relaxed);
kvs_data *cur_newkvs;
// Another check after the slow allocation
+ /**** FIXME: miss ****/
if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
return cur_newkvs;
// CAS the _newkvs to the allocated table
kvs_data *desired = (kvs_data*) NULL;
kvs_data *expected = (kvs_data*) newkvs;
+ /**** FIXME: miss ****/
+ //model_print("release in resize!\n");
if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
memory_order_relaxed)) {
// Should clean the allocated area
delete newkvs;
+ /**** FIXME: miss ****/
newkvs = _newkvs.load(memory_order_acquire);
}
return newkvs;
void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
bool copy_all) {
MODEL_ASSERT (get_chm(oldkvs) == this);
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
int oldlen = oldkvs->_size;
int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
// Just follow Cliff Click's code here
int panic_start = -1;
int copyidx;
- while (_copy_done.load(memory_order_acquire) < oldlen) {
- copyidx = _copy_idx.load(memory_order_acquire);
+ while (_copy_done.load(memory_order_relaxed) < oldlen) {
+ copyidx = _copy_idx.load(memory_order_relaxed);
if (panic_start == -1) { // No painc
- copyidx = _copy_idx.load(memory_order_acquire);
+ copyidx = _copy_idx.load(memory_order_relaxed);
while (copyidx < (oldlen << 1) &&
!_copy_idx.compare_exchange_strong(copyidx, copyidx +
- min_copy_work, memory_order_release, memory_order_relaxed))
+ min_copy_work, memory_order_relaxed, memory_order_relaxed))
copyidx = _copy_idx.load(memory_order_relaxed);
if (!(copyidx < (oldlen << 1)))
panic_start = copyidx;
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
*oldkvs, int idx, void *should_help) {
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// We're only here cause the caller saw a Prime
if (copy_slot(topmap, idx, oldkvs, newkvs))
// Promote the new table to the current table
if (copyDone + workdone == oldlen &&
- topmap->_kvs.load(memory_order_acquire) == oldkvs) {
+ topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
+ /**** CDSChecker error ****/
topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
memory_order_relaxed);
}
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
// Construct a hashtable with capacity init_size: allocate the slot array,
// install a fresh CHM control block in data slot 0, then publish the table
// through _kvs.
cliffc_hashtable(int init_size) {
kvs_data *kvs = new kvs_data(init_size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
// NOTE(review): this hunk weakens the publishing store from release to
// relaxed, so readers that acquire-load _kvs are no longer guaranteed to
// observe the initialized table contents -- confirm this is a deliberate
// weakening for the model-checking experiment, not an accidental change.
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
/**
@Begin
@Interface: Get
- @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
+ //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
+ @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
+ //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
@ID: getKeyTag(key)
@Action:
- void *_Old_Val = spec_table_get(map, key);
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
+ //bool passed = equals_val(_Old_Val, __RET__);
+ bool passed = false;
+ if (!passed) {
+ int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
+ //key->_val, old, ret);
+ }
@Post_check:
- __RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
+ //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
+ equals_val(_Old_Val, __RET__)
@End
*/
TypeV* get(TypeK *key) {
slot *key_slot = new slot(false, key);
int fullhash = hash(key_slot);
+ /**** CDSChecker error ****/
kvs_data *kvs = _kvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Get_ReadKVS
+ @End
+ */
slot *V = get_impl(this, kvs, key_slot, fullhash);
if (V == NULL) return NULL;
MODEL_ASSERT (!is_prime(V));
/**
@Begin
@Interface: Put
- @Commit_point_set: Write_Success_Point
+ //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
+ @Commit_point_set: Put_Point | Put_WriteKey
@ID: getKeyTag(key)
@Action:
# Remember this old value at checking point
- void *_Old_Val = spec_table_get(map, key);
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
spec_table_put(map, key, val);
+ //bool passed = equals_val(__RET__, _Old_Val);
+ bool passed = false;
+ if (!passed) {
+ int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
+ // key->_val, val->_val, old, ret);
+ }
@Post_check:
equals_val(__RET__, _Old_Val)
@End
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
- slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
+ slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
+ /**
+ @Begin
# This is a complicated potential commit point since many functions are
+ # calling val().
+ @Potential_commit_point_define: true
+ @Label: Read_Key_Point
+ @End
+ */
return res;
}
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
+ /**** CDSChecker error & hb violation ****/
slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
/**
@Begin
// Together with key() preserve the happens-before relationship on newly
// inserted keys
// CAS the key slot for bucket idx (flattened index 2*idx+2) from `expected`
// to `desired`; returns true iff the CAS succeeded.
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
- return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
- desired, memory_order_release, memory_order_relaxed);
// NOTE(review): the success ordering is weakened here from release to
// relaxed, so a newly published key is no longer ordered before later
// reader loads -- confirm this is an intentional injected weakening.
+ bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
+ desired, memory_order_relaxed, memory_order_relaxed);
+ /**
+ # If it is a successful put instead of a copy or any other internal
+ # operations, expected != NULL
+ @Begin
+ @Potential_commit_point_define: res
+ @Label: Write_Key_Point
+ @End
+ */
+ return res;
}
/**
// inserted values
static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
*desired) {
+ /**** CDSChecker error & HB violation ****/
bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
- desired, memory_order_release, memory_order_relaxed);
+ desired, memory_order_acq_rel, memory_order_relaxed);
/**
# If it is a successful put instead of a copy or any other internal
# operations, expected != NULL
@Begin
- @Potential_commit_point_define: res == true
+ @Potential_commit_point_define: res
@Label: Write_Val_Point
@End
*/
int reprobe_cnt = 0;
while (true) {
slot *K = key(kvs, idx);
- slot *V = val(kvs, idx);
/**
@Begin
@Commit_point_define: K == NULL
- @Potential_commit_point_label: Read_Val_Point
- @Label: Get_Success_Point_1
+ @Potential_commit_point_label: Read_Key_Point
+ @Label: Get_Point1
@End
*/
-
- if (K == NULL) return NULL; // A miss
+ slot *V = val(kvs, idx);
+
+ if (K == NULL) {
+ //model_print("Key is null\n");
+ return NULL; // A miss
+ }
if (keyeq(K, key_slot, hashes, idx, fullhash)) {
// Key hit! Check if table-resize in progress
if (!is_prime(V)) {
+ /**
+ @Begin
+ @Commit_point_clear: true
+ @Label: Get_Clear
+ @End
+ */
+
/**
@Begin
@Commit_point_define: true
@Potential_commit_point_label: Read_Val_Point
- @Label: Get_Success_Point_2
+ @Label: Get_Point2
@End
*/
return (V == TOMBSTONE) ? NULL : V; // Return this value
if (++reprobe_cnt >= REPROBE_LIMIT ||
key_slot == TOMBSTONE) {
// Retry in new table
- // Atomic read (acquire) can be here
+ // Atomic read can be here
+ /**** FIXME: miss ****/
kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
/**
- @Begin
- @Commit_point_define_check: newkvs == NULL
- @Label: Get_Success_Point_3
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Get_ReadNewKVS
@End
*/
return newkvs == NULL ? NULL : get_impl(topmap,
slot *key_slot = new slot(false, key);
slot *value_slot = new slot(false, value);
+ /**** FIXME: miss ****/
kvs_data *kvs = _kvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Put_ReadKVS
+ @End
+ */
slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
// Only when copy_slot() call putIfMatch() will it return NULL
MODEL_ASSERT (res != NULL);
if (val_slot == TOMBSTONE) return val_slot;
// Claim the null key-slot
if (CAS_key(kvs, idx, NULL, key_slot)) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Write_Key_Point
+ @Label: Put_WriteKey
+ @End
+ */
chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
hashes[idx] = fullhash; // Memorize full hash
break;
// Here it tries to resize cause it doesn't want other threads to stop
// its progress (eagerly try to resize soon)
+ /**** FIXME: miss ****/
newkvs = chm->_newkvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Put_ReadNewKVS
+ @End
+ */
if (newkvs == NULL &&
((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
//model_print("resize2\n");
!(V == NULL && expVal == TOMBSTONE) &&
(expVal == NULL || !valeq(expVal, V))) {
/**
- @Begin
+ //@Begin
@Commit_point_define: expVal == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: PutIfAbsent_Fail_Point
@End
*/
/**
- @Begin
+ //@Begin
@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: RemoveIfMatch_Fail_Point
@End
*/
/**
- @Begin
+ //@Begin
@Commit_point_define: expVal != NULL && !valeq(expVal, V)
@Potential_commit_point_label: Read_Val_Point
@Label: ReplaceIfMatch_Fail_Point
# The only point where a successful put happens
@Commit_point_define: true
@Potential_commit_point_label: Write_Val_Point
- @Label: Write_Success_Point
+ @Label: Put_Point
@End
*/
if (expVal != NULL) { // Not called by a table-copy
// Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns `helper` unchanged so callers can chain it.
kvs_data* help_copy(kvs_data *helper) {
+ /**** FIXME: miss ****/
kvs_data *topkvs = _kvs.load(memory_order_acquire);
CHM *topchm = get_chm(topkvs);
// No copy in progress
// NOTE(review): the _newkvs load is weakened from acquire to relaxed by
// this hunk; a relaxed load does not synchronize with the resize
// publication -- confirm this is a deliberate model-checker weakening.
- if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
+ if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
topchm->help_copy_impl(this, topkvs, false);
return helper;
}