#include <iostream>
#include <atomic>
-#include <common.h>
+#include "stdio.h"
+//#include <common.h>
+#ifdef STANDALONE
+#include <assert.h>
+#define MODEL_ASSERT assert
+#else
#include <model-assert.h>
+#endif
+
+#include <spec_lib.h>
+#include <stdlib.h>
+#include <cdsannotate.h>
+#include <specannotation.h>
+#include <model_memory.h>
+#include "common.h"
using namespace std;
for (i = 2; i < real_size; i++) {
_data[i].store(NULL, memory_order_relaxed);
}
- _data[1].store(hashes, memory_order_release);
+ _data[1].store(hashes, memory_order_relaxed);
}
~kvs_data() {
used to judge equality.
TypeK and TypeV should define their own copy constructor.
*/
+/**
+ @Begin
+ @Class_begin
+ @End
+*/
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
/**
@Begin
@Options:
- LANG = C;
+ LANG = CPP;
CLASS = cliffc_hashtable;
@Global_define:
@DeclareVar:
- spec_hashtable<TypeK, TypeV*> map;
- spec_hashtable<TypeK, Tag> id_map;
- Tag tag;
+ spec_table *map;
+ spec_table *id_map;
+ id_tag_t *tag;
@InitVar:
- map = spec_hashtable<TypeK, TypeV*>();
- id_map = spec_hashtable<TypeK, TypeV*>();
- tag = Tag();
+ map = new_spec_table_default(equals_key);
+ id_map = new_spec_table_default(equals_id);
+ tag = new_id_tag();
+ //@Cleanup:
+ // model_print("cleaning up\n");
@DefineFunc:
- bool equals_val(TypeV *ptr1, TypeV *ptr2) {
- // ...
+ bool equals_key(void *ptr1, void *ptr2) {
+ TypeK *key1 = (TypeK*) ptr1, *key2 = (TypeK*) ptr2;
+ if (key1 == NULL || key2 == NULL)
+ return false;
+ return key1->equals(key2);
}
+ @DefineFunc:
+ bool equals_val(void *ptr1, void *ptr2) {
+ if (ptr1 == ptr2)
+ return true;
+ TypeV *val1 = (TypeV*) ptr1, *val2 = (TypeV*) ptr2;
+ if (val1 == NULL || val2 == NULL)
+ return false;
+ return val1->equals(val2);
+ }
+
+ @DefineFunc:
+ bool equals_id(void *ptr1, void *ptr2) {
+ id_tag_t *id1 = (id_tag_t*) ptr1, *id2 = (id_tag_t*) ptr2;
+ if (id1 == NULL || id2 == NULL)
+ return false;
+ return (*id1).tag == (*id2).tag;
+ }
+
@DefineFunc:
# Update the tag for the current key slot if the corresponding tag
# is NULL, otherwise just return that tag. It will update the next
# available tag too if it requires a new tag for that key slot.
- Tag getKeyTag(TypeK &key) {
- if (id_map.get(key) == NULL) {
- Tag cur_tag = tag.current();
- id_map.put(key, cur_tag);
- tag.next();
- return cur_tag;
+ call_id_t getKeyTag(TypeK *key) {
+ if (!spec_table_contains(id_map, key)) {
+ call_id_t cur_id = current(tag);
+ spec_table_put(id_map, key, (void*) cur_id);
+ next(tag);
+ return cur_id;
} else {
- return id_map.get(key);
+ call_id_t res = (call_id_t) spec_table_get(id_map, key);
+ return res;
}
}
-
- @Interface_cluster:
- Read_interface = {
- Get,
- PutIfAbsent,
- RemoveAny,
- RemoveIfMatch,
- ReplaceAny,
- ReplaceIfMatch
- }
-
- Write_interface = {
- Put,
- PutIfAbsent(COND_PutIfAbsentSucc),
- RemoveAny,
- RemoveIfMatch(COND_RemoveIfMatchSucc),
- ReplaceAny,
- ReplaceIfMatch(COND_ReplaceIfMatchSucc)
- }
@Happens_before:
- Write_interface -> Read_interface
+ Put->Get
+ Put->Put
@End
*/
_slots.store(0, memory_order_relaxed);
_copy_idx.store(0, memory_order_relaxed);
- _copy_done.store(0, memory_order_release);
+ _copy_done.store(0, memory_order_relaxed);
}
~CHM() {}
kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
//model_print("resizing...\n");
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
if (newkvs != NULL)
return newkvs;
if (newsz < oldlen) newsz = oldlen;
// Last check cause the 'new' below is expensive
+ /**** FIXME: miss ****/
newkvs = _newkvs.load(memory_order_acquire);
+ //model_print("hey1\n");
if (newkvs != NULL) return newkvs;
newkvs = new kvs_data(newsz);
void *chm = (void*) new CHM(sz);
- newkvs->_data[0].store(chm, memory_order_release);
+ //model_print("hey2\n");
+ newkvs->_data[0].store(chm, memory_order_relaxed);
kvs_data *cur_newkvs;
// Another check after the slow allocation
+ /**** FIXME: miss ****/
if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
return cur_newkvs;
// CAS the _newkvs to the allocated table
kvs_data *desired = (kvs_data*) NULL;
kvs_data *expected = (kvs_data*) newkvs;
+ /**** FIXME: miss ****/
+ //model_print("release in resize!\n");
if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
- memory_order_release)) {
+ memory_order_relaxed)) {
// Should clean the allocated area
delete newkvs;
+ /**** FIXME: miss ****/
newkvs = _newkvs.load(memory_order_acquire);
}
return newkvs;
void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
bool copy_all) {
MODEL_ASSERT (get_chm(oldkvs) == this);
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
int oldlen = oldkvs->_size;
int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
// Just follow Cliff Click's code here
int panic_start = -1;
int copyidx;
- while (_copy_done.load(memory_order_acquire) < oldlen) {
- copyidx = _copy_idx.load(memory_order_acquire);
+ while (_copy_done.load(memory_order_relaxed) < oldlen) {
+ copyidx = _copy_idx.load(memory_order_relaxed);
		if (panic_start == -1) { // No panic
- copyidx = _copy_idx.load(memory_order_acquire);
+ copyidx = _copy_idx.load(memory_order_relaxed);
while (copyidx < (oldlen << 1) &&
!_copy_idx.compare_exchange_strong(copyidx, copyidx +
- min_copy_work, memory_order_release, memory_order_release))
+ min_copy_work, memory_order_relaxed, memory_order_relaxed))
copyidx = _copy_idx.load(memory_order_relaxed);
if (!(copyidx < (oldlen << 1)))
panic_start = copyidx;
kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
*oldkvs, int idx, void *should_help) {
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// We're only here cause the caller saw a Prime
if (copy_slot(topmap, idx, oldkvs, newkvs))
// Promote the new table to the current table
if (copyDone + workdone == oldlen &&
- topmap->_kvs.load(memory_order_acquire) == oldkvs) {
+ topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
+ /**** FIXME: miss ****/
kvs_data *newkvs = _newkvs.load(memory_order_acquire);
+ /**** CDSChecker error ****/
topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
- memory_order_release);
+ memory_order_relaxed);
}
}
// Should initialize the CHM for the construction of the table
		// For other CHM in kvs_data, they should be initialized in resize()
// because the size is determined dynamically
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
kvs_data *kvs = new kvs_data(Default_Init_Size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
cliffc_hashtable(int init_size) {
// Should initialize the CHM for the construction of the table
		// For other CHM in kvs_data, they should be initialized in resize()
// because the size is determined dynamically
+
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
kvs_data *kvs = new kvs_data(init_size);
void *chm = (void*) new CHM(0);
kvs->_data[0].store(chm, memory_order_relaxed);
- _kvs.store(kvs, memory_order_release);
+ _kvs.store(kvs, memory_order_relaxed);
}
/**
@Begin
@Interface: Get
- @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
- @ID: __sequential.getKeyTag(key)
+ //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
+ @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
+ //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
+ @ID: getKeyTag(key)
@Action:
- TypeV *_Old_Val = __sequential.map.get(key)
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
+ //bool passed = equals_val(_Old_Val, __RET__);
+ //bool passed = false;
+ //if (!passed) {
+ //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ //int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
+ //key->_val, old, ret);
+ //}
@Post_check:
- __sequential.equals_val(_Old_Val, __RET__)
+ //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
+ equals_val(_Old_Val, __RET__)
@End
*/
- TypeV* get(TypeK& key) {
- void *key_ptr = (void*) new TypeK(key);
- slot *key_slot = new slot(false, key_ptr);
+ TypeV* get(TypeK *key) {
+ slot *key_slot = new slot(false, key);
int fullhash = hash(key_slot);
+ /**** CDSChecker error ****/
kvs_data *kvs = _kvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Get_ReadKVS
+ @End
+ */
slot *V = get_impl(this, kvs, key_slot, fullhash);
if (V == NULL) return NULL;
MODEL_ASSERT (!is_prime(V));
/**
@Begin
@Interface: Put
- @Commit_point_set: Write_Val_Point
- @ID: __sequential.getKeyTag(key)
+ //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
+ @Commit_point_set: Put_Point | Put_WriteKey
+ @ID: getKeyTag(key)
@Action:
# Remember this old value at checking point
- TypeV *_Old_Val = __sequential.map.get(key)
- __sequential.map.put(key, &val);
+ TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
+ spec_table_put(map, key, val);
+ //bool passed = equals_val(__RET__, _Old_Val);
+ //bool passed = false;
+ //if (!passed) {
+ //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
+ //int ret = __RET__ == NULL ? 0 : __RET__->_val;
+ //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
+ // key->_val, val->_val, old, ret);
+ //}
@Post_check:
- __sequential.equals_val(__RET__, _Old_Val)
+ equals_val(__RET__, _Old_Val)
@End
*/
- TypeV* put(TypeK& key, TypeV& val) {
+ TypeV* put(TypeK *key, TypeV *val) {
return putIfMatch(key, val, NO_MATCH_OLD);
}
/**
- @Begin
+// @Begin
@Interface: PutIfAbsent
@Commit_point_set:
- Write_Val_Point | PutIfAbsent_Fail_Point
- @Condition: __sequential.map.get(key) == NULL
+ Write_Success_Point | PutIfAbsent_Fail_Point
+ @Condition: !spec_table_contains(map, key)
@HB_condition:
COND_PutIfAbsentSucc :: __RET__ == NULL
- @ID: __sequential.getKeyTag(key)
+ @ID: getKeyTag(key)
@Action:
- TypeV *_Old_Val = __sequential.map.get(key)
+ void *_Old_Val = spec_table_get(map, key);
if (__COND_SAT__)
- __sequential.map.put(key, &value);
+ spec_table_put(map, key, value);
@Post_check:
- __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
+ __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
@End
*/
- TypeV* putIfAbsent(TypeK& key, TypeV& value) {
+ TypeV* putIfAbsent(TypeK *key, TypeV *value) {
return putIfMatch(key, val, TOMBSTONE);
}
/**
- @Begin
+// @Begin
@Interface: RemoveAny
- @Commit_point_set: Write_Val_Point
- @ID: __sequential.getKeyTag(key)
+ @Commit_point_set: Write_Success_Point
+ @ID: getKeyTag(key)
@Action:
- TypeV *_Old_Val = __sequential.map.get(key)
- __sequential.map.put(key, NULL);
+ void *_Old_Val = spec_table_get(map, key);
+ spec_table_put(map, key, NULL);
@Post_check:
- __sequential.equals_val(__RET__, _Old_Val)
+ equals_val(__RET__, _Old_Val)
@End
*/
- TypeV* remove(TypeK& key) {
+ TypeV* remove(TypeK *key) {
return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
}
/**
- @Begin
+// @Begin
@Interface: RemoveIfMatch
@Commit_point_set:
- Write_Val_Point | RemoveIfMatch_Fail_Point
+ Write_Success_Point | RemoveIfMatch_Fail_Point
@Condition:
- __sequential.equals_val(__sequential.map.get(key), &val)
+ equals_val(spec_table_get(map, key), val)
@HB_condition:
COND_RemoveIfMatchSucc :: __RET__ == true
- @ID: __sequential.getKeyTag(key)
+ @ID: getKeyTag(key)
@Action:
if (__COND_SAT__)
- __sequential.map.put(key, NULL);
+ spec_table_put(map, key, NULL);
@Post_check:
__COND_SAT__ ? __RET__ : !__RET__
@End
*/
- bool remove(TypeK& key, TypeV& val) {
+ bool remove(TypeK *key, TypeV *val) {
slot *val_slot = val == NULL ? NULL : new slot(false, val);
return putIfMatch(key, TOMBSTONE, val) == val;
}
/**
- @Begin
+// @Begin
@Interface: ReplaceAny
@Commit_point_set:
- Write_Val_Point
- @ID: __sequential.getKeyTag(key)
+ Write_Success_Point
+ @ID: getKeyTag(key)
@Action:
- TypeV *_Old_Val = __sequential.map.get(key)
+ void *_Old_Val = spec_table_get(map, key);
@Post_check:
- __sequential.equals_val(__RET__, _Old_Val)
+ equals_val(__RET__, _Old_Val)
@End
*/
- TypeV* replace(TypeK& key, TypeV& val) {
+ TypeV* replace(TypeK *key, TypeV *val) {
return putIfMatch(key, val, MATCH_ANY);
}
/**
- @Begin
+// @Begin
@Interface: ReplaceIfMatch
@Commit_point_set:
- Write_Val_Point | ReplaceIfMatch_Fail_Point
+ Write_Success_Point | ReplaceIfMatch_Fail_Point
@Condition:
- __sequential.equals_val(__sequential.map.get(key), &oldval)
+ equals_val(spec_table_get(map, key), oldval)
@HB_condition:
COND_ReplaceIfMatchSucc :: __RET__ == true
- @ID: __sequential.getKeyTag(key)
+ @ID: getKeyTag(key)
@Action:
if (__COND_SAT__)
- __sequential.map.put(key, &newval);
+ spec_table_put(map, key, newval);
@Post_check:
__COND_SAT__ ? __RET__ : !__RET__
@End
*/
- bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
+ bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
return putIfMatch(key, newval, oldval) == oldval;
}
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
- slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
+ slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
+ /**
+ @Begin
+			# This is a complicated potential commit point since many functions are
+ # calling val().
+ @Potential_commit_point_define: true
+ @Label: Read_Key_Point
+ @End
+ */
return res;
}
MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
// Corresponding to the volatile read in get_impl() and putIfMatch in
// Cliff Click's Java implementation
+ /**** CDSChecker error & hb violation ****/
slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
/**
@Begin
// Together with key() preserve the happens-before relationship on newly
// inserted keys
static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
- return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
- desired, memory_order_release, memory_order_release);
+ bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
+ desired, memory_order_relaxed, memory_order_relaxed);
+ /**
+ # If it is a successful put instead of a copy or any other internal
+			# operations, expected != NULL
+ @Begin
+ @Potential_commit_point_define: res
+ @Label: Write_Key_Point
+ @End
+ */
+ return res;
}
/**
// inserted values
static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
*desired) {
+ /**** CDSChecker error & HB violation ****/
bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
- desired, memory_order_release, memory_order_release);
+ desired, memory_order_acq_rel, memory_order_relaxed);
/**
# If it is a successful put instead of a copy or any other internal
			# operations, expected != NULL
@Begin
- @Potential_commit_point_define: __ATOMIC_RET__ == true
+ @Potential_commit_point_define: res
@Label: Write_Val_Point
@End
*/
int reprobe_cnt = 0;
while (true) {
slot *K = key(kvs, idx);
- slot *V = val(kvs, idx);
/**
@Begin
- @Commit_point_define: V == NULL
- @Potential_commit_point_label: Read_Val_Point
- @Label: Get_Success_Point_1
+ @Commit_point_define: K == NULL
+ @Potential_commit_point_label: Read_Key_Point
+ @Label: Get_Point1
@End
*/
-
- if (K == NULL) return NULL; // A miss
+ slot *V = val(kvs, idx);
+
+ if (K == NULL) {
+ //model_print("Key is null\n");
+ return NULL; // A miss
+ }
if (keyeq(K, key_slot, hashes, idx, fullhash)) {
// Key hit! Check if table-resize in progress
if (!is_prime(V)) {
+ /**
+ @Begin
+ @Commit_point_clear: true
+ @Label: Get_Clear
+ @End
+ */
+
/**
@Begin
@Commit_point_define: true
@Potential_commit_point_label: Read_Val_Point
- @Label: Get_Success_Point_2
+ @Label: Get_Point2
@End
*/
return (V == TOMBSTONE) ? NULL : V; // Return this value
if (++reprobe_cnt >= REPROBE_LIMIT ||
key_slot == TOMBSTONE) {
// Retry in new table
- // Atomic read (acquire) can be here
+ // Atomic read can be here
+ /**** FIXME: miss ****/
kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
/**
- @Begin
- @Commit_point_define_check: newkvs == NULL
- @Label: Get_Success_Point_3
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Get_ReadNewKVS
@End
*/
return newkvs == NULL ? NULL : get_impl(topmap,
}
// A wrapper of the essential function putIfMatch()
- TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
+ TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
// TODO: Should throw an exception rather return NULL
if (old_val == NULL) {
return NULL;
}
- void *key_ptr = (void*) new TypeK(key);
- slot *key_slot = new slot(false, key_ptr);
+ slot *key_slot = new slot(false, key);
- void *val_ptr = (void*) new TypeV(value);
- slot *value_slot = new slot(false, val_ptr);
+ slot *value_slot = new slot(false, value);
+ /**** FIXME: miss ****/
kvs_data *kvs = _kvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Put_ReadKVS
+ @End
+ */
slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
// Only when copy_slot() call putIfMatch() will it return NULL
MODEL_ASSERT (res != NULL);
if (val_slot == TOMBSTONE) return val_slot;
// Claim the null key-slot
if (CAS_key(kvs, idx, NULL, key_slot)) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Write_Key_Point
+ @Label: Put_WriteKey
+ @End
+ */
chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
hashes[idx] = fullhash; // Memorize full hash
break;
if (++reprobe_cnt >= reprobe_limit(len) ||
K == TOMBSTONE) { // Found a Tombstone key, no more keys
newkvs = chm->resize(topmap, kvs);
+ //model_print("resize1\n");
// Help along an existing copy
if (expVal != NULL) topmap->help_copy(newkvs);
return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
// Here it tries to resize cause it doesn't want other threads to stop
// its progress (eagerly try to resize soon)
+ /**** FIXME: miss ****/
newkvs = chm->_newkvs.load(memory_order_acquire);
+ /**
+ //@Begin
+ @Commit_point_define_check: true
+ @Label: Put_ReadNewKVS
+ @End
+ */
if (newkvs == NULL &&
- ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
+ ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
+ //model_print("resize2\n");
newkvs = chm->resize(topmap, kvs); // Force the copy to start
+ }
// Finish the copy and then put it in the new table
if (newkvs != NULL)
!(V == NULL && expVal == TOMBSTONE) &&
(expVal == NULL || !valeq(expVal, V))) {
/**
- @Begin
+ //@Begin
@Commit_point_define: expVal == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: PutIfAbsent_Fail_Point
@End
*/
/**
- @Begin
+ //@Begin
@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
@Potential_commit_point_label: Read_Val_Point
@Label: RemoveIfMatch_Fail_Point
@End
*/
/**
- @Begin
- @Commit_point_define: !valeq(expVal, V)
+ //@Begin
+ @Commit_point_define: expVal != NULL && !valeq(expVal, V)
@Potential_commit_point_label: Read_Val_Point
@Label: ReplaceIfMatch_Fail_Point
@End
# The only point where a successful put happens
@Commit_point_define: true
@Potential_commit_point_label: Write_Val_Point
- @Label: Write_Success_Point
+ @Label: Put_Point
@End
*/
if (expVal != NULL) { // Not called by a table-copy
// Help along an existing table-resize. This is a fast cut-out wrapper.
kvs_data* help_copy(kvs_data *helper) {
+ /**** FIXME: miss ****/
kvs_data *topkvs = _kvs.load(memory_order_acquire);
CHM *topchm = get_chm(topkvs);
		// No copy in progress
- if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
+ if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
topchm->help_copy_impl(this, topkvs, false);
return helper;
}
};
+/**
+ @Begin
+ @Class_end
+ @End
+*/
#endif