using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;
+using std::volatile_order;
/**
* @brief A recognizable don't-care value for use in the ModelAction::value
ATOMIC_NOTIFY_ALL, // < A notify all action
ATOMIC_WAIT, // < A wait action
ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
+ VOLATILE_READ,
+ VOLATILE_WRITE,
NOOP // no operation, which returns control to scheduler
} action_type_t;
#include "threads-model.h"
#include "datarace.h"
-memory_order orders[6] = {
+memory_order orders[8] = {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
- memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+ memory_order_release, memory_order_acq_rel, memory_order_seq_cst,
+ volatile_order
};
static void ensureModel() {
model->switch_to_master(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
}
+// cds volatile loads
+uint8_t cds_volatile_load8(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint8_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+}
+uint16_t cds_volatile_load16(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint16_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+}
+uint32_t cds_volatile_load32(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint32_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj)
+ );
+}
+uint64_t cds_volatile_load64(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+}
+
+// cds volatile stores
+void cds_volatile_store8(void * obj, uint8_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+}
+void cds_volatile_store16(void * obj, uint16_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+}
+void cds_volatile_store32(void * obj, uint32_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+}
+void cds_volatile_store64(void * obj, uint64_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+}
+
// cds atomic inits
#define CDSATOMICINT(size) \
void cds_atomic_init ## size (void * obj, uint ## size ## _t val, const char * position) { \
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
bugs(),
+ dataraces(),
bad_synchronization(false),
asserted(false)
{ }
~model_snapshot_members() {
for (unsigned int i = 0;i < bugs.size();i++)
delete bugs[i];
+ for (unsigned int i = 0;i < dataraces.size(); i++)
+ delete dataraces[i];
bugs.clear();
+ dataraces.clear();
}
unsigned int next_thread_id;
modelclock_t used_sequence_numbers;
SnapVector<bug_message *> bugs;
+ SnapVector<bug_message *> dataraces;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
bool asserted;
return false;
}
+/* @brief Put data races in a different list from other bugs. The purpose
+ * is to continue the program untill the number of data races exceeds a
+ * threshold */
+
+/* TODO: check whether set_assert should be called */
+bool ModelExecution::assert_race(const char *msg)
+{
+ priv->dataraces.push_back(new bug_message(msg));
+
+ if (isfeasibleprefix()) {
+ set_assert();
+ return true;
+ }
+ return false;
+}
+
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() > 0;
}
+/** @return True, if any fatal bugs have been reported for this execution.
+ * Any bug other than a data race is considered a fatal bug. Data races
+ * are not considered fatal unless the number of races is exceeds
+ * a threshold (temporarily set as 15).
+ */
+bool ModelExecution::have_fatal_bug_reports() const
+{
+ return priv->bugs.size() != 0 || priv->dataraces.size() >= 15;
+}
+
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
return &priv->bugs;
curr = check_current_action(curr);
ASSERT(curr);
- // model_print("poitner loc: %p, thread: %d, type: %d, order: %d, position: %s\n", curr, curr->get_tid(), curr->get_type(), curr->get_mo(), curr->get_position() );
- model->get_history()->add_func_atomic( curr, curr_thrd->get_id() );
+ /* Process this action in ModelHistory for records*/
+ model->get_history()->process_action( curr, curr_thrd->get_id() );
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
bool check_action_enabled(ModelAction *curr);
bool assert_bug(const char *msg);
+ bool assert_race(const char *msg);
+
bool have_bug_reports() const;
+ bool have_fatal_bug_reports() const;
+
SnapVector<bug_message *> * get_bugs() const;
bool has_asserted() const;
/* @brief Constructor: all containers (instruction map/list, entry insts,
 * per-thread read maps, and read-location list) start out empty. */
FuncNode::FuncNode() :
	func_inst_map(),
	inst_list(),
	entry_insts(),
	thrd_read_map(),
	read_locations()
{}
/* Check whether FuncInst with the same type, position, and location
func_inst = new FuncInst(act, this);
inst->get_collisions()->push_back(func_inst);
- inst_list.push_back(func_inst); // delete?
- if (func_inst->is_read())
- group_reads_by_loc(func_inst);
+ inst_list.push_back(func_inst); // delete?
return func_inst;
}
}
FuncInst * func_inst = new FuncInst(act, this);
+
func_inst_map.put(position, func_inst);
inst_list.push_back(func_inst);
- if (func_inst->is_read())
- group_reads_by_loc(func_inst);
-
return func_inst;
}
entry_insts.push_back(inst);
}
-/* group atomic read actions by memory location */
-void FuncNode::group_reads_by_loc(FuncInst * inst)
+/* @param inst_list a list of FuncInsts; this argument comes from ModelExecution
+ * Link FuncInsts in a list - add one FuncInst to another's predecessors and successors
+ */
+void FuncNode::link_insts(func_inst_list_t * inst_list)
{
- ASSERT(inst);
- if ( !inst->is_read() )
+ if (inst_list == NULL)
return;
- void * location = inst->get_location();
+ func_inst_list_t::iterator it = inst_list->begin();
+ func_inst_list_t::iterator prev;
- func_inst_list_mt * reads;
- if ( !reads_by_loc.contains(location) ) {
- reads = new func_inst_list_mt();
- reads->push_back(inst);
- reads_by_loc.put(location, reads);
+ if (inst_list->size() == 0)
return;
+
+ /* add the first instruction to the list of entry insts */
+ FuncInst * entry_inst = *it;
+ add_entry_inst(entry_inst);
+
+ it++;
+ while (it != inst_list->end()) {
+ prev = it;
+ prev--;
+
+ FuncInst * prev_inst = *prev;
+ FuncInst * curr_inst = *it;
+
+ prev_inst->add_succ(curr_inst);
+ curr_inst->add_pred(prev_inst);
+
+ it++;
}
+}
- reads = reads_by_loc.get(location);
- func_inst_list_mt::iterator it;
- for (it = reads->begin();it != reads->end();it++) {
- if (inst == *it)
- return;
+/* @param tid thread id
+ * Store the values read by atomic read actions into thrd_read_map */
+void FuncNode::store_read(ModelAction * act, uint32_t tid)
+{
+ ASSERT(act);
+
+ void * location = act->get_location();
+ uint64_t read_from_val = act->get_reads_from_value();
+
+ if (thrd_read_map.size() <= tid)
+ thrd_read_map.resize(tid + 1);
+
+ read_map_t * read_map = thrd_read_map[tid];
+ if (read_map == NULL) {
+ read_map = new read_map_t();
+ thrd_read_map[tid] = read_map;
}
- reads->push_back(inst);
+ read_map->put(location, read_from_val);
+
+ /* Store the memory locations where atomic reads happen */
+ bool push_loc = true;
+ ModelList<void *>::iterator it;
+ for (it = read_locations.begin(); it != read_locations.end(); it++) {
+ if (location == *it) {
+ push_loc = false;
+ break;
+ }
+ }
+
+ if (push_loc)
+ read_locations.push_back(location);
+}
+
+uint64_t FuncNode::query_last_read(ModelAction * act, uint32_t tid)
+{
+ if (thrd_read_map.size() <= tid)
+ return 0xdeadbeef;
+
+ read_map_t * read_map = thrd_read_map[tid];
+ void * location = act->get_location();
+
+ /* last read value not found */
+ if ( !read_map->contains(location) )
+ return 0xdeadbeef;
+
+ uint64_t read_val = read_map->get(location);
+ return read_val;
+}
+
+/* @param tid thread id
+ * Reset read map for a thread. This function shall only be called
+ * when a thread exits a function
+ */
+void FuncNode::clear_read_map(uint32_t tid)
+{
+ ASSERT(thrd_read_map.size() > tid);
+ thrd_read_map[tid]->reset();
+}
+
+/* @param tid thread id
+ * Print the values read by the last read actions for each memory location
+ */
+void FuncNode::print_last_read(uint32_t tid)
+{
+ ASSERT(thrd_read_map.size() > tid);
+ read_map_t * read_map = thrd_read_map[tid];
+
+ ModelList<void *>::iterator it;
+ for (it = read_locations.begin(); it != read_locations.end(); it++) {
+ if ( !read_map->contains(*it) )
+ break;
+
+ uint64_t read_val = read_map->get(*it);
+ model_print("last read of thread %d at %p: 0x%x\n", tid, *it, read_val);
+ }
}
class ModelAction;
typedef ModelList<FuncInst *> func_inst_list_mt;
+typedef HashTable<void *, uint64_t, uintptr_t, 4, model_malloc, model_calloc, model_free> read_map_t;
class FuncNode {
public:
func_inst_list_mt * get_inst_list() { return &inst_list; }
func_inst_list_mt * get_entry_insts() { return &entry_insts; }
void add_entry_inst(FuncInst * inst);
+ void link_insts(func_inst_list_t * inst_list);
- void group_reads_by_loc(FuncInst * inst);
+ void store_read(ModelAction * act, uint32_t tid);
+ uint64_t query_last_read(ModelAction * act, uint32_t tid);
+ void clear_read_map(uint32_t tid);
+
+ void print_last_read(uint32_t tid);
MEMALLOC
private:
/* Use source line number as the key of hashtable, to check if
* atomic operation with this line number has been added or not
- *
- * To do: cds_atomic_compare_exchange contains three atomic operations
- * that are feeded with the same source line number by llvm pass
*/
HashTable<const char *, FuncInst *, uintptr_t, 4, model_malloc, model_calloc, model_free> func_inst_map;
/* possible entry atomic actions in this function */
func_inst_list_mt entry_insts;
- /* group atomic read actions by memory location */
- HashTable<void *, func_inst_list_mt *, uintptr_t, 4, model_malloc, model_calloc, model_free> reads_by_loc;
+ /* Store the values read by atomic read actions per memory location for each thread */
+ ModelVector<read_map_t *> thrd_read_map;
+ ModelList<void *> read_locations;
};
/** @brief Constructor. func_counter starts at 1 because function ids are
 * 1-based (print() iterates from 1; see also the comment there). */
ModelHistory::ModelHistory() :
	func_counter(1),	/* function id starts with 1 */
	func_map(),
	func_map_rev(),
	func_nodes()
{}
void ModelHistory::enter_function(const uint32_t func_id, thread_id_t tid)
uint32_t last_func_id = func_list->back();
if (last_func_id == func_id) {
- func_list->pop_back();
+ /* clear read map upon exiting functions */
+ FuncNode * func_node = func_nodes[func_id];
+ func_node->clear_read_map(tid);
func_inst_list_t * curr_inst_list = func_inst_lists->back();
- link_insts(curr_inst_list);
+ func_node->link_insts(curr_inst_list);
+ func_list->pop_back();
func_inst_lists->pop_back();
} else {
model_print("trying to exit with a wrong function id\n");
//model_print("thread %d exiting func %d\n", tid, func_id);
}
-void ModelHistory::add_func_atomic(ModelAction *act, thread_id_t tid)
+void ModelHistory::process_action(ModelAction *act, thread_id_t tid)
{
/* return if thread i has not entered any function or has exited
from all functions */
uint32_t func_id = func_list->back();
- if ( func_atomics.size() <= func_id )
- func_atomics.resize( func_id + 1 );
+ if ( func_nodes.size() <= func_id )
+ func_nodes.resize( func_id + 1 );
- FuncNode * func_node = func_atomics[func_id];
+ FuncNode * func_node = func_nodes[func_id];
if (func_node == NULL) {
const char * func_name = func_map_rev[func_id];
func_node = new FuncNode();
func_node->set_func_id(func_id);
func_node->set_func_name(func_name);
- func_atomics[func_id] = func_node;
+ func_nodes[func_id] = func_node;
}
+ /* add corresponding FuncInst to func_node */
FuncInst * inst = func_node->get_or_add_action(act);
- if (inst != NULL) {
- func_inst_list_t * curr_inst_list = func_inst_lists->back();
-
- ASSERT(curr_inst_list != NULL);
- curr_inst_list->push_back(inst);
- }
-}
-
-/* Link FuncInsts in a list - add one FuncInst to another's predecessors and successors */
-void ModelHistory::link_insts(func_inst_list_t * inst_list)
-{
- if (inst_list == NULL)
- return;
- func_inst_list_t::iterator it = inst_list->begin();
- func_inst_list_t::iterator prev;
-
- if (inst_list->size() == 0)
+ if (inst == NULL)
return;
- /* add the first instruction to the list of entry insts */
- FuncInst * entry_inst = *it;
- FuncNode * func_node = entry_inst->get_func_node();
- func_node->add_entry_inst(entry_inst);
-
- it++;
- while (it != inst_list->end()) {
- prev = it;
- prev--;
+ if (inst->is_read())
+ func_node->store_read(act, tid);
- FuncInst * prev_inst = *prev;
- FuncInst * curr_inst = *it;
+ /* add to curr_inst_list */
+ func_inst_list_t * curr_inst_list = func_inst_lists->back();
+ ASSERT(curr_inst_list != NULL);
+ curr_inst_list->push_back(inst);
+}
- prev_inst->add_succ(curr_inst);
- curr_inst->add_pred(prev_inst);
+/* return the FuncNode given its func_id */
+FuncNode * ModelHistory::get_func_node(uint32_t func_id)
+{
+ if (func_nodes.size() <= func_id) // this node has not been added
+ return NULL;
- it++;
- }
+ return func_nodes[func_id];
}
void ModelHistory::print()
{
- for (uint32_t i = 0;i < func_atomics.size();i++ ) {
- FuncNode * funcNode = func_atomics[i];
- if (funcNode == NULL)
- continue;
+ /* function id starts with 1 */
+ for (uint32_t i = 1; i < func_nodes.size(); i++) {
+ FuncNode * func_node = func_nodes[i];
- func_inst_list_mt * entry_insts = funcNode->get_entry_insts();
+ func_inst_list_mt * entry_insts = func_node->get_entry_insts();
+ model_print("function %s has entry actions\n", func_node->get_func_name());
- model_print("function %s has entry actions\n", funcNode->get_func_name());
func_inst_list_mt::iterator it;
for (it = entry_insts->begin();it != entry_insts->end();it++) {
FuncInst *inst = *it;
uint32_t get_func_counter() { return func_counter; }
void incr_func_counter() { func_counter++; }
- void add_func_atomic(ModelAction *act, thread_id_t tid);
+ void process_action(ModelAction *act, thread_id_t tid);
HashTable<const char *, uint32_t, uintptr_t, 4, model_malloc, model_calloc, model_free> * getFuncMap() { return &func_map; }
ModelVector<const char *> * getFuncMapRev() { return &func_map_rev; }
- ModelVector<FuncNode *> * getFuncAtomics() { return &func_atomics; }
+ ModelVector<FuncNode *> * getFuncNodes() { return &func_nodes; }
+ FuncNode * get_func_node(uint32_t func_id);
- void link_insts(func_inst_list_t * inst_list);
void print();
MEMALLOC
/* map integer ids to function names */
ModelVector<const char *> func_map_rev;
- ModelVector<FuncNode *> func_atomics;
+ ModelVector<FuncNode *> func_nodes;
};
// void model_fence_action_helper(int atomic_index);
/* the following functions are used by llvm pass */
+// cds volatile loads
+uint8_t cds_volatile_load8(void * obj, int atomic_index, const char * position);
+uint16_t cds_volatile_load16(void * obj, int atomic_index, const char * position);
+uint32_t cds_volatile_load32(void * obj, int atomic_index, const char * position);
+uint64_t cds_volatile_load64(void * obj, int atomic_index, const char * position);
+
+// cds volatile stores
+void cds_volatile_store8(void * obj, uint8_t val, int atomic_index, const char * position);
+void cds_volatile_store16(void * obj, uint16_t val, int atomic_index, const char * position);
+void cds_volatile_store32(void * obj, uint32_t val, int atomic_index, const char * position);
+void cds_volatile_store64(void * obj, uint64_t val, int atomic_index, const char * position);
void cds_atomic_init8(void * obj, uint8_t val, const char * position);
void cds_atomic_init16(void * obj, uint16_t val, const char * position);
/* Memory orders exposed to instrumented programs. The first six values
 * mirror the C/C++ standard memory orders (relaxed == 0 ... seq_cst == 5);
 * volatile_order (== 6) is a checker-internal extension used to tag
 * volatile accesses (see the cds_volatile_* entry points). */
typedef enum memory_order {
	memory_order_relaxed, memory_order_consume, memory_order_acquire,
	memory_order_release, memory_order_acq_rel, memory_order_seq_cst,
	volatile_order
} memory_order;
#ifdef __cplusplus
int pthread_attr_setstackaddr(pthread_attr_t *, void *);
int pthread_attr_setstacksize(pthread_attr_t *, size_t);
int pthread_cancel(pthread_t);
-int pthread_cond_broadcast(pthread_cond_t *);
int pthread_cond_destroy(pthread_cond_t *);
int pthread_condattr_destroy(pthread_condattr_t *);
int pthread_condattr_getpshared(const pthread_condattr_t *, int *);
return execution->assert_bug(str);
}
+/**
+ * @brief Assert a data race in the executing program.
+ *
+ * Different from assert_bug, the program will not be aborted immediately
+ * upon calling this function, unless the number of data races exceeds
+ * a threshold.
+ *
+ * @param msg Descriptive message for the bug (do not include newline char)
+ * @return True if bug is immediately-feasible
+ */
+bool ModelChecker::assert_race(const char *msg, ...)
+{
+ char str[800];
+
+ va_list ap;
+ va_start(ap, msg);
+ vsnprintf(str, sizeof(str), msg, ap);
+ va_end(ap);
+
+ return execution->assert_race(str);
+}
+
/**
* @brief Assert a bug in the executing program, asserted by a user thread
* @see ModelChecker::assert_bug
/* Infeasible -> don't take any more steps */
if (execution->is_infeasible())
return true;
- else if (execution->isfeasibleprefix() && execution->have_bug_reports()) {
+ else if (execution->isfeasibleprefix() && execution->have_fatal_bug_reports()) {
execution->set_assert();
return true;
}
uint64_t switch_to_master(ModelAction *act);
bool assert_bug(const char *msg, ...);
+ bool assert_race(const char *msg, ...);
+
void assert_user_bug(const char *msg);
model_params params;
v->notify_one();
return 0;
}
+
+int pthread_cond_broadcast(pthread_cond_t *p_cond) {
+ // notify all blocked threads
+ ModelExecution *execution = model->get_execution();
+ if ( !execution->getCondMap()->contains(p_cond) )
+ pthread_cond_init(p_cond, NULL);
+
+ cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+
+ v->notify_all();
+ return 0;
+}