#include <stdio.h>
#include <algorithm>
+#include <mutex>
#include "model.h"
#include "action.h"
#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
-#include "mutex.h"
-#include "threads.h"
+#include "threads-model.h"
#define INITIAL_THREAD_ID 0
ModelChecker *model;
+/**
+ * @brief A bug-report message, allocated on the snapshotting heap
+ *
+ * The message text lives in snapshot_malloc'd memory (and the struct uses
+ * SNAPSHOTALLOC), so a recorded bug is rolled back together with the
+ * execution snapshot it belongs to.
+ */
+struct bug_message {
+ bug_message(const char *str) {
+ const char *fmt = " [BUG] %s\n";
+ /* "%s" in fmt is replaced by str, so strlen(fmt) + strlen(str) covers
+  * the formatted text plus its NUL terminator (one spare byte). */
+ msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
+ sprintf(msg, fmt, str);
+ }
+ ~bug_message() { if (msg) snapshot_free(msg); }
+
+ char *msg;
+ void print() { model_print("%s", msg); }
+
+ SNAPSHOTALLOC
+};
+
+/**
+ * Structure for holding small ModelChecker members that should be snapshotted
+ */
+struct model_snapshot_members {
+ /* Action handed from the user context via set_current_action() */
+ ModelAction *current_action;
+ /* ID that will be assigned to the next created thread */
+ unsigned int next_thread_id;
+ /* Last sequence number handed out (see get_next_seq_num()) */
+ modelclock_t used_sequence_numbers;
+ Thread *nextThread;
+ /* Cached latest backtracking point (see set_backtracking()) */
+ ModelAction *next_backtrack;
+ /* Bug reports for this execution (see assert_bug()) */
+ std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
+ /* NOTE(review): record_stats() increments a bare `stats` member, not
+  * priv->stats — confirm which of the two is actually live. */
+ struct execution_stats stats;
+};
+
/** @brief Constructor */
+/* Builds the per-execution data structures. Containers whose contents must
+ * roll back with a snapshot use SnapshotAlloc or the snapshotting heap. */
ModelChecker::ModelChecker(struct model_params params) :
/* Initialize default scheduler */
params(params),
scheduler(new Scheduler()),
- num_executions(0),
- num_feasible_executions(0),
diverge(NULL),
earliest_diverge(NULL),
action_trace(new action_list_t()),
thread_map(new HashTable<int, Thread *, int>()),
+ /* Per-location tables now hold list *pointers*, created lazily by the
+  * get_safe_ptr_* helpers below */
- obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
- lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
- obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
- promises(new std::vector<Promise *>()),
- futurevalues(new std::vector<struct PendingFutureValue>()),
- pending_rel_seqs(new std::vector<struct release_seq *>()),
- thrd_last_action(new std::vector<ModelAction *>(1)),
+ obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
+ lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
+ condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
+ obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
+ promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
+ futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
+ pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
+ thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
node_stack(new NodeStack()),
mo_graph(new CycleGraph()),
failed_promise(false),
bad_synchronization(false)
{
/* Allocate this "size" on the snapshotting heap */
- priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
+ priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
/* First thread created will have id INITIAL_THREAD_ID */
priv->next_thread_id = INITIAL_THREAD_ID;
+
+ /* Initialize a model-checker thread, for special ModelActions */
+ model_thread = new Thread(get_next_id());
+ thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}
/** @brief Destructor */
delete obj_thrd_map;
delete obj_map;
delete lock_waiters_map;
+ delete condvar_waiters_map;
delete action_trace;
for (unsigned int i = 0; i < promises->size(); i++)
delete node_stack;
delete scheduler;
delete mo_graph;
+
+ for (unsigned int i = 0; i < priv->bugs.size(); i++)
+ delete priv->bugs[i];
+ priv->bugs.clear();
+ snapshot_free(priv);
+}
+
+/**
+ * @brief Get (or lazily create) the action list for a location
+ * @param hash The per-location table to consult
+ * @param ptr The location key
+ * @return The existing list for @a ptr, or a freshly-inserted empty list
+ */
+static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
+ action_list_t * tmp=hash->get(ptr);
+ if (tmp==NULL) {
+ tmp=new action_list_t();
+ hash->put(ptr, tmp);
+ }
+ return tmp;
+}
+
+/**
+ * @brief Get (or lazily create) the per-thread action-list vector for a location
+ * @param hash The per-location table to consult
+ * @param ptr The location key
+ * @return The existing vector for @a ptr, or a freshly-inserted empty vector
+ */
+static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
+ std::vector<action_list_t> * tmp=hash->get(ptr);
+ if (tmp==NULL) {
+ tmp=new std::vector<action_list_t>();
+ hash->put(ptr, tmp);
+ }
+ return tmp;
+}
/**
}
/** @return the number of user threads created during this execution */
-unsigned int ModelChecker::get_num_threads()
+unsigned int ModelChecker::get_num_threads() const
{
+ /* NOTE(review): next_thread_id doubles as the thread count; it appears
+  * to include the model-checker thread created in the constructor —
+  * confirm against get_next_id() */
return priv->next_thread_id;
}
return ++priv->used_sequence_numbers;
}
+/** @return the Node at the head of the NodeStack, i.e. the node for the
+ *  current exploration point */
+Node * ModelChecker::get_curr_node() {
+ return node_stack->get_head();
+}
+
/**
* @brief Choose the next thread to execute.
*
earliest_diverge=diverge;
Node *nextnode = next->get_node();
+ Node *prevnode = nextnode->get_parent();
+ scheduler->update_sleep_set(prevnode);
+
/* Reached divergence point */
- if (nextnode->increment_promise()) {
+ if (nextnode->increment_misc()) {
+ /* The next node will try to satisfy a different set of misc_index values. */
+ tid = next->get_tid();
+ node_stack->pop_restofstack(2);
+ } else if (nextnode->increment_promise()) {
/* The next node will try to satisfy a different set of promises. */
tid = next->get_tid();
node_stack->pop_restofstack(2);
node_stack->pop_restofstack(2);
} else {
/* Make a different thread execute for next step */
- Node *node = nextnode->get_parent();
- tid = node->get_next_backtrack();
+ scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
+ tid = prevnode->get_next_backtrack();
+ /* Make sure the backtracked thread isn't sleeping. */
node_stack->pop_restofstack(1);
if (diverge==earliest_diverge) {
- earliest_diverge=node->get_action();
+ earliest_diverge=prevnode->get_action();
}
}
+ /* The correct sleep set is in the parent node. */
+ execute_sleep_set();
+
DEBUG("*** Divergence point ***\n");
diverge = NULL;
return thread_map->get(id_to_int(tid));
}
+/**
+ * We need to know what the next actions of all threads in the sleep
+ * set will be. This method computes them and stores the actions at
+ * the corresponding thread object's pending action.
+ */
+
+void ModelChecker::execute_sleep_set() {
+ for(unsigned int i=0;i<get_num_threads();i++) {
+ thread_id_t tid=int_to_id(i);
+ Thread *thr=get_thread(tid);
+ if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
+ thr->get_pending() == NULL ) {
+ /* Run the sleeping thread for one step so that it publishes its
+  * next action via set_current_action() */
+ thr->set_state(THREAD_RUNNING);
+ scheduler->next_thread(thr);
+ Thread::swap(&system_context, thr);
+ /* Flag the action as sleep-set-generated and stash it as pending */
+ priv->current_action->set_sleep_flag();
+ thr->set_pending(priv->current_action);
+ }
+ }
+ /* No thread is mid-step any more */
+ priv->current_action = NULL;
+}
+
+/**
+ * @brief Wake sleeping threads that @a curr might interact with
+ *
+ * A thread in the sleep set whose pending action could synchronize with
+ * the current action is no longer guaranteed redundant, so it is removed
+ * from the sleep set. The read part of an RMW is skipped — presumably
+ * handled when the write part of the RMW executes (TODO confirm).
+ * @param curr The action that just executed
+ */
+void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
+ for(unsigned int i=0;i<get_num_threads();i++) {
+ thread_id_t tid=int_to_id(i);
+ Thread *thr=get_thread(tid);
+ if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
+ ModelAction *pending_act=thr->get_pending();
+ if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
+ //Remove this thread from sleep set
+ scheduler->remove_sleep(thr);
+ }
+ }
+ }
+}
+
+/**
+ * Check if we are in a deadlock. Should only be called at the end of an
+ * execution, although it should not give false positives in the middle of an
+ * execution (there should be some ENABLED thread).
+ *
+ * @return True if program is in a deadlock; false otherwise
+ */
+bool ModelChecker::is_deadlocked() const
+{
+ bool blocking_threads = false;
+ for (unsigned int i = 0; i < get_num_threads(); i++) {
+ thread_id_t tid = int_to_id(i);
+ /* Any enabled thread means no deadlock */
+ if (is_enabled(tid))
+ return false;
+ Thread *t = get_thread(tid);
+ /* A user thread with a pending action is blocked (e.g. on a lock or
+  * join); that distinguishes deadlock from normal completion */
+ if (!t->is_model_thread() && t->get_pending())
+ blocking_threads = true;
+ }
+ return blocking_threads;
+}
+
+/**
+ * Check if this is a complete execution. That is, have all threads completed
+ * execution (rather than exiting because sleep sets have forced a redundant
+ * execution).
+ *
+ * @return True if the execution is complete.
+ */
+bool ModelChecker::is_complete_execution() const
+{
+ /* Any still-enabled thread means the execution ended early */
+ for (unsigned int i = 0; i < get_num_threads(); i++)
+ if (is_enabled(int_to_id(i)))
+ return false;
+ return true;
+}
+
+/**
+ * @brief Assert a bug in the executing program.
+ *
+ * Use this function to assert any sort of bug in the user program. If the
+ * current trace is feasible (actually, a prefix of some feasible execution),
+ * then this execution will be aborted, printing the appropriate message. If
+ * the current trace is not yet feasible, the error message will be stashed and
+ * printed if the execution ever becomes feasible.
+ *
+ * @param msg Descriptive message for the bug (do not include newline char)
+ * @return True if bug is immediately-feasible
+ */
+bool ModelChecker::assert_bug(const char *msg)
+{
+ /* Stash the report regardless of feasibility (see print_bugs()) */
+ priv->bugs.push_back(new bug_message(msg));
+
+ if (isfeasibleprefix()) {
+ /* Immediately-feasible bug: halt this execution */
+ set_assert();
+ return true;
+ }
+ return false;
+}
+
+/**
+ * @brief Assert a bug in the executing program, asserted by a user thread
+ * @see ModelChecker::assert_bug
+ * @param msg Descriptive message for the bug (do not include newline char)
+ */
+void ModelChecker::assert_user_bug(const char *msg)
+{
+ /* If feasible bug, bail out now */
+ if (assert_bug(msg))
+ /* presumably returns control to the model-checker context so this
+  * execution can be torn down — confirm switch_to_master semantics */
+ switch_to_master(NULL);
+}
+
+/** @brief Check whether any bugs were asserted during this execution
+ * @return True, if any bugs have been reported for this execution */
+bool ModelChecker::have_bug_reports() const
+{
+ return priv->bugs.size() != 0;
+}
+
+/** @brief Print bug report listing for this execution (if any bugs exist) */
+void ModelChecker::print_bugs() const
+{
+ if (have_bug_reports()) {
+ /* Pluralize "bug(s)" based on the report count */
+ model_print("Bug report: %zu bug%s detected\n",
+ priv->bugs.size(),
+ priv->bugs.size() > 1 ? "s" : "");
+ for (unsigned int i = 0; i < priv->bugs.size(); i++)
+ priv->bugs[i]->print();
+ }
+}
+
+/**
+ * @brief Record end-of-execution stats
+ *
+ * Must be run when exiting an execution. Records various stats.
+ * The categories below are mutually exclusive; a feasible-but-redundant
+ * execution (cut short by sleep sets) counts only toward num_total.
+ * @see struct execution_stats
+ */
+void ModelChecker::record_stats()
+{
+ stats.num_total++;
+ if (!isfinalfeasible())
+ stats.num_infeasible++;
+ else if (have_bug_reports())
+ stats.num_buggy_executions++;
+ else if (is_complete_execution())
+ stats.num_complete++;
+}
+
+/** @brief Print execution stats */
+/* NOTE(review): %d assumes the counters are int-sized — confirm against
+ * struct execution_stats */
+void ModelChecker::print_stats() const
+{
+ model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
+ model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
+ model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
+ model_print("Total executions: %d\n", stats.num_total);
+ model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
+}
+
/**
* Queries the model-checker for more executions to explore and, if one
* exists, resets the model-checker state to execute a new execution.
{
DBG();
- num_executions++;
-
- if (isfinalfeasible()) {
- printf("Earliest divergence point since last feasible execution:\n");
+ if (isfinalfeasible() && (is_complete_execution() || have_bug_reports())) {
+ model_print("Earliest divergence point since last feasible execution:\n");
if (earliest_diverge)
earliest_diverge->print();
else
- printf("(Not set)\n");
+ model_print("(Not set)\n");
earliest_diverge = NULL;
- num_feasible_executions++;
- }
- DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
- pending_rel_seqs->size());
+ if (is_deadlocked())
+ assert_bug("Deadlock detected");
- if (isfinalfeasible() || DBG_ENABLED())
+ checkDataRaces();
+ print_bugs();
+ model_print("\n");
+ print_summary();
+ } else if (DBG_ENABLED()) {
+ model_print("\n");
print_summary();
+ }
+
+ record_stats();
if ((diverge = get_next_backtrack()) == NULL)
return false;
if (DBG_ENABLED()) {
- printf("Next execution will diverge at:\n");
+ model_print("Next execution will diverge at:\n");
diverge->print();
}
case ATOMIC_WRITE:
case ATOMIC_RMW: {
/* linear search: from most recent to oldest */
- action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
case ATOMIC_LOCK:
case ATOMIC_TRYLOCK: {
/* linear search: from most recent to oldest */
- action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
}
case ATOMIC_UNLOCK: {
/* linear search: from most recent to oldest */
- action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
}
break;
}
+ case ATOMIC_WAIT: {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (!act->same_thread(prev)&&prev->is_failed_trylock())
+ return prev;
+ if (!act->same_thread(prev)&&prev->is_notify())
+ return prev;
+ }
+ break;
+ }
+
+ case ATOMIC_NOTIFY_ALL:
+ case ATOMIC_NOTIFY_ONE: {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (!act->same_thread(prev)&&prev->is_wait())
+ return prev;
+ }
+ break;
+ }
default:
break;
}
return NULL;
}
-/** This method find backtracking points where we should try to
+/** This method finds backtracking points where we should try to
* reorder the parameter ModelAction against.
*
* @param the ModelAction to find backtracking points for.
for(int i = low_tid; i < high_tid; i++) {
thread_id_t tid = int_to_id(i);
- if (!node->is_enabled(tid))
+
+ /* Make sure this thread can be enabled here. */
+ if (i >= node->get_num_threads())
+ break;
+
+ /* Don't backtrack into a point where the thread is disabled or sleeping. */
+ if (node->enabled_status(tid)!=THREAD_ENABLED)
continue;
/* Check if this has been explored already */
if (unfair)
continue;
}
-
/* Cache the latest backtracking point */
if (!priv->next_backtrack || *prev > *priv->next_backtrack)
priv->next_backtrack = prev;
*/
bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
{
- uint64_t value;
+ uint64_t value = VALUE_NONE;
bool updated = false;
while (true) {
const ModelAction *reads_from = curr->get_node()->get_read_from();
* @return True if synchronization was updated; false otherwise
*/
bool ModelChecker::process_mutex(ModelAction *curr) {
- std::mutex *mutex = (std::mutex *)curr->get_location();
- struct std::mutex_state *state = mutex->get_state();
+ std::mutex *mutex=NULL;
+ struct std::mutex_state *state=NULL;
+
+ if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
+ mutex = (std::mutex *)curr->get_location();
+ state = mutex->get_state();
+ } else if(curr->is_wait()) {
+ mutex = (std::mutex *)curr->get_value();
+ state = mutex->get_state();
+ }
+
switch (curr->get_type()) {
case ATOMIC_TRYLOCK: {
bool success = !state->islocked;
}
//otherwise fall into the lock case
case ATOMIC_LOCK: {
- if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
- printf("Lock access before initialization\n");
- set_assert();
- }
+ if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
+ assert_bug("Lock access before initialization");
state->islocked = true;
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
//unlock the lock
state->islocked = false;
//wake up the other threads
- action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
+ action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
+ //activate all the waiting threads
+ for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
+ scheduler->wake(get_thread(*rit));
+ }
+ waiters->clear();
+ break;
+ }
+ case ATOMIC_WAIT: {
+ //unlock the lock
+ state->islocked = false;
+ //wake up the other threads
+ action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
+ //activate all the waiting threads
+ for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
+ scheduler->wake(get_thread(*rit));
+ }
+ waiters->clear();
+ //check whether we should go to sleep or not...simulate spurious failures
+ if (curr->get_node()->get_misc()==0) {
+ get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
+ //disable us
+ scheduler->sleep(get_current_thread());
+ }
+ break;
+ }
+ case ATOMIC_NOTIFY_ALL: {
+ action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
//activate all the waiting threads
for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
scheduler->wake(get_thread(*rit));
waiters->clear();
break;
}
+ case ATOMIC_NOTIFY_ONE: {
+ action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
+ int wakeupthread=curr->get_node()->get_misc();
+ action_list_t::iterator it = waiters->begin();
+ advance(it, wakeupthread);
+ scheduler->wake(get_thread(*it));
+ waiters->erase(it);
+ break;
+ }
+
default:
ASSERT(0);
}
if (promises->size() == 0) {
for (unsigned int i = 0; i < futurevalues->size(); i++) {
struct PendingFutureValue pfv = (*futurevalues)[i];
- if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
+ //Do more ambitious checks now that mo is more complete
+ if (mo_may_allow(pfv.writer, pfv.act)&&
+ pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
(!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
priv->next_backtrack = pfv.act;
}
break;
}
case THREAD_JOIN: {
- Thread *waiting, *blocking;
- waiting = get_thread(curr);
- blocking = (Thread *)curr->get_location();
- if (!blocking->is_complete()) {
- blocking->push_wait_list(curr);
- scheduler->sleep(waiting);
- } else {
- do_complete_join(curr);
- updated = true; /* trigger rel-seq checks */
- }
+ Thread *blocking = (Thread *)curr->get_location();
+ ModelAction *act = get_last_action(blocking->get_id());
+ curr->synchronize_with(act);
+ updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_FINISH: {
Thread *th = get_thread(curr);
while (!th->wait_list_empty()) {
ModelAction *act = th->pop_wait_list();
- Thread *wake = get_thread(act);
- scheduler->wake(wake);
- do_complete_join(act);
- updated = true; /* trigger rel-seq checks */
+ scheduler->wake(get_thread(act));
}
th->complete();
updated = true; /* trigger rel-seq checks */
ASSERT(release->same_thread(rf));
if (write == NULL) {
+ /**
+ * @todo Forcing a synchronization requires that we set
+ * modification order constraints. For instance, we can't allow
+ * a fixup sequence in which two separate read-acquire
+ * operations read from the same sequence, where the first one
+ * synchronizes and the other doesn't. Essentially, we can't
+ * allow any writes to insert themselves between 'release' and
+ * 'rf'
+ */
+
/* Must synchronize */
if (!acquire->synchronize_with(release)) {
set_bad_synchronization();
mo_graph->addEdge(release, write);
mo_graph->addEdge(write, rf);
}
+
+ /* See if we have realized a data race */
+ checkDataRaces();
}
/**
* initializing clock vectors, and computing the promises to fulfill.
*
* @param curr The current action, as passed from the user context; may be
- * freed/invalidated after the execution of this function
- * @return The current action, as processed by the ModelChecker. Is only the
- * same as the parameter @a curr if this is a newly-explored action.
+ * freed/invalidated after the execution of this function, with a different
+ * action "returned" its place (pass-by-reference)
+ * @return True if curr is a newly-explored action; false otherwise
*/
-ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
+bool ModelChecker::initialize_curr_action(ModelAction **curr)
{
ModelAction *newcurr;
- if (curr->is_rmwc() || curr->is_rmw()) {
- newcurr = process_rmw(curr);
- delete curr;
+ if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
+ newcurr = process_rmw(*curr);
+ delete *curr;
if (newcurr->is_rmw())
compute_promises(newcurr);
- return newcurr;
+
+ *curr = newcurr;
+ return false;
}
- curr->set_seq_number(get_next_seq_num());
+ (*curr)->set_seq_number(get_next_seq_num());
- newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
+ newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
if (newcurr) {
/* First restore type and order in case of RMW operation */
- if (curr->is_rmwr())
- newcurr->copy_typeandorder(curr);
+ if ((*curr)->is_rmwr())
+ newcurr->copy_typeandorder(*curr);
- ASSERT(curr->get_location() == newcurr->get_location());
- newcurr->copy_from_new(curr);
+ ASSERT((*curr)->get_location() == newcurr->get_location());
+ newcurr->copy_from_new(*curr);
/* Discard duplicate ModelAction; use action from NodeStack */
- delete curr;
+ delete *curr;
/* Always compute new clock vector */
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
+
+ *curr = newcurr;
+ return false; /* Action was explored previously */
} else {
- newcurr = curr;
+ newcurr = *curr;
/* Always compute new clock vector */
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
compute_promises(newcurr);
else if (newcurr->is_relseq_fixup())
compute_relseq_breakwrites(newcurr);
+ else if (newcurr->is_wait())
+ newcurr->get_node()->set_misc_max(2);
+ else if (newcurr->is_notify_one()) {
+ newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
+ }
+ return true; /* This was a new ModelAction */
}
- return newcurr;
}
/**
- * This method checks whether a model action is enabled at the given point.
- * At this point, it checks whether a lock operation would be successful at this point.
- * If not, it puts the thread in a waiter list.
+ * @brief Check whether a model action is enabled.
+ *
+ * Checks whether a lock or join operation would be successful (i.e., is the
+ * lock already locked, or is the joined thread already complete). If not, put
+ * the action in a waiter list.
+ *
* @param curr is the ModelAction to check whether it is enabled.
* @return a bool that indicates whether the action is enabled.
*/
struct std::mutex_state * state = lock->get_state();
if (state->islocked) {
//Stick the action in the appropriate waiting queue
- lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
+ get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
+ return false;
+ }
+ } else if (curr->get_type() == THREAD_JOIN) {
+ Thread *blocking = (Thread *)curr->get_location();
+ if (!blocking->is_complete()) {
+ blocking->push_wait_list(curr);
return false;
}
}
return true;
}
+/**
+ * Stores the ModelAction for the current thread action. Call this
+ * immediately before switching from user- to system-context to pass
+ * data between them.
+ * @param act The ModelAction created by the user-thread action
+ */
+void ModelChecker::set_current_action(ModelAction *act) {
+ /* Consumed (and cleared) by the system context, e.g. execute_sleep_set() */
+ priv->current_action = act;
+}
+
/**
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
Thread * ModelChecker::check_current_action(ModelAction *curr)
{
ASSERT(curr);
-
bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
if (!check_action_enabled(curr)) {
/* Make the execution look like we chose to run this action
- * much later, when a lock is actually available to release */
+ * much later, when a lock/join can succeed */
get_current_thread()->set_pending(curr);
scheduler->sleep(get_current_thread());
return get_next_thread(NULL);
}
- ModelAction *newcurr = initialize_curr_action(curr);
+ bool newly_explored = initialize_curr_action(&curr);
+
+ wake_up_sleeping_actions(curr);
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
- add_action_to_lists(newcurr);
+ add_action_to_lists(curr);
/* Build may_read_from set for newly-created actions */
- if (curr == newcurr && curr->is_read())
+ if (newly_explored && curr->is_read())
build_reads_from_past(curr);
- curr = newcurr;
/* Initialize work_queue with the "current action" work */
work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
-
- while (!work_queue.empty()) {
+ while (!work_queue.empty() && !has_asserted()) {
WorkQueueEntry work = work_queue.front();
work_queue.pop_front();
}
check_curr_backtracking(curr);
-
set_backtracking(curr);
-
return get_next_thread(curr);
}
-/**
- * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH
- * operation from the Thread it is joining with. Must be called after the
- * completion of the Thread in question.
- * @param join The THREAD_JOIN action
- */
-void ModelChecker::do_complete_join(ModelAction *join)
-{
- Thread *blocking = (Thread *)join->get_location();
- ModelAction *act = get_last_action(blocking->get_id());
- join->synchronize_with(act);
-}
-
void ModelChecker::check_curr_backtracking(ModelAction * curr) {
Node *currnode = curr->get_node();
Node *parnode = currnode->get_parent();
if ((!parnode->backtrack_empty() ||
+ !currnode->misc_empty() ||
!currnode->read_from_empty() ||
!currnode->future_value_empty() ||
!currnode->promise_empty() ||
}
}
-bool ModelChecker::promises_expired() {
+bool ModelChecker::promises_expired() const
+{
for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
Promise *promise = (*promises)[promise_index];
if (promise->get_expiration()<priv->used_sequence_numbers) {
/** @return whether the current partial trace must be a prefix of a
 * feasible trace. */
-bool ModelChecker::isfeasibleprefix() {
- return promises->size() == 0 && pending_rel_seqs->size() == 0;
+bool ModelChecker::isfeasibleprefix() const
+{
+ /* A feasible prefix has no unresolved promises, no pending release
+  * sequences, and is itself currently feasible */
+ return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
}
/** @return whether the current partial trace is feasible. */
-bool ModelChecker::isfeasible() {
+bool ModelChecker::isfeasible() const
+{
if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
DEBUG("Infeasible: RMW violation\n");
/** @return whether the current partial trace is feasible other than
* multiple RMW reading from the same store. */
-bool ModelChecker::isfeasibleotherthanRMW() {
+bool ModelChecker::isfeasibleotherthanRMW() const
+{
if (DBG_ENABLED()) {
if (mo_graph->checkForCycles())
DEBUG("Infeasible: modification order cycles\n");
}
/** Returns whether the current completed trace is feasible. */
-bool ModelChecker::isfinalfeasible() {
+bool ModelChecker::isfinalfeasible() const
+{
if (DBG_ENABLED() && promises->size() != 0)
DEBUG("Infeasible: unrevolved promises\n");
//accidentally clear by rolling back
if (!isfeasible())
return;
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
int tid = id_to_int(curr->get_tid());
/* Skip checks */
ModelAction *act = *rit;
if (!act->is_read())
return;
-
+
if (act->get_reads_from() != rf)
return;
if (act->get_node()->get_read_from_size() <= 1)
ModelAction *act=*rit;
bool foundvalue = false;
for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
- if (act->get_node()->get_read_from_at(i)==write) {
+ if (act->get_node()->get_read_from_at(j)==write) {
foundvalue = true;
break;
}
*/
bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
{
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
unsigned int i;
bool added = false;
ASSERT(curr->is_read());
*/
void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
{
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
unsigned int i;
ASSERT(curr->is_read());
*/
bool ModelChecker::w_modification_order(ModelAction *curr)
{
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
unsigned int i;
bool added = false;
ASSERT(curr->is_write());
* 1) If RMW and it actually read from something, then we
* already have all relevant edges, so just skip to next
* thread.
- *
+ *
* 2) If RMW and it didn't read from anything, we should
* whatever edge we can get to speed up convergence.
*
if (curr->is_rmw()) {
if (curr->get_reads_from()!=NULL)
break;
- else
+ else
continue;
} else
continue;
*/
if (act->is_write())
mo_graph->addEdge(act, curr);
- else if (act->is_read()) {
+ else if (act->is_read()) {
//if previous read accessed a null, just keep going
if (act->get_reads_from() == NULL)
continue;
(3) cannot synchronize with us
(4) is in a different thread
=>
- that read could potentially read from our write.
+ that read could potentially read from our write. Note that
+ these checks are overly conservative at this point, we'll
+ do more checks before actually removing the
+ pendingfuturevalue.
+
*/
if (thin_air_constraint_may_allow(curr, act)) {
if (isfeasible() ||
(curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
- struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
+ struct PendingFutureValue pfv = {curr,act};
futurevalues->push_back(pfv);
}
}
return true;
}
+/**
+ * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
+ * some constraints. This method checks one of the following constraints (others
+ * require compiler support):
+ *
+ * If X --hb-> Y --mo-> Z, then X should not read from Z.
+ *
+ * Here @a reader is X and @a writer is Z; returns false when the read
+ * must be disallowed.
+ */
+bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
+{
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
+ unsigned int i;
+ /* Iterate over all threads */
+ for (i = 0; i < thrd_lists->size(); i++) {
+ /* Earliest write (a candidate Y) in this thread that the reader
+  * happens-before; reads contribute their reads-from source */
+ const ModelAction *write_after_read = NULL;
+
+ /* Iterate over actions in thread, starting from most recent */
+ action_list_t *list = &(*thrd_lists)[i];
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *act = *rit;
+
+ /* Once act is no longer hb-after the reader, no earlier action
+  * in this thread can be either — stop scanning */
+ if (!reader->happens_before(act))
+ break;
+ else if (act->is_write())
+ write_after_read = act;
+ else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
+ write_after_read = act->get_reads_from();
+ }
+ }
+
+ /* Disallow when Y --mo-> Z, i.e. writer is mo-reachable from Y */
+ if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
+ return false;
+ }
+ return true;
+}
+
/**
* Finds the head(s) of the release sequence(s) containing a given ModelAction.
* The ModelAction under consideration is expected to be taking part in
* "returns" two pieces of data: a pass-by-reference vector of @a release_heads
* and a boolean representing certainty.
*
- * @todo Finish lazy updating, when promises are fulfilled in the future
* @param rf The action that might be part of a release sequence. Must be a
* write.
* @param release_heads A pass-by-reference style return parameter. After
/* else relaxed write; check modification order for contiguous subsequence
* -> rf must be same thread as release */
int tid = id_to_int(rf->get_tid());
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
action_list_t *list = &(*thrd_lists)[tid];
action_list_t::const_reverse_iterator rit;
ModelAction *last = get_last_action(int_to_id(i));
Thread *th = get_thread(int_to_id(i));
if ((last && rf->happens_before(last)) ||
- !scheduler->is_enabled(th) ||
+ !is_enabled(th) ||
th->is_complete())
future_ordered = true;
+ ASSERT(!th->is_model_thread() || future_ordered);
+
for (rit = list->rbegin(); rit != list->rend(); rit++) {
const ModelAction *act = *rit;
/* Reach synchronization -> this thread is complete */
continue;
}
- /* Only writes can break release sequences */
- if (!act->is_write())
+ /* Only non-RMW writes can break release sequences */
+ if (!act->is_write() || act->is_rmw())
continue;
/* Check modification order */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
bool updated = false;
- std::vector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
+ std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
while (it != pending_rel_seqs->end()) {
struct release_seq *pending = *it;
ModelAction *act = pending->acquire;
}
// If we resolved promises or data races, see if we have realized a data race.
- if (checkDataRaces()) {
- set_assert();
- }
+ checkDataRaces();
return updated;
}
int tid = id_to_int(act->get_tid());
action_trace->push_back(act);
- obj_map->get_safe_ptr(act->get_location())->push_back(act);
+ get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
- std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
+ std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
if (tid >= (int)vec->size())
vec->resize(priv->next_thread_id);
(*vec)[tid].push_back(act);
if ((int)thrd_last_action->size() <= tid)
thrd_last_action->resize(get_num_threads());
(*thrd_last_action)[tid] = act;
+
+ if (act->is_wait()) {
+ void *mutex_loc=(void *) act->get_value();
+ get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
+
+ std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
+ if (tid >= (int)vec->size())
+ vec->resize(priv->next_thread_id);
+ (*vec)[tid].push_back(act);
+
+ if ((int)thrd_last_action->size() <= tid)
+ thrd_last_action->resize(get_num_threads());
+ (*thrd_last_action)[tid] = act;
+ }
}
/**
ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
{
void *location = curr->get_location();
- action_list_t *list = obj_map->get_safe_ptr(location);
+ action_list_t *list = get_safe_ptr_action(obj_map, location);
/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++)
ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
{
	void *location = curr->get_location();
-	action_list_t *list = obj_map->get_safe_ptr(location);
+	action_list_t *list = get_safe_ptr_action(obj_map, location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
-		if ((*rit)->is_unlock())
+		/* A condvar wait implicitly releases its mutex, so it also counts as
+		 * the most recent unlock; wait actions are recorded under the mutex
+		 * location in add_action_to_lists() */
+		if ((*rit)->is_unlock() || (*rit)->is_wait())
		return *rit;
	return NULL;
}
bool ModelChecker::resolve_promises(ModelAction *write)
{
bool resolved = false;
- std::vector<thread_id_t> threads_to_check;
+ std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
Promise *promise = (*promises)[promise_index];
//Make sure the promise's value matches the write's value
ASSERT(promise->get_value() == write->get_value());
delete(promise);
-
+
promises->erase(promises->begin() + promise_index);
threads_to_check.push_back(read->get_tid());
act->is_read() &&
!act->could_synchronize_with(curr) &&
!act->same_thread(curr) &&
+ act->get_location() == curr->get_location() &&
promise->get_value() == curr->get_value()) {
- curr->get_node()->set_promise(i);
+ curr->get_node()->set_promise(i, act->is_rmw());
}
}
}
}
}
+/**
+ * @brief Re-check all pending promises after a thread becomes disabled
+ *
+ * Scans every pending Promise and sets the failed_promise flag as soon as
+ * any one of them reports failure via Promise::check_promise().
+ * NOTE(review): the exact failure condition lives in Promise::check_promise();
+ * presumably a promise fails once no still-enabled thread can satisfy it —
+ * confirm against promise.h.
+ */
+void ModelChecker::check_promises_thread_disabled() {
+	for (unsigned int i = 0; i < promises->size(); i++) {
+		Promise *promise = (*promises)[i];
+		if (promise->check_promise()) {
+			failed_promise = true;
+			return;
+		}
+	}
+}
+
/** Checks promises in response to addition to modification order for threads.
* Definitions:
* pthread is the thread that performed the read that created the promise
- *
+ *
* pread is the read that created the promise
*
* pwrite is either the first write to same location as pread by
for (unsigned int i = 0; i < promises->size(); i++) {
Promise *promise = (*promises)[i];
const ModelAction *act = promise->get_action();
-
+
//Is this promise on the same location?
if ( act->get_location() != location )
continue;
//do we have a pwrite for the promise, if not, set it
if (promise->get_write() == NULL ) {
promise->set_write(write);
+ //The pwrite cannot happen before the promise
+ if (write->happens_before(act) && (write != act)) {
+ failed_promise = true;
+ return;
+ }
}
if (mo_graph->checkPromise(write, promise)) {
failed_promise = true;
return;
}
}
-
+
//Don't do any lookups twice for the same thread
if (promise->has_sync_thread(tid))
continue;
-
- if (mo_graph->checkReachable(promise->get_write(), write)) {
+
+ if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
if (promise->increment_threads(tid)) {
failed_promise = true;
return;
*/
void ModelChecker::build_reads_from_past(ModelAction *curr)
{
- std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
unsigned int i;
ASSERT(curr->is_read());
/* Don't consider more than one seq_cst write if we are a seq_cst read. */
if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
- DEBUG("Adding action to may_read_from:\n");
- if (DBG_ENABLED()) {
- act->print();
- curr->print();
+ if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
+ DEBUG("Adding action to may_read_from:\n");
+ if (DBG_ENABLED()) {
+ act->print();
+ curr->print();
+ }
+ curr->get_node()->add_read_from(act);
}
- curr->get_node()->add_read_from(act);
}
/* Include at most one act per-thread that "happens before" curr */
}
}
- if (!initialized) {
- /** @todo Need a more informative way of reporting errors. */
- printf("ERROR: may read from uninitialized atomic\n");
- }
+ if (!initialized)
+ assert_bug("May read from uninitialized atomic");
if (DBG_ENABLED() || !initialized) {
- printf("Reached read action:\n");
+ model_print("Reached read action:\n");
curr->print();
- printf("Printing may_read_from\n");
+ model_print("Printing may_read_from\n");
curr->get_node()->print_may_read_from();
- printf("End printing may_read_from\n");
+ model_print("End printing may_read_from\n");
}
+}
+
+/**
+ * @brief Decide whether a sleeping thread's read may read from a given write
+ *
+ * Walks backward through the reads-from chain of RMW writes: the read is
+ * permitted if some write in the chain is a release performed while curr's
+ * thread was in the sleep set (per the parent Node's enabled status), or if
+ * the chain ends at an RMW whose reads-from is not yet resolved. Used by
+ * build_reads_from_past() to prune may_read_from for sleeping threads.
+ */
+bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
+	while(true) {
+		Node *prevnode=write->get_node()->get_parent();
-	ASSERT(initialized);
+		/* Was curr's thread in the sleep set when this write was explored? */
+		bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
+		if (write->is_release()&&thread_sleep)
+			return true;
+		/* Only an RMW chains to an earlier write; a plain write ends the search */
+		if (!write->is_rmw()) {
+			return false;
+		}
+		if (write->get_reads_from()==NULL)
+			return true;
+		write=write->get_reads_from();
+	}
}
+/**
+ * @brief Print a trace of ModelActions, labeled with an execution number and
+ * a checksum of the trace
+ * @param list The list of actions to print
+ * @param exec_num Execution number to label the trace with; no label is
+ * printed when negative (the default)
+ */
-static void print_list(action_list_t *list)
+static void print_list(action_list_t *list, int exec_num = -1)
{
	action_list_t::iterator it;
-	printf("---------------------------------------------------------------------\n");
-	printf("Trace:\n");
+	model_print("---------------------------------------------------------------------\n");
+	if (exec_num >= 0)
+		model_print("Execution %d:\n", exec_num);
+
+	unsigned int hash=0;
	for (it = list->begin(); it != list->end(); it++) {
		(*it)->print();
+		/* Fold each action's hash into a simple, order-sensitive checksum so
+		 * whole executions can be compared at a glance */
+		hash=hash^(hash<<3)^((*it)->hash());
	}
-	printf("---------------------------------------------------------------------\n");
+	model_print("HASH %u\n", hash);
+	model_print("---------------------------------------------------------------------\n");
}
#if SUPPORT_MOD_ORDER_DUMP
void ModelChecker::dumpGraph(char *filename) {
char buffer[200];
- sprintf(buffer, "%s.dot",filename);
- FILE *file=fopen(buffer, "w");
- fprintf(file, "digraph %s {\n",filename);
+ sprintf(buffer, "%s.dot",filename);
+ FILE *file=fopen(buffer, "w");
+ fprintf(file, "digraph %s {\n",filename);
mo_graph->dumpNodes(file);
ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
-
+
for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
ModelAction *action=*it;
if (action->is_read()) {
if (thread_array[action->get_tid()] != NULL) {
fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
}
-
+
thread_array[action->get_tid()]=action;
}
- fprintf(file,"}\n");
+ fprintf(file,"}\n");
model_free(thread_array);
- fclose(file);
+ fclose(file);
}
#endif
void ModelChecker::print_summary()
{
+	/* Per-execution counters now live in the snapshot-safe stats struct (see
+	 * struct model_snapshot_members); the aggregate totals are presumably
+	 * reported by a separate stats routine — confirm at call sites. Here we
+	 * only dump the trace, labeled with stats.num_total. */
-	printf("\n");
-	printf("Number of executions: %d\n", num_executions);
-	printf("Number of feasible executions: %d\n", num_feasible_executions);
-	printf("Total nodes created: %d\n", node_stack->get_total_nodes());
-
#if SUPPORT_MOD_ORDER_DUMP
	scheduler->print();
	char buffername[100];
-	sprintf(buffername, "exec%04u", num_executions);
+	sprintf(buffername, "exec%04u", stats.num_total);
	mo_graph->dumpGraphToFile(buffername);
-	sprintf(buffername, "graph%04u", num_executions);
-	dumpGraph(buffername);
+	sprintf(buffername, "graph%04u", stats.num_total);
+	dumpGraph(buffername);
#endif
	if (!isfinalfeasible())
-		printf("INFEASIBLE EXECUTION!\n");
-	print_list(action_trace);
-	printf("\n");
+		model_print("INFEASIBLE EXECUTION!\n");
+	print_list(action_trace, stats.num_total);
+	model_print("\n");
}
/**
}
/**
- * Removes a thread from the scheduler.
+ * Removes a thread from the scheduler.
* @param the thread to remove.
*/
void ModelChecker::remove_thread(Thread *t)
return get_thread(act->get_tid());
}
+/**
+ * @brief Check if a Thread is currently enabled
+ *
+ * Thin wrapper around Scheduler::is_enabled(), so callers need not reach
+ * into the scheduler directly.
+ * @param t The Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool ModelChecker::is_enabled(Thread *t) const
+{
+	return scheduler->is_enabled(t);
+}
+
+/**
+ * @brief Check if a Thread is currently enabled
+ *
+ * Thin wrapper around Scheduler::is_enabled(), identifying the Thread by ID.
+ * @param tid The ID of the Thread to check
+ * @return True if the Thread is currently enabled
+ */
+bool ModelChecker::is_enabled(thread_id_t tid) const
+{
+	return scheduler->is_enabled(tid);
+}
+
/**
* Switch from a user-context to the "master thread" context (a.k.a. system
* context). This switch is made with the intention of exploring a particular
if (has_asserted())
return false;
- Thread *curr = thread_current();
+ Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
if (curr) {
if (curr->get_state() == THREAD_READY) {
ASSERT(priv->current_action);
/* Infeasible -> don't take any more steps */
if (!isfeasible())
return false;
+ else if (isfeasibleprefix() && have_bug_reports()) {
+ set_assert();
+ return false;
+ }
+
+ if (params.bound != 0) {
+ if (priv->used_sequence_numbers > params.bound) {
+ return false;
+ }
+ }
DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
next ? id_to_int(next->get_id()) : -1);
+ /*
+ * Launch end-of-execution release sequence fixups only when there are:
+ *
+ * (1) no more user threads to run (or when execution replay chooses
+ * the 'model_thread')
+ * (2) pending release sequences
+ * (3) pending assertions (i.e., data races)
+ * (4) no pending promises
+ */
+ if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
+ isfinalfeasible() && !unrealizedraces.empty()) {
+		model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
+ pending_rel_seqs->size());
+ ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
+ std::memory_order_seq_cst, NULL, VALUE_NONE,
+ model_thread);
+ set_current_action(fixup);
+ return true;
+ }
+
/* next == NULL -> don't take any more steps */
if (!next)
return false;